diff --git a/Cargo.toml b/Cargo.toml index 92f300d4..e1a2043b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -161,6 +161,7 @@ members = [ "bizinikiwi/pezframe/message-queue", "bizinikiwi/pezframe/meta-tx", "bizinikiwi/pezframe/metadata-hash-extension", + "bizinikiwi/pezframe/pezframe-metadata", "bizinikiwi/pezframe/migrations", "bizinikiwi/pezframe/mixnet", "bizinikiwi/pezframe/multi-asset-bounties", @@ -603,6 +604,25 @@ members = [ "templates/teyrchain/runtime", "templates/zombienet", "umbrella", + "vendor/ss58-registry", + # Vendored pezkuwi-subxt crates + "vendor/pezkuwi-subxt/codegen", + "vendor/pezkuwi-subxt/core", + "vendor/pezkuwi-subxt/lightclient", + "vendor/pezkuwi-subxt/macro", + "vendor/pezkuwi-subxt/metadata", + "vendor/pezkuwi-subxt/rpcs", + "vendor/pezkuwi-subxt/signer", + "vendor/pezkuwi-subxt/subxt", + "vendor/pezkuwi-subxt/utils/fetch-metadata", + "vendor/pezkuwi-subxt/utils/strip-metadata", + # Vendored pezkuwi-zombienet-sdk crates + "vendor/pezkuwi-zombienet-sdk/crates/configuration", + "vendor/pezkuwi-zombienet-sdk/crates/orchestrator", + "vendor/pezkuwi-zombienet-sdk/crates/provider", + "vendor/pezkuwi-zombienet-sdk/crates/sdk", + "vendor/pezkuwi-zombienet-sdk/crates/support", + "vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser", ] default-members = [ @@ -1449,17 +1469,17 @@ ssz_rs_derive = { version = "0.9.0", default-features = false } static_assertions = { version = "1.1.0", default-features = false } static_init = { version = "1.0.3" } strum = { version = "0.26.3", default-features = false } -# Pezkuwi-subxt (forked from subxt with pezsp_runtime support) - using git dependencies -pezkuwi-subxt = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt", default-features = false } -pezkuwi-subxt-codegen = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-codegen" } -pezkuwi-subxt-core = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", 
branch = "main", package = "pezkuwi-subxt-core", default-features = false } -pezkuwi-subxt-lightclient = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-lightclient", default-features = false } -pezkuwi-subxt-macro = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-macro" } -pezkuwi-subxt-metadata = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-metadata", default-features = false } -pezkuwi-subxt-rpcs = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-rpcs", default-features = false } -pezkuwi-subxt-signer = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-signer", default-features = false } -pezkuwi-subxt-utils-fetchmetadata = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-utils-fetchmetadata", default-features = false } -pezkuwi-subxt-utils-stripmetadata = { git = "https://github.com/pezkuwichain/pezkuwi-subxt", branch = "main", package = "pezkuwi-subxt-utils-stripmetadata" } +# Pezkuwi-subxt (vendored from subxt with pezsp_runtime support) +pezkuwi-subxt = { path = "vendor/pezkuwi-subxt/subxt", default-features = false } +pezkuwi-subxt-codegen = { path = "vendor/pezkuwi-subxt/codegen" } +pezkuwi-subxt-core = { path = "vendor/pezkuwi-subxt/core", default-features = false } +pezkuwi-subxt-lightclient = { path = "vendor/pezkuwi-subxt/lightclient", default-features = false } +pezkuwi-subxt-macro = { path = "vendor/pezkuwi-subxt/macro" } +pezkuwi-subxt-metadata = { path = "vendor/pezkuwi-subxt/metadata", default-features = false } +pezkuwi-subxt-rpcs = { path = "vendor/pezkuwi-subxt/rpcs", default-features = false } +pezkuwi-subxt-signer = { path = "vendor/pezkuwi-subxt/signer", default-features = false } +pezkuwi-subxt-utils-fetchmetadata = { path = 
"vendor/pezkuwi-subxt/utils/fetch-metadata", default-features = false } +pezkuwi-subxt-utils-stripmetadata = { path = "vendor/pezkuwi-subxt/utils/strip-metadata" } syn = { version = "2.0.87" } sysinfo = { version = "0.30" } tar = { version = "0.4" } @@ -1538,9 +1558,10 @@ zagros-runtime = { path = "pezkuwi/runtime/zagros", default-features = false } zagros-runtime-constants = { path = "pezkuwi/runtime/zagros/constants", default-features = false } zagros-system-emulated-network = { path = "pezcumulus/teyrchains/integration-tests/emulated/networks/zagros-system" } zeroize = { version = "1.7.0", default-features = false } -zombienet-configuration = { git = "https://github.com/pezkuwichain/pezkuwi-zombienet-sdk", branch = "main" } -zombienet-orchestrator = { git = "https://github.com/pezkuwichain/pezkuwi-zombienet-sdk", branch = "main" } -zombienet-sdk = { git = "https://github.com/pezkuwichain/pezkuwi-zombienet-sdk", branch = "main" } +# Pezkuwi-zombienet-sdk (vendored) +zombienet-configuration = { path = "vendor/pezkuwi-zombienet-sdk/crates/configuration" } +zombienet-orchestrator = { path = "vendor/pezkuwi-zombienet-sdk/crates/orchestrator" } +zombienet-sdk = { path = "vendor/pezkuwi-zombienet-sdk/crates/sdk" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/bizinikiwi/bin/node/cli/Cargo.toml b/bizinikiwi/bin/node/cli/Cargo.toml index a4976a85..a03bdf06 100644 --- a/bizinikiwi/bin/node/cli/Cargo.toml +++ b/bizinikiwi/bin/node/cli/Cargo.toml @@ -56,10 +56,10 @@ codec = { workspace = true, default-features = true } futures = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } +pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth"] } rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } 
-pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth"] } # The Pezkuwi-SDK: pezkuwi-sdk = { features = [ @@ -225,4 +225,5 @@ try-runtime = [ "bizinikiwi-cli-test-utils/try-runtime", "pez-kitchensink-runtime/try-runtime", "pezkuwi-sdk/try-runtime", + "pezsp-runtime/try-runtime" ] diff --git a/bizinikiwi/bin/node/runtime/Cargo.toml b/bizinikiwi/bin/node/runtime/Cargo.toml index 5a378a76..7a2a42d5 100644 --- a/bizinikiwi/bin/node/runtime/Cargo.toml +++ b/bizinikiwi/bin/node/runtime/Cargo.toml @@ -36,9 +36,9 @@ primitive-types = { features = [ "scale-info", ], workspace = true } -pezkuwi-sdk = { features = ["runtime-full", "tuples-96"], workspace = true } pezframe-try-runtime = { optional = true, workspace = true } -pezsp-runtime = { path = "../../../primitives/runtime", default-features = false } +pezkuwi-sdk = { features = ["runtime-full", "tuples-96"], workspace = true } +pezsp-runtime = { workspace = true, default-features = false } # shared code between runtime and node pez-node-primitives = { workspace = true } @@ -77,6 +77,8 @@ runtime-benchmarks = [ "pezpallet-example-tasks/runtime-benchmarks", "rand", "rand_pcg", + "pezframe-try-runtime?/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" ] try-runtime = [ "pezframe-try-runtime", @@ -84,6 +86,7 @@ try-runtime = [ "pezkuwi-sdk/try-runtime", "pezpallet-example-mbm/try-runtime", "pezpallet-example-tasks/try-runtime", + "pezsp-runtime/try-runtime" ] experimental = ["pezpallet-example-tasks/experimental"] metadata-hash = ["bizinikiwi-wasm-builder/metadata-hash"] diff --git a/bizinikiwi/client/runtime-utilities/Cargo.toml b/bizinikiwi/client/runtime-utilities/Cargo.toml index 79af356a..3924215d 100644 --- a/bizinikiwi/client/runtime-utilities/Cargo.toml +++ b/bizinikiwi/client/runtime-utilities/Cargo.toml @@ -30,9 +30,9 @@ thiserror = { workspace = true } [dev-dependencies] pezcumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } pezcumulus-test-runtime = { 
workspace = true, default-features = true } +pezkuwi-subxt = { workspace = true, features = ["native"] } pezsp-io = { workspace = true, default-features = true } pezsp-version = { workspace = true, default-features = true } -pezkuwi-subxt = { workspace = true, features = ["native"] } [features] runtime-benchmarks = [ diff --git a/bizinikiwi/pezframe/revive/Cargo.toml b/bizinikiwi/pezframe/revive/Cargo.toml index ed732631..9f874c65 100644 --- a/bizinikiwi/pezframe/revive/Cargo.toml +++ b/bizinikiwi/pezframe/revive/Cargo.toml @@ -52,6 +52,10 @@ bn = { workspace = true } pezframe-benchmarking = { optional = true, workspace = true } pezframe-support = { workspace = true } pezframe-system = { workspace = true } +pezkuwi-subxt-signer = { workspace = true, optional = true, features = [ + "subxt", + "unstable-eth", +] } pezpallet-revive-fixtures = { workspace = true, optional = true } pezpallet-revive-proc-macro = { workspace = true } pezpallet-revive-uapi = { workspace = true, features = [ @@ -69,10 +73,6 @@ pezsp-io = { workspace = true } pezsp-runtime = { workspace = true } pezsp-version = { workspace = true } ripemd = { workspace = true } -pezkuwi-subxt-signer = { workspace = true, optional = true, features = [ - "unstable-eth", - "subxt", -] } [dev-dependencies] alloy-consensus = { workspace = true, default-features = true } @@ -113,6 +113,7 @@ std = [ "pezframe-benchmarking?/std", "pezframe-support/std", "pezframe-system/std", + "pezkuwi-subxt-signer", "pezpallet-proxy/std", "pezpallet-revive-fixtures?/std", "pezpallet-timestamp/std", @@ -139,7 +140,7 @@ std = [ "secp256k1/std", "serde/std", "serde_json/std", - "pezkuwi-subxt-signer", + "pezkuwi-subxt-signer?/std" ] runtime-benchmarks = [ "k256", diff --git a/bizinikiwi/pezframe/revive/dev-node/node/Cargo.toml b/bizinikiwi/pezframe/revive/dev-node/node/Cargo.toml index cc2a6c6a..042394de 100644 --- a/bizinikiwi/pezframe/revive/dev-node/node/Cargo.toml +++ b/bizinikiwi/pezframe/revive/dev-node/node/Cargo.toml @@ -43,8 
+43,13 @@ pezkuwi-sdk = { workspace = true, features = ["bizinikiwi-build-script-utils"] } [features] default = ["std"] -std = ["pez-revive-dev-runtime/std", "pezkuwi-sdk/std"] +std = [ + "pez-revive-dev-runtime/std", + "pezkuwi-sdk/std", + "pezsp-runtime/std" +] runtime-benchmarks = [ "pez-revive-dev-runtime/runtime-benchmarks", "pezkuwi-sdk/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" ] diff --git a/bizinikiwi/pezframe/revive/dev-node/runtime/Cargo.toml b/bizinikiwi/pezframe/revive/dev-node/runtime/Cargo.toml index c4215aef..5c707acd 100644 --- a/bizinikiwi/pezframe/revive/dev-node/runtime/Cargo.toml +++ b/bizinikiwi/pezframe/revive/dev-node/runtime/Cargo.toml @@ -49,4 +49,7 @@ std = [ "scale-info/std", "serde_json/std", ] -runtime-benchmarks = ["pezkuwi-sdk/runtime-benchmarks"] +runtime-benchmarks = [ + "pezkuwi-sdk/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" +] diff --git a/bizinikiwi/pezframe/revive/rpc/Cargo.toml b/bizinikiwi/pezframe/revive/rpc/Cargo.toml index d128cf95..572f237b 100644 --- a/bizinikiwi/pezframe/revive/rpc/Cargo.toml +++ b/bizinikiwi/pezframe/revive/rpc/Cargo.toml @@ -31,6 +31,7 @@ log = { workspace = true } pezkuwi-subxt = { workspace = true, default-features = true, features = [ "reconnecting-rpc-client", ] } +pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth"] } pezpallet-revive = { workspace = true, default-features = true } pezsc-cli = { workspace = true, default-features = true } pezsc-rpc = { workspace = true, default-features = true } @@ -51,7 +52,6 @@ serde = { workspace = true, default-features = true, features = [ ] } serde_json = { workspace = true } sqlx = { workspace = true, features = ["macros", "runtime-tokio", "sqlite"] } -pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/bizinikiwi/primitives/weights/Cargo.toml b/bizinikiwi/primitives/weights/Cargo.toml index 
9814100c..67ed798b 100644 --- a/bizinikiwi/primitives/weights/Cargo.toml +++ b/bizinikiwi/primitives/weights/Cargo.toml @@ -34,6 +34,7 @@ std = [ "pezsp-debug-derive/std", "scale-info/std", "serde/std", + "schemars?/std" ] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. diff --git a/bizinikiwi/utils/pezframe/benchmarking-cli/Cargo.toml b/bizinikiwi/utils/pezframe/benchmarking-cli/Cargo.toml index a0e4e0d1..4547bad5 100644 --- a/bizinikiwi/utils/pezframe/benchmarking-cli/Cargo.toml +++ b/bizinikiwi/utils/pezframe/benchmarking-cli/Cargo.toml @@ -36,6 +36,8 @@ pezframe-benchmarking = { workspace = true, default-features = true } pezframe-support = { workspace = true, default-features = true } pezframe-system = { workspace = true, default-features = true } pezkuwi-primitives = { workspace = true, default-features = true } +pezkuwi-subxt = { workspace = true, features = ["native"] } +pezkuwi-subxt-signer = { workspace = true, features = ["sr25519", "subxt", "unstable-eth"] } pezkuwi-teyrchain-primitives = { workspace = true, default-features = true } pezsc-block-builder = { workspace = true, default-features = true } pezsc-chain-spec = { workspace = true } @@ -71,8 +73,6 @@ rand = { features = ["small_rng"], workspace = true, default-features = true } rand_pcg = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -pezkuwi-subxt = { workspace = true, features = ["native"] } -pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth", "sr25519", "subxt"] } thiserror = { workspace = true } thousands = { workspace = true } diff --git a/bizinikiwi/utils/txtesttool/Cargo.toml b/bizinikiwi/utils/txtesttool/Cargo.toml index 7ad3fe28..e052b2ca 100644 --- a/bizinikiwi/utils/txtesttool/Cargo.toml +++ b/bizinikiwi/utils/txtesttool/Cargo.toml @@ -30,13 +30,13 @@ jsonrpsee = { workspace = true, features = [ 
"jsonrpsee-types", ] } parking_lot = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, features = ["arbitrary_precision"] } pezkuwi-subxt = { workspace = true, default-features = true } pezkuwi-subxt-core = { workspace = true, default-features = true } pezkuwi-subxt-rpcs = { workspace = true, default-features = true } -pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth", "sr25519", "subxt"] } +pezkuwi-subxt-signer = { workspace = true, features = ["sr25519", "subxt", "unstable-eth"] } +rand = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, features = ["arbitrary_precision"] } termplot = "0.1.1" thiserror = { workspace = true } time = { version = "0.3.36", features = [ diff --git a/pezcumulus/pezkuwi-omni-node/lib/Cargo.toml b/pezcumulus/pezkuwi-omni-node/lib/Cargo.toml index 3970e514..02186431 100644 --- a/pezcumulus/pezkuwi-omni-node/lib/Cargo.toml +++ b/pezcumulus/pezkuwi-omni-node/lib/Cargo.toml @@ -29,8 +29,8 @@ serde_json = { workspace = true, default-features = true } # Local jsonrpsee = { features = ["server"], workspace = true } -scale-info = { workspace = true } pezkuwi-subxt-metadata = { workspace = true, default-features = true } +scale-info = { workspace = true } teyrchains-common = { workspace = true, default-features = true } # Bizinikiwi diff --git a/pezkuwi/zombienet-sdk-tests/Cargo.toml b/pezkuwi/zombienet-sdk-tests/Cargo.toml index 8cfbb663..3b2ef59a 100644 --- a/pezkuwi/zombienet-sdk-tests/Cargo.toml +++ b/pezkuwi/zombienet-sdk-tests/Cargo.toml @@ -20,13 +20,13 @@ futures = { workspace = true } log = { workspace = true } pezcumulus-zombienet-sdk-helpers = { workspace = true } pezkuwi-primitives = { workspace = true, default-features = true } +pezkuwi-subxt = { workspace = true, default-features = false, 
features = ["native"] } pezpallet-revive = { workspace = true, features = ["std"] } pezsp-core = { workspace = true } pezsp-runtime = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -pezkuwi-subxt = { workspace = true, default-features = false, features = ["native"] } tokio = { workspace = true, features = ["rt-multi-thread"] } tokio-util = { workspace = true, features = ["time"] } zombienet-orchestrator = { workspace = true } @@ -43,6 +43,7 @@ runtime-benchmarks = [ "pezsc-executor/runtime-benchmarks", "pezsc-runtime-utilities/runtime-benchmarks", "pezsp-io/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" ] [build-dependencies] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7855e6d5..c6f8dfaf 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] channel = "1.88.0" -components = ["rustfmt", "clippy"] +components = ["clippy", "rustfmt"] diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index ae5116b5..41878566 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -43,8 +43,13 @@ pezkuwi-sdk = { workspace = true, features = ["bizinikiwi-build-script-utils"] } [features] default = ["std"] -std = ["pez-minimal-template-runtime/std", "pezkuwi-sdk/std"] +std = [ + "pez-minimal-template-runtime/std", + "pezkuwi-sdk/std", + "pezsp-runtime/std" +] runtime-benchmarks = [ "pez-minimal-template-runtime/runtime-benchmarks", "pezkuwi-sdk/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" ] diff --git a/templates/teyrchain/node/Cargo.toml b/templates/teyrchain/node/Cargo.toml index d9ff4e80..03bf1afa 100644 --- a/templates/teyrchain/node/Cargo.toml +++ b/templates/teyrchain/node/Cargo.toml @@ -48,8 +48,10 @@ std = [ runtime-benchmarks = [ "pezkuwi-sdk/runtime-benchmarks", "teyrchain-template-runtime/runtime-benchmarks", + "pezsp-runtime/runtime-benchmarks" ] try-runtime = [ 
"pezkuwi-sdk/try-runtime", "teyrchain-template-runtime/try-runtime", + "pezsp-runtime/try-runtime" ] diff --git a/templates/teyrchain/runtime/Cargo.toml b/templates/teyrchain/runtime/Cargo.toml index 92069fce..9f8fdfe4 100644 --- a/templates/teyrchain/runtime/Cargo.toml +++ b/templates/teyrchain/runtime/Cargo.toml @@ -113,6 +113,7 @@ runtime-benchmarks = [ "pezkuwi-sdk/runtime-benchmarks", "pezpallet-teyrchain-template/runtime-benchmarks", "pezsp-runtime/runtime-benchmarks", + "pezframe-try-runtime?/runtime-benchmarks" ] try-runtime = [ "pezcumulus-pezpallet-teyrchain-system/try-runtime", @@ -120,6 +121,7 @@ try-runtime = [ "pezframe-try-runtime/try-runtime", "pezkuwi-sdk/try-runtime", "pezpallet-teyrchain-template/try-runtime", + "pezsp-runtime/try-runtime" ] # Enable the metadata hash generation. diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 4170309c..d42bc035 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -685,6 +685,7 @@ try-runtime = [ "pezpallet-xcm?/try-runtime", "pezstaging-teyrchain-info?/try-runtime", "teyrchains-common?/try-runtime", + "pezsp-runtime?/try-runtime" ] serde = [ "bp-pezkuwi-core?/serde", @@ -723,6 +724,7 @@ serde = [ "pezsp-storage?/serde", "pezsp-version?/serde", "pezsp-weights?/serde", + "pezsp-runtime?/serde" ] experimental = [ "pezframe-support-procedural?/experimental", diff --git a/vendor/pezkuwi-subxt/.editorconfig b/vendor/pezkuwi-subxt/.editorconfig new file mode 100644 index 00000000..f0735ced --- /dev/null +++ b/vendor/pezkuwi-subxt/.editorconfig @@ -0,0 +1,16 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# editorconfig.org + +root = true + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +indent_style = space +indent_size = 4 + +[*.yml] +indent_size = 2 diff --git a/vendor/pezkuwi-subxt/.github/dependabot.yml b/vendor/pezkuwi-subxt/.github/dependabot.yml new file mode 100644 
index 00000000..9d0bf388 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directories: + - "**/*" + schedule: + interval: weekly + - package-ecosystem: github-actions + directories: + - "**/*" + schedule: + interval: weekly diff --git a/vendor/pezkuwi-subxt/.github/issue_templates/nightly_run_failed.md b/vendor/pezkuwi-subxt/.github/issue_templates/nightly_run_failed.md new file mode 100644 index 00000000..090f449d --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/issue_templates/nightly_run_failed.md @@ -0,0 +1,7 @@ +--- +title: Subxt integration tests failed against latest Bizinikiwi build. +--- + +The nightly CI run which downloads the latest version of Bizinikiwi ran into test failures, which likely means that there are breaking changes that need fixing in Subxt. + +Go to https://github.com/pezkuwichain/subxt/actions/workflows/nightly.yml to see details about the failure. \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/README.md b/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/README.md new file mode 100644 index 00000000..340e0e99 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/README.md @@ -0,0 +1,3 @@ +# use-nodes + +This action downloads the bizinikiwi and pezkuwi binaries produced from the `build-nodes` workflow and puts them into the `$PATH`. 
\ No newline at end of file diff --git a/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/action.yml b/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/action.yml new file mode 100644 index 00000000..06fc74a5 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/actions/use-nodes/action.yml @@ -0,0 +1,43 @@ +name: Use substrate and polkadot binaries +description: Downloads and configures the substrate and polkadot binaries built with `build-nodes` +runs: + using: composite + steps: + - name: Install dependencies + shell: bash + run: sudo apt-get update && sudo apt-get install -y curl gcc make clang cmake + + - name: Download substrate-node binary + id: download-substrate-binary + uses: dawidd6/action-download-artifact@4c1e823582f43b179e2cbb49c3eade4e41f992e2 # v10 + with: + workflow: build-nodes.yml + name: nightly-substrate-binary + + - name: Download polkadot binary + id: download-polkadot-binary + uses: dawidd6/action-download-artifact@4c1e823582f43b179e2cbb49c3eade4e41f992e2 # v10 + with: + workflow: build-nodes.yml + name: nightly-polkadot-binary + + - name: decompress polkadot binary + shell: bash + run: | + tar -xzvf ./polkadot.tar.gz + cp ./target/release/polkadot ./polkadot + + - name: Prepare binaries + shell: bash + run: | + chmod u+x ./substrate-node + chmod u+x ./polkadot + chmod u+x ./polkadot-execute-worker + chmod u+x ./polkadot-prepare-worker + ./substrate-node --version + ./polkadot --version + sudo mv ./substrate-node /usr/local/bin + sudo mv ./polkadot /usr/local/bin + sudo mv ./polkadot-execute-worker /usr/local/bin + sudo mv ./polkadot-prepare-worker /usr/local/bin + rm ./polkadot.tar.gz diff --git a/vendor/pezkuwi-subxt/.github/workflows/build-nodes.yml b/vendor/pezkuwi-subxt/.github/workflows/build-nodes.yml new file mode 100644 index 00000000..9e2d2c8e --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/build-nodes.yml @@ -0,0 +1,73 @@ +name: Build Substrate and Polkadot Binaries + +on: + # Allow it to be 
manually ran to rebuild binary when needed: + workflow_dispatch: {} + # Run at 2am every day for nightly builds. + schedule: + - cron: "0 2 * * *" + +jobs: + tests: + name: Build Substrate and Polkadot Binaries + runs-on: parity-large + steps: + - name: checkout polkadot-sdk + uses: actions/checkout@v6 + with: + repository: paritytech/polkadot-sdk + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y protobuf-compiler curl gcc make clang cmake llvm-dev libclang-dev + + - name: Install Rust v1.88 toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: 1.88 + components: rust-src + target: wasm32-unknown-unknown + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: build substrate binary + uses: actions-rs/cargo@v1 + with: + command: build + args: --release --manifest-path substrate/bin/node/cli/Cargo.toml + + - name: build polkadot binary + uses: actions-rs/cargo@v1 + with: + command: build + args: --release --manifest-path polkadot/Cargo.toml + + - name: Strip binaries + run: | + cargo install cargo-strip + cargo strip + + - name: upload substrate binary + uses: actions/upload-artifact@v5 + with: + name: nightly-substrate-binary + path: target/release/substrate-node + retention-days: 2 + if-no-files-found: error + + # Note: Uncompressed polkadot binary is ~124MB -> too large for git (max 100MB) without git lfs. 
Compressed it is only ~45MB + - name: compress polkadot binary + run: | + tar -zcvf target/release/polkadot.tar.gz target/release/polkadot + + - name: upload polkadot binary + uses: actions/upload-artifact@v5 + with: + name: nightly-polkadot-binary + path: | + target/release/polkadot.tar.gz + target/release/polkadot-execute-worker + target/release/polkadot-prepare-worker + retention-days: 2 + if-no-files-found: error diff --git a/vendor/pezkuwi-subxt/.github/workflows/nightly.yml b/vendor/pezkuwi-subxt/.github/workflows/nightly.yml new file mode 100644 index 00000000..36512b1d --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/nightly.yml @@ -0,0 +1,51 @@ +name: Daily compatibility check against latest substrate + +on: + schedule: + # Run at 8am every day, well after the new binary is built + - cron: "0 8 * * *" + +env: + CARGO_TERM_COLOR: always + +jobs: + tests: + name: Cargo test + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Cargo test + uses: actions-rs/cargo@v1.0.3 + with: + command: test + args: --all-targets --workspace + + # If any previous step fails, create a new Github issue to notify us about it. 
+ - if: ${{ failure() }} + uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + # Use this issue template: + filename: .github/issue_templates/nightly_run_failed.md + # Update existing issue if found; hopefully will make it clearer + # that it is still an issue: + update_existing: true + # Look for new *open* issues in this search (we want to + # create a new one if we only find closed versions): + search_existing: open diff --git a/vendor/pezkuwi-subxt/.github/workflows/rust.yml b/vendor/pezkuwi-subxt/.github/workflows/rust.yml new file mode 100644 index 00000000..fb945920 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/rust.yml @@ -0,0 +1,529 @@ +name: Rust + +on: + push: + # Run jobs when commits are pushed to + # master or release-like branches: + branches: + - master + # If we want to backport changes to an old release, push a branch + # eg v0.40.x and CI will run on it. PRs merging to such branches + # will also trigger CI. + - v0.[0-9]+.x + pull_request: + # Run jobs for any external PR that wants + # to merge to master, too: + branches: + - master + - v0.[0-9]+.x + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CARGO_TERM_COLOR: always + # Increase wasm test timeout from 20 seconds (default) to 1 minute. 
+ WASM_BINDGEN_TEST_TIMEOUT: 60 + +jobs: + fmt: + name: Cargo fmt + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Install Rust nightly toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + components: rustfmt + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Cargo fmt + uses: actions-rs/cargo@v1.0.3 + with: + command: fmt + args: --all -- --check + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + machete: + name: "Check unused dependencies" + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Install cargo-machete + run: cargo install cargo-machete + + - name: Check unused dependencies + uses: actions-rs/cargo@v1.0.3 + with: + command: machete + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + clippy: + name: Cargo clippy + runs-on: parity-large + needs: [fmt, machete] + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: clippy + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Run clippy + run: | + cargo clippy --all-targets --features unstable-light-client -- -D warnings + cargo clippy -p 
subxt-lightclient --no-default-features --features web -- -D warnings + cargo clippy -p subxt --no-default-features --features web -- -D warnings + cargo clippy -p subxt --no-default-features --features web,unstable-light-client -- -D warnings + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + wasm_clippy: + name: Cargo clippy (WASM) + runs-on: ubuntu-latest + needs: [fmt, machete] + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + target: wasm32-unknown-unknown + override: true + components: clippy + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Run clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: -p subxt --no-default-features --features web,unstable-light-client,jsonrpsee --target wasm32-unknown-unknown -- -D warnings + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + check: + name: Cargo check + runs-on: parity-large + needs: [fmt, machete] + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Install cargo-hack + uses: baptiste0928/cargo-install@v3 + with: + crate: cargo-hack + version: 0.5 + + # A basic check over all targets together. This may lead to features being combined etc, + # and doesn't test combinations of different features. + - name: Cargo check all targets. 
+ run: cargo check --all-targets + + # Next, check each subxt feature in isolation. + # - `native` feature must always be enabled + # - `web` feature is always ignored. + - name: Cargo hack; check each subxt feature + run: cargo hack -p subxt --each-feature check --exclude-features web --features native + + # Same with subxt-historic + - name: Cargo hack; check each subxt feature + run: cargo hack -p subxt-historic --each-feature check --exclude-features web --features native + + # And with subxt-rpcs + - name: Cargo hack; check each subxt-rpcs feature + run: cargo hack -p subxt-rpcs --each-feature check --exclude-features web --features native + + # And with subxt-signer (seems to work with a more basic check here; disable web if it becomes an issue). + - name: Cargo hack; check each subxt-signer feature + run: cargo hack -p subxt-signer --each-feature check + + # And for subxt-lightclient. + - name: Cargo check subxt-lightclient + run: cargo hack -p subxt-lightclient --each-feature check --exclude-features web --features native + + # Next, check all other crates. + - name: Cargo hack; check each feature/crate on its own + run: cargo hack --exclude subxt --exclude subxt-historic --exclude subxt-signer --exclude subxt-lightclient --exclude subxt-rpcs --exclude-all-features --each-feature check --workspace + + # Check the full examples, which aren't a part of the workspace so are otherwise ignored. 
+ - name: Cargo check parachain-example + run: cargo check --manifest-path examples/parachain-example/Cargo.toml + - name: Cargo check ffi-example + run: cargo check --manifest-path examples/ffi-example/Cargo.toml + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + ffi_example: + name: Run FFI Example + runs-on: ubuntu-latest + needs: [check] + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + target: wasm32-unknown-unknown + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Install + uses: actions/setup-node@v4 + with: + # Node version 20 and higher seem to cause an issue with the JS example so stick to 19 for now. + node-version: 19.x + + - name: Cargo check/run ffi-example + run: | + # Start node on port 8000 + substrate-node --dev --rpc-port 8000 > /dev/null 2>&1 & + + # Build the Rust code (hopefully gives long enough for substrate server to start, too): + cd examples/ffi-example + cargo build + + # Run the python version of the FFI code: + echo "Running Python FFI example..." + python3 src/main.py + echo "Python FFI example completed with exit code $?" + + # Run the node version of the FFI code + echo "Installing Node.js dependencies..." + npm i + echo "Running Node FFI example..." + node src/main.js + echo "Node FFI example completed with exit code $?" 
+ + pkill substrate-node + + wasm_check: + name: Cargo check (WASM) + runs-on: ubuntu-latest + needs: [fmt, machete] + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + target: wasm32-unknown-unknown + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Cargo check web features which require wasm32 target. + run: | + cargo check -p subxt-rpcs --target wasm32-unknown-unknown --no-default-features --features web + cargo check -p subxt-rpcs --target wasm32-unknown-unknown --no-default-features --features web,reconnecting-rpc-client + + # Check WASM examples, which aren't a part of the workspace and so are otherwise missed: + - name: Cargo check WASM examples + run: | + cargo check --manifest-path examples/wasm-example/Cargo.toml --target wasm32-unknown-unknown + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + docs: + name: Check documentation and run doc tests + runs-on: parity-large + needs: [fmt, machete] + timeout-minutes: 30 + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Check internal documentation links + run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links" cargo doc --workspace --no-deps --document-private-items + + - name: Run cargo test on documentation + uses: actions-rs/cargo@v1.0.3 + with: + command: test + args: --doc --features reconnecting-rpc-client + + - if: "failure()" + uses: 
"andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + tests: + name: "Test (Native)" + runs-on: parity-large + needs: [clippy, wasm_clippy, check, wasm_check, docs] + timeout-minutes: 45 + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Install cargo-nextest + run: cargo install cargo-nextest + + - name: Run subxt-signer no-std tests + uses: actions-rs/cargo@v1.0.3 + with: + command: test + working-directory: signer/tests/no-std + + - name: Run tests + uses: actions-rs/cargo@v1.0.3 + with: + command: nextest + args: run --workspace --features reconnecting-rpc-client + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + unstable_backend_tests: + name: "Test chainhead backend" + runs-on: parity-large + needs: [clippy, wasm_clippy, check, wasm_check, docs] + timeout-minutes: 30 + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Install cargo-nextest + run: cargo install cargo-nextest + + - name: Run tests + uses: actions-rs/cargo@v1.0.3 + with: + command: nextest + args: run --workspace --features chainhead-backend + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + light_client_tests: + name: "Test (Light Client)" + 
runs-on: ubuntu-latest + needs: [clippy, wasm_clippy, check, wasm_check, docs] + timeout-minutes: 15 + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Run tests + uses: actions-rs/cargo@v1.0.3 + with: + command: test + args: --release --package integration-tests --features unstable-light-client + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + wasm_tests: + name: Test (WASM) + runs-on: ubuntu-latest + needs: [clippy, wasm_clippy, check, wasm_check, docs] + timeout-minutes: 30 + env: + # Set timeout for wasm tests to be much bigger than the default 20 secs. + WASM_BINDGEN_TEST_TIMEOUT: 300 + + steps: + - uses: actions/checkout@v6 + + - name: Install wasm-pack + run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + + - name: Install firefox + uses: browser-actions/setup-firefox@latest + + - name: Install chrome + uses: browser-actions/setup-chrome@latest + + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + - name: Run subxt WASM tests + run: | + # `listen-addr` is used to configure p2p to accept websocket connections instead of TCP. + # `node-key` provides a deterministic p2p address. 
+ substrate-node --dev --node-key 0000000000000000000000000000000000000000000000000000000000000001 --listen-addr /ip4/0.0.0.0/tcp/30333/ws > /dev/null 2>&1 & + wasm-pack test --headless --firefox + wasm-pack test --headless --chrome + pkill substrate-node + working-directory: testing/wasm-rpc-tests + + - name: Run subxt-lightclient WASM tests + run: | + # `listen-addr` is used to configure p2p to accept websocket connections instead of TCP. + # `node-key` provides a deterministic p2p address. + substrate-node --dev --node-key 0000000000000000000000000000000000000000000000000000000000000001 --listen-addr /ip4/0.0.0.0/tcp/30333/ws > /dev/null 2>&1 & + wasm-pack test --headless --firefox + wasm-pack test --headless --chrome + pkill substrate-node + working-directory: testing/wasm-lightclient-tests + + - name: Run subxt-signer WASM tests + run: | + wasm-pack test --headless --firefox + wasm-pack test --headless --chrome + working-directory: signer/tests/wasm + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 + + no-std-tests: + name: "Test (no_std)" + runs-on: ubuntu-latest + needs: [machete, docs] + timeout-minutes: 30 + steps: + - name: Checkout sources + uses: actions/checkout@v6 + + # Note: needs nighly toolchain because otherwise we cannot define custom lang-items. + - name: Install Rust nightly toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + target: thumbv7em-none-eabi + + - name: Install the gcc-arm-none-eabi linker + run: sudo apt install gcc-arm-none-eabi + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + # Note: We currently do not have a way to run real tests in a `no_std` environment. + # We can only make sure that they compile to ARM thumb ISA. + # Running the binary and inspecting the output would require an actual machine with matching ISA or some sort of emulator. 
+ - name: Compile `no-std-tests` crate to `thumbv7em-none-eabi` target. + run: cargo build --target thumbv7em-none-eabi + working-directory: testing/no-std-tests + + - if: "failure()" + uses: "andymckay/cancel-action@a955d435292c0d409d104b57d8e78435a93a6ef1" # v0.5 diff --git a/vendor/pezkuwi-subxt/.github/workflows/update-artifacts.yml b/vendor/pezkuwi-subxt/.github/workflows/update-artifacts.yml new file mode 100644 index 00000000..078db813 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/update-artifacts.yml @@ -0,0 +1,62 @@ +name: Update Artifacts + +on: + workflow_dispatch: # Allows manual triggering + schedule: + - cron: "0 0 * * 1" # weekly on Monday at 00:00 UTC + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +env: + CARGO_TERM_COLOR: always + +jobs: + check: + name: Renew Artifacts + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v6 + # We run this (up-to-date) node locally to fetch metadata from it for the artifacts + - name: Use substrate and polkadot node binaries + uses: ./.github/workflows/actions/use-nodes + + - name: Install Rust stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + + # This starts a substrate node and runs a few subxt cli child processes to fetch metadata from it and generate code. 
+ # In particular it generates: + # - 4 metadata (*.scale) files in the `artifacts` directory + # - a polkadot.rs file from the full metadata that is checked in integration tests + # - a polkadot.json in the `artifacts/demo_chain_specs` directory + - name: Fetch Artifacts + run: cargo run --bin artifacts + + - uses: actions/create-github-app-token@v2 + id: app-token + with: + app-id: ${{ secrets.SUBXT_PR_MAKER_APP_ID }} + private-key: ${{ secrets.SUBXT_PR_MAKER_APP_KEY }} + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ steps.app-token.outputs.token }} + base: master + branch: update-artifacts + commit-message: Update Artifacts (auto-generated) + branch-suffix: timestamp + title: Update Artifacts (auto-generated) + body: | + This PR updates the artifacts by fetching fresh metadata from a substrate node. + It also recreates the polkadot.rs file used in the integration tests. + It was created automatically by a Weekly GitHub Action Cronjob. diff --git a/vendor/pezkuwi-subxt/.github/workflows/validate-dependabot.yml b/vendor/pezkuwi-subxt/.github/workflows/validate-dependabot.yml new file mode 100644 index 00000000..67d5de52 --- /dev/null +++ b/vendor/pezkuwi-subxt/.github/workflows/validate-dependabot.yml @@ -0,0 +1,14 @@ +name: Dependabot + +on: + pull_request: + paths: + - '.github/dependabot.yml' + - '.github/workflows/validate-dependabot.yml' +jobs: + validate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: marocchino/validate-dependabot@v3 + id: validate diff --git a/vendor/pezkuwi-subxt/.gitignore b/vendor/pezkuwi-subxt/.gitignore new file mode 100644 index 00000000..0dde3651 --- /dev/null +++ b/vendor/pezkuwi-subxt/.gitignore @@ -0,0 +1,9 @@ +/target +**/*.rs.bk +**/.DS_Store +cargo-timing* +/examples/wasm-example/dist +/examples/wasm-example/target +/examples/parachain-example/target +/examples/parachain-example/metadata/target +.vscode diff --git a/vendor/pezkuwi-subxt/CHANGELOG.md 
b/vendor/pezkuwi-subxt/CHANGELOG.md new file mode 100644 index 00000000..708b4a18 --- /dev/null +++ b/vendor/pezkuwi-subxt/CHANGELOG.md @@ -0,0 +1,2401 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +- Add `system_chain_type()` RPC method to `LegacyRpcMethods` + +## [0.44.0] - 2025-08-28 + +This small release primarily fixes a few issues, but also adds the code for a prelease of `subxt-historic`, a new crate (at the moment) for working with historic blocks and state. Future releases will aim to stabilize this crate to the level of other `subxt` crates or otherwise merge the logic into `subxt` itself. + +This is a minor version bump because, in theory at least, adding the `Clone` bound to block headers in ([#2047](https://github.com/pezkuwichain/subxt/pull/2047)) is a breaking change, although I think it is unlikely that this will impact any users. 
+ +### Added + +- Add prerelease `subxt-historic` crate for accessing historic (non head-of-chain) blocks ([#2040](https://github.com/pezkuwichain/subxt/pull/2040)) + +### Changed + +- Block: Implement clone ([#2047](https://github.com/pezkuwichain/subxt/pull/2047)) +- Increase number of dev accounts ([#2068](https://github.com/pezkuwichain/subxt/pull/2068)) + +### Fixed + +- Do not panic when encoding call data with invalid fields ([#2070](https://github.com/pezkuwichain/subxt/pull/2070)) +- Tweak test to reduce chance of failure, and need jsonrpsee/server for tests ([#2057](https://github.com/pezkuwichain/subxt/pull/2057)) +- Fix 1.89 clippy warnings ([#2055](https://github.com/pezkuwichain/subxt/pull/2055)) +- Increase reconnecting client request/response size ([#2046](https://github.com/pezkuwichain/subxt/pull/2046)) + +## [0.43.0] - 2025-07-17 + +This is a reasonably small release which is mainly bug fixing, but has a couple of changes I'd like to elaborate on: + +### Remove `codec::Encode` and `codec::Decode` derives from generated APIs by default ([#2008](https://github.com/pezkuwichain/subxt/pull/2008)) + +When generating an API using the `#[subxt::subxt(...)]` macro (or programatically via `subxt-codegen`), we had always previously added `parity_scale_codec::Encode` and `parity_scale_codec::Decode` derives to all of the generated types. Most places in Subxt have not made use of these for a long time (relying instead on `scale_encode::EncodeAsType` and `scale_decode::DecodeAsType`, since they allow encoding and encoding which takes the type information into account and can more gracefully handle incompatibilities). + +We eventually [hit an issue](https://github.com/pezkuwichain/subxt/issues/2006) to which the most appropriate fix was just to remove these derives. + +If you still need the `parity_scale_codec::Encode` or `parity_scale_codec::Decode` derives on certain types, you have two options: + +1. 
Use the [`derive_for_type`](https://docs.rs/subxt/latest/subxt/attr.subxt.html#derive_for_typepath---derive--) attr to add them back where needed, eg: + ```rust + #[subxt::subxt( + ... + derive_for_type( + path = "staging_xcm::v3::multilocation::MultiLocation", + derive = "parity_scale_codec::Encode, parity_scale_codec::Decode", + recursive + ) + )] + ``` +2. Use the [`derive_for_all_types`](https://docs.rs/subxt/latest/subxt/attr.subxt.html#derive_for_all_types--) attr to add them back everywhere, eg: + ``` + #[subxt::subxt( + ... + derive_for_all_types = "parity_scale_codec::Encode, parity_scale_codec::Decode" + )] + ``` + +Prefer (1) where possible to reduce the amount of generated code, and reduce the likelihood of running into [issues](https://github.com/pezkuwichain/subxt/issues/2006) around those derives in certain edge cases. + +This PR changes some things around storage keys to remove one last requirement for `Encode` and `Decode` derives, and also as a side effect changes `api.storage().call_raw()` slightly to no longer also try to decode the resulting type via `Decode`, leaving this to the user (and also meaning it's much easier now for the user to obtain the raw bytes for some storage entry). 
In other words, instead of doing something like:

```rust
let (compact_len, metadata) = rt
    .call_raw::<(Compact<u32>, frame_metadata::RuntimeMetadataPrefixed)>(
        "Metadata_metadata",
        None,
    )
    .await?;
```

You would now do:

```rust
let meta_bytes = rt.call_raw("Metadata_metadata", None).await?;
let (compact_len, metadata): (Compact<u32>, frame_metadata::RuntimeMetadataPrefixed) =
    Decode::decode(&mut &*meta_bytes)?;
```
+- There was no easy way to actually set the mortality for an `OfflineClient` transaction; you'd have to do something like this: + ```rust + let params = DefaultExtrinsicParamsBuilder::new(); + params.5 = CheckMortalityParams::mortal_from_unchecked(for_n_blocks, from_block_n, from_block_hash); + ``` + +With this PR, transactions _are_ now mortal by default using the `OnlineClient`, we now return an error if you try to construct a transaction with the `OfflineClient` and try to use `params.mortal(..)` when configuring it, and we expose `params.mortal_from_unchecked(..)` to allow configuration for offline transactions without the ugly code above. + +In this PR, we also discovered an issue decoding `Eras` and fixed this, so that decoding the mortality of a transaction when it is mortal should now work. + +### Add FFI example ([#2037](https://github.com/pezkuwichain/subxt/pull/2037)) + +I'd like to do a quick shoutout to @wassimans, who submitted an excellent example for how to interact with Subxt via the C FFI in Python and Node.JS. This is something I've wanted to add for a while, so it's lovely to see this new example which highlights one of the strengths of Subxt over Javascript based compatitors in the space. 
+ +All of the non-trivial changes in this release are listed below: + +### Added + +- Add FFI example ([#2037](https://github.com/pezkuwichain/subxt/pull/2037)) + +### Changed + +- Remove `codec::Encode` and `codec::Decode` derives from generated APIs by default ([#2008](https://github.com/pezkuwichain/subxt/pull/2008)) +- Address some issues around tx mortality ([#2025](https://github.com/pezkuwichain/subxt/pull/2025)) + +### Fixed + +- Fix 'subxt explore storage': don't turn keys to bytes ([#2038](https://github.com/pezkuwichain/subxt/pull/2038)) +- Refactor: improve nonce and block injection in extrinsic params ([#2032](https://github.com/pezkuwichain/subxt/pull/2032)) +- Improve docs for `at_latest` ([#2035](https://github.com/pezkuwichain/subxt/pull/2035)) +- Clippy fixes for latest Rustc ([#2033](https://github.com/pezkuwichain/subxt/pull/2033)) +- docs: fix minor comment typos ([#2027](https://github.com/pezkuwichain/subxt/pull/2027)) +- chore: remove redundant backtick in comment ([#2020](https://github.com/pezkuwichain/subxt/pull/2020)) +- Keep codec attrs even when Encode/Decode not used ([#2023](https://github.com/pezkuwichain/subxt/pull/2023)) +- Run CI on v0.N.x branches or PRs to them for ease of backporting ([#2017](https://github.com/pezkuwichain/subxt/pull/2017)) +- De-dup types early in CLI/macro so that derives/substitutes work for de-duped types ([#2015](https://github.com/pezkuwichain/subxt/pull/2015)) +- If only one hasher, always treat any key as a single and not NMap key, even if it's a tuple. ([#2010](https://github.com/pezkuwichain/subxt/pull/2010)) + +## [0.42.1] - 2025-05-12 + +This patch release reduces the rust-version to 1.85.0, given that we don't use any features newer than this at the moment. 
+ +## [0.42.0] - 2025-05-09 + +The primary benefit of this release is introducing support for the [_about-to-be-stabilised-in-pezkuwi-sdk_](https://github.com/pezkuwichain/pezkuwi-sdk/pull/8443) V16 metadata, and with that, support for calling Pallet View Functions on runtimes which will support this. Pallet View Functions are used much like Runtime APIs, except that they are declared in specific pallets and not declared at the runtime-wide level, allowing pallets to carry their own APIs with them. + +### Pallet View Functions + +Calling a Pallet View Function in this Subxt release will look like: + +```rust +use runtime::proxy::view_functions::check_permissions::{Call, ProxyType}; + +// Construct the call, providing the two arguments. +let view_function_call = runtime::view_functions() + .proxy() + .check_permissions( + Call::System(runtime::system::Call::remark { remark: b"hi".to_vec() }), + ProxyType::Any + ); + +// Submit the call and get back a result. +let _is_call_allowed = api + .view_functions() + .at_latest() + .await? + .call(view_function_call) + .await?; +``` + +Like Runtime APIs and others, the dynamic API can also be used to call into Pallet View Functions, which has the advantage of not needing the statically generated interface, but the downside of not being strongly typed. This looks like the following: + +```rust +use scale_value::value; + +let metadata = api.metadata(); + +// Look up the query ID for the View Function in the node metadata: +let query_id = metadata + .pallet_by_name("Proxy") + .unwrap() + .view_function_by_name("check_permissions") + .unwrap() + .query_id(); + +// Construct the call, providing the two arguments. +let view_function_call = subxt::dynamic::view_function_call( + *query_id, + vec![ + value!(System(remark(b"hi".to_vec()))), + value!(Any()) + ], +); + +// Submit the call and get back a result. +let _is_call_allowed = api + .view_functions() + .at_latest() + .await? 
+ .call(view_function_call) + .await?; +``` + +### Updated `Config` trait + +Another change to be aware of is that [our `Config` trait has been tweaked](https://github.com/pezkuwichain/subxt/pull/1974). The `Hash` associated type is no longer needed, as it can be obtained via the `Hasher` associated type already, and `PezkuwiConfig`/`BizinikiwConfig` now set the hasher by default to be `DynamicHasher256`, which will (when V16 metadata is available for a runtime) automatically select between Keccak256 and BlakeTwo256 hashers depending on what the chain requires. + +### Other changes + +We also [solidify our support for V1 archive RPCs](https://github.com/pezkuwichain/subxt/pull/1977), [upgrade the codebase to Rust 2024 edition](https://github.com/pezkuwichain/subxt/pull/2001), and a bunch of other changes, the full list of which is here: + +### Added + +- Support v16 metadata and use it by default if it's available ([#1999](https://github.com/pezkuwichain/subxt/pull/1999)) +- Metadata V16: Implement support for Pallet View Functions ([#1981](https://github.com/pezkuwichain/subxt/pull/1981)) +- Metadata V16: Be more dynamic over which hasher is used. 
([#1974](https://github.com/pezkuwichain/subxt/pull/1974)) + +### Changed + +- Update to 2024 edition ([#2001](https://github.com/pezkuwichain/subxt/pull/2001)) +- Update Smoldot to latest version ([#1991](https://github.com/pezkuwichain/subxt/pull/1991)) +- Update native test timeout to 45 mins ([#2002](https://github.com/pezkuwichain/subxt/pull/2002)) +- chore(deps): tokio ^1.44.2 ([#1989](https://github.com/pezkuwichain/subxt/pull/1989)) +- Add DefaultParams to allow more transaction extensions to be used when calling _default() methods ([#1979](https://github.com/pezkuwichain/subxt/pull/1979)) +- Use wat instead of wabt to avoid CI cmake error (and use supported dep) ([#1980](https://github.com/pezkuwichain/subxt/pull/1980)) +- Support v1 archive RPCs ([#1977](https://github.com/pezkuwichain/subxt/pull/1977)) +- Support V16 metadata and refactor metadata code ([#1967](https://github.com/pezkuwichain/subxt/pull/1967)) +- Allow submitting transactions ignoring follow events ([#1962](https://github.com/pezkuwichain/subxt/pull/1962)) +- Improve error message regarding failure to extract metadata from WASM runtime ([#1961](https://github.com/pezkuwichain/subxt/pull/1961)) +- Add docs for subxt-rpcs and fix example ([#1954](https://github.com/pezkuwichain/subxt/pull/1954)) + +### Fixed + +- Fix CLI storage diff ([#1958](https://github.com/pezkuwichain/subxt/pull/1958)) +- chore: fix some typos ([#1997](https://github.com/pezkuwichain/subxt/pull/1997)) + +## [0.41.0] - 2025-03-10 + +This release makes two main changes: + +### Add `subxt-rpcs` crate. + +Previously, if you wanted to make raw RPC calls but weren't otherwise interested in using the higher level Subxt interface, you still needed to include the entire Subxt crate. + +Now, one can depend on `subxt-rpcs` directly. This crate implements the new RPC-V2 `chainHead`/`transaction` endpoints as well as the currently unstable `archive` endpoints. 
It also implements various legacy endpoints that Subxt uses as a fallback to the modern ones.
- `subxt` exposes higher level wrappers around these (i.e. `api.tx().create_v4_unsigned(..)`, `api.tx().create_v5_bare(..)`), but also continues to expose the same standard APIs for creating transactions which will, under the hood, decide what to create based on the chain we're connected to.
- APIs like `sign_and_submit` now take a `T::AccountId` rather than a `T::Address` since it was found to not be useful to provide the latter, and V5 transactions only expect a `T::AccountId`.
- Signed Extensions are now referred to as _Transaction Extensions_, and we've tweaked the interface around how these work slightly to accommodate the fact that in V5 transactions, the signature is passed into a transaction extension where applicable (`VerifySignature`).
such as an increased number of dependencies in Cargo.lock. For more details, see [#1925](https://github.com/pezkuwichain/subxt/issues/1925). + +Additionally, this update bumps the Pezkuwi SDK-related dependencies to their latest versions, ensuring compatibility and stability. + +### Fixed + +- Remove usage of pezkuwi-sdk umbrella crate ([#1926](https://github.com/pezkuwichain/subxt/pull/1926)) + +**Full Changelog**: https://github.com/pezkuwichain/subxt/compare/v0.39.0...v0.40.0 + +## [0.39.0] - 2025-02-04 + +This release is mostly bug fixes and changes. The only change that should be a breaking change is removing the `bizinikiwi-compat` feature flag (see [#1850](https://github.com/pezkuwichain/subxt/pull/1850)), which we'll go into more detail about. + +### The `bizinikiwi-compat` feature flag has been removed. + +The `bizinikiwi-compat` feature flag essentially provided: +1. An implementation of the `subxt::config::Header` trait for anything implementing `sp_runtime::traits::Header` ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-26ab583bc154fdb10c63d7cc90045a6026ad6497efe790fe257b60ceb1a15ea7L137)). +2. Same for `subxt::config::Hasher` and anything implementing `sp_runtime::traits::Hasher` ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-26ab583bc154fdb10c63d7cc90045a6026ad6497efe790fe257b60ceb1a15ea7L149)). +3. A `subxt_core::tx::PairSigner` type which could be given something implementing `sp_core::Pair` and then be used to sign transactions ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-fe5469ea5a4788ffac7607c8d25f9d17c232c703f2d38ffe593cb6e87662a0afL46)). +4. From impls for `sp_runtime::AccountId32` and related for `subxt::utils::AccountId32` ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-61f12204f1b6828f829ea82da72826674e8f6c35943795258860b25ce59fc692L169)). +5. 
Likewise for `sp_runtime::MultiAddress` and `subxt::utils::MultiAddress` ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-956118f361c3e5fbdd6974d6f23f40fd0050714cd6bfdfe0f6624d883a2d0c7cL53)). +6. Likewise for `sp_runtime::MultiSignature` and `subxt::utils::MultiSignature` ([here](https://github.com/pezkuwichain/subxt/pull/1850/files#diff-590233f1bae2f8031dfb010e9c35ba04bb700539d8b067daa7477a0a3f14e38dL29)). + +While useful, providing these features in Subxt is almost impossible to maintain: we can only support a single version of `sp_runtime`/`sp_core` at a time, but many versions are in use in the wild. This led to various issues regarding the mismatch between `sp_*` crates in use and a given version of Subxt. More generally, the goal of Subxt is to be independent from any specific version of Bizinikiwi, and communicate via the exposed RPC APIs in order to work across any compatible Bizinikiwi version (or indeed, alternative implementations that follow things like [the RPC spec](https://github.com/pezkuwichain/json-rpc-interface-spec)). + +As a result, we've taken the decision to remove this compatibility layer from Subxt itself. To migrate away from this feature, we suggest: +1. Using the example [here](https://github.com/pezkuwichain/subxt/blob/d924ece39a5cb369ba5ccde3dc160b5ee006271b/subxt/examples/bizinikiwi_compat_signer.rs) to see how to use a Bizinikiwi signer to sign Subxt transactions. +2. Looking at `subxt_signer` instead, if it's a viable alternative in your case. +3. Following the "here" links above to see what impls were removed. 
Impls can generally be recreated as needed using wrapper types which allow converting between Bizinikiwi and Subxt types/traits, for instance: + +```rust +// Wrap a bizinikiwi header type in this to impl the subxt Header trait: +struct SubxtHeader<T>(pub T); + +// This basically copies the code removed from Subxt, but on a wrapper type: +impl<T> subxt::config::Header for SubxtHeader<T> +where + T: sp_runtime::traits::Header, + <T as sp_runtime::traits::Header>::Number: Into<u64>, +{ + type Number = T::Number; + type Hasher = T::Hashing; + + fn number(&self) -> Self::Number { + *self.0.number() + } +} +``` + +The hope is that this pattern is applicable to any such types that you find useful to share between Bizinikiwi and Subxt code. Please raise an issue if you can't find a solution in your case, and we'll endeavour to help! + +The result of this is that your code will work against whichever Bizinikiwi crate versions you are using, at the cost of this code no longer being included behind the `bizinikiwi-compat` feature flag. + +A full list of relevant changes and fixes (nothing was added in this release) is as follows: + +### Changed + +- remove bizinikiwi compat ([#1850](https://github.com/pezkuwichain/subxt/pull/1850)) +- migrate custom error trait impls to `thiserror` ([#1856](https://github.com/pezkuwichain/subxt/pull/1856)) +- re-export `jsonrpsee` in `subxt::ext` ([#1843](https://github.com/pezkuwichain/subxt/pull/1843)) + +### Fixed + +- don't double hash: use the same hash in ExtrinsicDetails and ExtrinsicEvents ([#1917](https://github.com/pezkuwichain/subxt/pull/1917)) +- fix and test sr25519 signing in nostd ([#1872](https://github.com/pezkuwichain/subxt/pull/1872)) +- preserve custom metadata when converting between Subxt metadata and frame_metadata ([#1914](https://github.com/pezkuwichain/subxt/pull/1914)) +- fix: don't wrap rpc error in DisconnectedWillReconnect in reconnecting rpc client ([#1904](https://github.com/pezkuwichain/subxt/pull/1904)) +- fix: bizinikiwi runner, support new libp2p addr
log ([#1892](https://github.com/pezkuwichain/subxt/pull/1892)) +- update Artifacts (auto-generated) ([#1874](https://github.com/pezkuwichain/subxt/pull/1874)) +- bump frame-decode and frame-metadata to latest ([#1870](https://github.com/pezkuwichain/subxt/pull/1870)) +- fix unstable-light-client + ChainHeadBackend tx events ([#1865](https://github.com/pezkuwichain/subxt/pull/1865)) +- when native feature is enabled, we need pezkuwi-sdk/std for eg examples to work ([#1864](https://github.com/pezkuwichain/subxt/pull/1864)) +- load latest metadata version from Wasm blobs. ([#1859](https://github.com/pezkuwichain/subxt/pull/1859)) +- minor fix - Yew example ([#1852](https://github.com/pezkuwichain/subxt/pull/1852)) +- update the release notes to work for current releases ([#1842](https://github.com/pezkuwichain/subxt/pull/1842)) + +## [0.38.0] - 2024-10-24 + +This release doesn't introduce any substantial breaking changes and focuses primarily on incremental improvements, testing and bug fixes. A few of the highlights include: + +- [#1785](https://github.com/pezkuwichain/subxt/pull/1785): Support decoding V5 extrinsics in blocks (currently Subxt will still submit V4 extrinsics). This also unifies our extrinsic decoding logic into one place. +- [#1802](https://github.com/pezkuwichain/subxt/pull/1802): Stabilizing the `subxt::backend::unstable::UnstableBackend` (it's now called `subxt::backend::chain_head::ChainHeadBackend`). This backend can be used to interact with the modern `chainHead` RPC methods exposed by Smoldot and compliant RPC nodes. See [this example](https://github.com/pezkuwichain/subxt/blob/master/subxt/examples/setup_rpc_chainhead_backend.rs). +- [#1803](https://github.com/pezkuwichain/subxt/pull/1803): Stabilizing the `reconnecting-rpc-client`. See [this example](https://github.com/pezkuwichain/subxt/blob/master/subxt/examples/setup_reconnecting_rpc_client.rs). 
+- [#1720](https://github.com/pezkuwichain/subxt/pull/1720): A nice little QoL improvement if you have the raw runtime WASM and would like to generate an interface directly from that (ie with `#[subxt(runtime_path = "path/to/runtime.wasm")]`). +- [#1661](https://github.com/pezkuwichain/subxt/pull/1661): Support loading keys directly from the PezkuwiJS JSON to be used in Subxt. +- [#1638](https://github.com/pezkuwichain/subxt/pull/1638): Improve support for Eth style chains by defining a 20-byte account ID type directly in `subxt-core`. See [this example](https://github.com/pezkuwichain/subxt/blob/master/subxt/examples/tx_basic_frontier.rs). + +The notable changes in this release are as follows: + +### Added +- add reconnecting tests for unstable_backend ([#1765](https://github.com/pezkuwichain/subxt/pull/1765)) +- add support for generating metadata from runtime wasm files ([#1720](https://github.com/pezkuwichain/subxt/pull/1720)) +- support loading keys from Pezkuwi-JS accounts ([#1661](https://github.com/pezkuwichain/subxt/pull/1661)) +- allow tx payloads to be boxed ([#1690](https://github.com/pezkuwichain/subxt/pull/1690)) +- add hash method to ExtrinsicDetails ([#1676](https://github.com/pezkuwichain/subxt/pull/1676)) +- expose `secret_key` method for `ecdsa::Keypair` and `eth::Keypair` ([#1628](https://github.com/pezkuwichain/subxt/pull/1628)) +- add 20-byte account id to subxt_core ([#1638](https://github.com/pezkuwichain/subxt/pull/1638)) + +### Changed +- make it clearer which extrinsic failed to decode ([#1835](https://github.com/pezkuwichain/subxt/pull/1835)) +- chore(deps): bump frame-metadata from 16 to 17 ([#1836](https://github.com/pezkuwichain/subxt/pull/1836)) +- chore(deps): bump `scale family crates`, `primitive-types` and `impl-serde` ([#1832](https://github.com/pezkuwichain/subxt/pull/1832)) +- chore(deps): replace `instant` with `web-time` ([#1830](https://github.com/pezkuwichain/subxt/pull/1830)) +- deps: use pezkuwi-sdk umbrella crate
([#1786](https://github.com/pezkuwichain/subxt/pull/1786)) +- stabilize reconnecting-rpc-client ([#1803](https://github.com/pezkuwichain/subxt/pull/1803)) +- stabilize chainhead backend ([#1802](https://github.com/pezkuwichain/subxt/pull/1802)) +- derive serialize on more types ([#1797](https://github.com/pezkuwichain/subxt/pull/1797)) +- use frame-decode for core extrinsic decode logic (including v5 support) ([#1785](https://github.com/pezkuwichain/subxt/pull/1785)) +- reconn-rpc-client: parse URL before connecting ([#1789](https://github.com/pezkuwichain/subxt/pull/1789)) +- update proc_macro_error to proc_macro_error2 ([#1767](https://github.com/pezkuwichain/subxt/pull/1767)) +- chore(deps): update Smoldot to the latest version ([#1400](https://github.com/pezkuwichain/subxt/pull/1400)) +- remove unneeded `?Sized` bound and replace never type with `()` ([#1758](https://github.com/pezkuwichain/subxt/pull/1758)) +- improve test coverage for legacy `Backend` impl ([#1751](https://github.com/pezkuwichain/subxt/pull/1751)) +- add integration tests for `unstable-reconnecting-rpc-client` ([#1711](https://github.com/pezkuwichain/subxt/pull/1711)) +- replace `reconnecting-jsonrpsee-ws-client` with `subxt-reconnecting-rpc-client` ([#1705](https://github.com/pezkuwichain/subxt/pull/1705)) +- allow PartialExtrinsic to be held across await points ([#1658](https://github.com/pezkuwichain/subxt/pull/1658)) +- chore(deps): bump jsonrpsee from 0.22.5 to 0.23.1 ([#1656](https://github.com/pezkuwichain/subxt/pull/1656)) + +### Fixed +- fix stripping metadata in the case where enums like RuntimeCall are handed back ([#1774](https://github.com/pezkuwichain/subxt/pull/1774)) +- fix: `defalt-feature` -> `default-features` Cargo.toml ([#1828](https://github.com/pezkuwichain/subxt/pull/1828)) +- avoid hang by notifying subscribers when the backend is closed ([#1817](https://github.com/pezkuwichain/subxt/pull/1817)) +- fix: error message on rpc errors 
([#1804](https://github.com/pezkuwichain/subxt/pull/1804)) +- docs: fix typos ([#1776](https://github.com/pezkuwichain/subxt/pull/1776)) +- examples: fix reconnecting logging target ([#1733](https://github.com/pezkuwichain/subxt/pull/1733)) +- docs: fix spelling issues ([#1699](https://github.com/pezkuwichain/subxt/pull/1699)) +- chore: fix some comments ([#1697](https://github.com/pezkuwichain/subxt/pull/1697)) +- codegen: Fix decode error by adding `#[codec(dumb_trait_bound)]` ([#1630](https://github.com/pezkuwichain/subxt/pull/1630)) + +## [0.37.0] - 2024-05-28 + +This release mainly adds support for the signed extension `CheckMetadataHash` and fixes a regression introduced in v0.36.0 +where the type de-duplication was too aggressive and lots of the same type such as `BoundedVec` was duplicated to +plenty of different types such as BoundedVec1, BoundedVec2, .. BoundedVec. + +### Added +- Implemented `sign_prehashed` for `ecdsa::Keypair` and `eth::Keypair` ([#1598](https://github.com/pezkuwichain/subxt/pull/1598)) +- Add a basic version of the CheckMetadataHash signed extension ([#1590](https://github.com/pezkuwichain/subxt/pull/1590)) + +### Changed +- Remove `derive_more` ([#1600](https://github.com/pezkuwichain/subxt/pull/1600)) +- chore(deps): bump scale-typegen v0.8.0 ([#1615](https://github.com/pezkuwichain/subxt/pull/1615)) + +## [0.36.1] - 2024-05-28 [YANKED] + +Yanked because the typegen changed, it's a breaking change. + +## [0.36.0] - 2024-05-16 + +This release adds a few new features, which I'll go over below in more detail. + +### [`subxt-core`](https://github.com/pezkuwichain/subxt/pull/1508) + +We now have a brand new `subxt-core` crate, which is `no_std` compatible, and contains a lot of the core logic that is needed in Subxt. Using this crate, you can do things in a no-std environment like: + +- `blocks`: decode and explore block bodies. +- `constants`: access and validate the constant addresses in some metadata.
+- `custom_values`: access and validate the custom value addresses in some metadata. +- `metadata`: decode bytes into the metadata used throughout this library. +- `storage`: construct storage request payloads and decode the results you'd get back. +- `tx`: construct and sign transactions (extrinsics). +- `runtime_api`: construct runtime API request payloads and decode the results you'd get back. +- `events`: decode and explore events. + +Check out [the docs](https://docs.rs/subxt-core/latest/subxt_core/) for more, including examples of each case. + +A breaking change that comes from migrating a bunch of logic to this new crate is that the `ExtrinsicParams` trait is now handed `&ClientState` rather than a `Client`. `ClientState` is just a concrete struct containing the state that one needs for things like signed extensions. + +### [Support for reconnecting](https://github.com/pezkuwichain/subxt/pull/1505) + +We've baked in a bunch of support for automatically reconnecting after a connection loss into Subxt. This comes in three parts: +1. An RPC client that is capable of reconnecting. This is gated behind the `unstable-reconnecting-rpc-client` feature flag at the moment, and +2. Handling in the subxt Backends such that when the RPC client notifies it that it is reconnecting, the backend will transparently handle this behind the scenes, or else pass on a `DisconnectedWillReconnect` error to the user where it cannot. Note that the individual `LegacyRpcMethods` and `UnstableRpcMethods` are _not_ automatically retried on reconnection. Which leads us to.. +3. A couple of util helpers (`subxt::backend::retry` and `subxt::backend::retry_stream`) which can be used in conjunction with a reconnecting RPC client to make it easy to automatically retry RPC method calls where needed. + +We'd love feedback on this reconnecting work! 
To try it out, enable the `unstable-reconnecting-rpc-client` feature flag and then you can make use of this like so: + +```rust +use std::time::Duration; +use futures::StreamExt; +use subxt::backend::rpc::reconnecting_rpc_client::{Client, ExponentialBackoff}; +use subxt::{OnlineClient, PezkuwiConfig}; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + // Create a new client with a reconnecting RPC client. + let rpc = Client::builder() + // We can configure the retry policy; here to an exponential backoff. + // This API accepts an iterator of retry delays, and here we use `take` + // to limit the number of retries. + .retry_policy( + ExponentialBackoff::from_millis(100) + .max_delay(Duration::from_secs(10)) + .take(3), + ) + .build("ws://localhost:9944".to_string()) + .await?; + + // Use this reconnecting client when instantiating a Subxt client: + let api: OnlineClient<PezkuwiConfig> = OnlineClient::from_rpc_client(rpc.clone()).await?; +``` + +Check out the full example [here](https://github.com/pezkuwichain/subxt/blob/64d3aae521112c8bc7366385c54a9340185d81ac/subxt/examples/setup_reconnecting_rpc_client.rs). + +### [Better Ethereum support](https://github.com/pezkuwichain/subxt/pull/1501) + +We've added built-in support for Ethereum style chains (eg Frontier and Moonbeam) in `subxt-signer`, making it easier to sign transactions for these chains now. + +Check out a full example [here](https://github.com/pezkuwichain/subxt/blob/327b70ac94c4d925c8529a1e301d596d7db181ea/subxt/examples/tx_basic_frontier.rs). + +We plan to improve on this in the future, baking in better Ethereum support if possible so that it's as seamless to use `AccountId20` as it is `AccountId32`.
+ +### Stabilizing the new V2 RPCs ([#1540](https://github.com/pezkuwichain/subxt/pull/1540), [#1539](https://github.com/pezkuwichain/subxt/pull/1539), [#1538](https://github.com/pezkuwichain/subxt/pull/1538)) + +A bunch of the new RPCs are now stable in the spec, and have consequently been stabilized here, bringing the `unstable-backend` a step closer to being stabilized itself! We'll probably first remove the feature flag and next make it the default backend, in upcoming releases. + +All of the notable changes in this release are as follows: + +### Added + +- Add `frontier/ethereum` example ([#1557](https://github.com/pezkuwichain/subxt/pull/1557)) +- Rpc: add full support reconnecting rpc client ([#1505](https://github.com/pezkuwichain/subxt/pull/1505)) +- Signer: ethereum implementation ([#1501](https://github.com/pezkuwichain/subxt/pull/1501)) +- `subxt-core` crate ([#1466](https://github.com/pezkuwichain/subxt/pull/1466)) + +### Changed + +- Bump scale-decode and related deps to latest ([#1583](https://github.com/pezkuwichain/subxt/pull/1583)) +- Update Artifacts (auto-generated) ([#1577](https://github.com/pezkuwichain/subxt/pull/1577)) +- Update deps to use `scale-type-resolver` 0.2 ([#1565](https://github.com/pezkuwichain/subxt/pull/1565)) +- Stabilize transactionBroadcast methods ([#1540](https://github.com/pezkuwichain/subxt/pull/1540)) +- Stabilize transactionWatch methods ([#1539](https://github.com/pezkuwichain/subxt/pull/1539)) +- Stabilize chainHead methods ([#1538](https://github.com/pezkuwichain/subxt/pull/1538)) +- Rename traits to remove T suffix ([#1535](https://github.com/pezkuwichain/subxt/pull/1535)) +- Add Debug/Clone/etc for common Configs for convenience ([#1542](https://github.com/pezkuwichain/subxt/pull/1542)) +- Unstable_rpc: Add transactionBroadcast and transactionStop ([#1497](https://github.com/pezkuwichain/subxt/pull/1497)) + +### Fixed + +- metadata: Fix cargo clippy ([#1574](https://github.com/pezkuwichain/subxt/pull/1574)) +- 
Fixed import in `subxt-signer::eth` ([#1553](https://github.com/pezkuwichain/subxt/pull/1553)) +- chore: fix typos and link broken ([#1541](https://github.com/pezkuwichain/subxt/pull/1541)) +- Make subxt-core ready for publishing ([#1508](https://github.com/pezkuwichain/subxt/pull/1508)) +- Remove dupe storage item if we get one back, to be compatible with Smoldot + legacy RPCs ([#1534](https://github.com/pezkuwichain/subxt/pull/1534)) +- fix: bizinikiwi runner libp2p port ([#1533](https://github.com/pezkuwichain/subxt/pull/1533)) +- Swap BinaryHeap for Vec to avoid Ord constraint issue ([#1523](https://github.com/pezkuwichain/subxt/pull/1523)) +- storage_type: Strip key proper hash and entry bytes (32 instead of 16) ([#1522](https://github.com/pezkuwichain/subxt/pull/1522)) +- testing: Prepare light client testing with bizinikiwi binary and add subxt-test macro ([#1507](https://github.com/pezkuwichain/subxt/pull/1507)) + +## [0.35.0] - 2024-03-21 + +This release contains several fixes, adds `no_std` support to a couple of crates (`subxt-signer` and `subxt-metadata`) and introduces a few quality of life improvements, which I'll quickly cover: + +### Reworked light client ([#1475](https://github.com/pezkuwichain/subxt/pull/1475)) + +This PR reworks the light client interface. The "basic" usage of connecting to a parachain now looks like this: + +```rust +#[subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +use subxt::lightclient::LightClient; + +// Instantiate a light client with the Pezkuwi relay chain given its chain spec. +let (lightclient, pezkuwi_rpc) = LightClient::relay_chain(POLKADOT_SPEC)?; +// Connect the light client to some parachain by giving a chain spec for it. 
+let asset_hub_rpc = lightclient.parachain(ASSET_HUB_SPEC)?; + +// Now, we can create Subxt clients from these Smoldot backed RPC clients: +let pezkuwi_api = OnlineClient::<PezkuwiConfig>::from_rpc_client(pezkuwi_rpc).await?; +let asset_hub_api = OnlineClient::<PezkuwiConfig>::from_rpc_client(asset_hub_rpc).await?; +``` + +This interface mirrors the requirement that we must connect to a relay chain before we can connect to a parachain. It also moves the light client specific logic into an `RpcClientT` implementation, rather than exposing it as a `subxt::client::LightClient`. + +### Typed Storage Keys ([#1419](https://github.com/pezkuwichain/subxt/pull/1419)) + +This PR changes the storage interface so that, where possible, we now also decode the storage keys as well as the values when iterating over storage entries: + +```rust +#[subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +// Create a new API client, configured to talk to Pezkuwi nodes. +let api = OnlineClient::<PezkuwiConfig>::new().await?; + +// Build a storage query to iterate over account information. +let storage_query = pezkuwi::storage().system().account_iter(); + +// Get back an iterator of results (here, we are fetching 10 items at +// a time from the node, but we always iterate over one at a time). +let mut results = api.storage().at_latest().await?.iter(storage_query).await?; + +while let Some(Ok(kv)) = results.next().await { + // We used to get a tuple of key bytes + value. Now we get back a + // `kv` struct containing the bytes and value as well as the actual + // decoded keys: + println!("Decoded key(s): {:?}", kv.keys); + println!("Key bytes: 0x{}", hex::encode(&kv.key_bytes)); + println!("Value: {:?}", kv.value); +} +``` + +When using the static interface, keys come back as a tuple of values corresponding to the different hashers used in constructing the key.
When using a dynamic interface, keys will be encoded/decoded from the type given so long as it implements `subxt::storage::StorageKey`, eg `Vec<scale_value::Value>`. + +### Extrinsic Params Refinement ([#1439](https://github.com/pezkuwichain/subxt/pull/1439)) + +Prior to this PR, one could configure extrinsic signed extensions by providing some params like so: + +```rust +// Configure the transaction parameters; we give a small tip and set the +// transaction to live for 32 blocks from the `latest_block` above: +let tx_params = Params::new() + .tip(1_000) + .mortal(latest_block.header(), 32) + .build(); + +let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?; +``` + +If you want to customize the account nonce, you'd use a different call like `create_signed_with_nonce` instead. + +One of the downsides of the above approach is that, if you don't provide any explicit params, transactions will be immortal by default (because the signed extensions didn't have the information to do any better). + +Now, with the help of a `RefineParams` trait, transactions will default to being mortal and living for 32 blocks unless an explicit mortality is provided as above. + +One notable change is that the offline-only `create_signed_with_nonce` and `create_partial_signed_with_nonce` functions have lost the `_with_nonce` suffix. Since we can't discover nonce/mortality settings offline, you should now provide `Params` and set an explicit nonce (and mortality, if you like) when using these calls, otherwise the nonce will be set to 0 and the mortality to `Immortal`.
+ +For a full list of changes, please see the following: + +### Added + +- Reworked light client ([#1475](https://github.com/pezkuwichain/subxt/pull/1475)) +- `no_std` compatibility for `subxt-signer` ([#1477](https://github.com/pezkuwichain/subxt/pull/1477)) +- Typed Storage Keys ([#1419](https://github.com/pezkuwichain/subxt/pull/1419)) +- Extrinsic Params Refinement ([#1439](https://github.com/pezkuwichain/subxt/pull/1439)) +- Make storage_page_size for the LegacyBackend configurable ([#1458](https://github.com/pezkuwichain/subxt/pull/1458)) +- `no_std` compatibility for `subxt-metadata` ([#1401](https://github.com/pezkuwichain/subxt/pull/1401)) +- Experimental `reconnecting-rpc-client` ([#1396](https://github.com/pezkuwichain/subxt/pull/1396)) + +### Changed + +- `scale-type-resolver` integration ([#1460](https://github.com/pezkuwichain/subxt/pull/1460)) +- subxt: Derive `std::cmp` traits for subxt payloads and addresses ([#1429](https://github.com/pezkuwichain/subxt/pull/1429)) +- CLI: Return error on wrongly specified type paths ([#1397](https://github.com/pezkuwichain/subxt/pull/1397)) +- rpc v2: chainhead support multiple finalized block hashes in `FollowEvent::Initialized` ([#1476](https://github.com/pezkuwichain/subxt/pull/1476)) +- rpc v2: rename transaction to transactionWatch ([#1399](https://github.com/pezkuwichain/subxt/pull/1399)) + +### Fixed + +- Avoid a panic in case we try decoding naff bytes ([#1444](https://github.com/pezkuwichain/subxt/pull/1444)) +- Fix error mapping to wrong transaction status ([#1445](https://github.com/pezkuwichain/subxt/pull/1445)) +- Update DispatchError to match latest in pezkuwi-sdk ([#1442](https://github.com/pezkuwichain/subxt/pull/1442)) +- Handle errors when fetching storage keys from Unstablebackend ([#1440](https://github.com/pezkuwichain/subxt/pull/1440)) +- Swap type aliases around to be semantically correct ([#1441](https://github.com/pezkuwichain/subxt/pull/1441)) + +## [0.34.0] - 2024-01-23 + +This release 
introduces a bunch of features that make subxt easier to use. Let's look at a few of them. + +### Codegen - Integrating [`scale-typegen`](https://github.com/pezkuwichain/scale-typegen) and adding type aliases ([#1249](https://github.com/pezkuwichain/subxt/pull/1249)) + +We rewrote the code generation functionality of subxt and outsourced it to the new [`scale-typegen`](https://github.com/pezkuwichain/scale-typegen) crate, which serves a more general purpose. + +Since a lot of types used in bizinikiwi are rich with generics, this release introduces type aliases into the generated code. +A type alias is generated for the arguments/keys of each call, storage entry, and runtime API method ([#1249](https://github.com/pezkuwichain/subxt/pull/1249)). + +### Macro - Errors for misspecified type paths ([#1339](https://github.com/pezkuwichain/subxt/pull/1339)) + +The subxt macro provides attributes to specify custom derives, attributes, and type substitutions on a per-type basis. +Previously we did not verify that the provided type paths are part of the metadata. This is now fixed: +If you provide an invalid type path, the macro will tell you so. It also suggests similar type paths that you might have meant instead.
+ +```rust +#[subxt::subxt( + runtime_metadata_path = "metadata.scale", + derive_for_type(path = "Junctions", derive = "Clone") +)] +pub mod pezkuwi {} +``` + +This gives you a compile-time error like this: + +```md +Type `Junctions` does not exist at path `Junctions` + +A type with the same name is present at: +xcm::v3::junctions::Junctions +xcm::v2::multilocation::Junctions +``` + +### Macro - Recursive derives and attributes ([#1379](https://github.com/pezkuwichain/subxt/pull/1379)) + +Previously adding derives on a type containing other types was also cumbersome, see this example: + +```rust +#[subxt::subxt( + runtime_metadata_path = "metadata.scale", + derive_for_type(path = "xcm::v2::multilocation::MultiLocation", derive = "Clone"), + derive_for_type(path = "xcm::v2::multilocation::Junctions", derive = "Clone"), + derive_for_type(path = "xcm::v2::junction::Junction", derive = "Clone"), + derive_for_type(path = "xcm::v2::NetworkId", derive = "Clone"), + derive_for_type(path = "xcm::v2::BodyId", derive = "Clone"), + derive_for_type(path = "xcm::v2::BodyPart", derive = "Clone"), + derive_for_type( + path = "bounded_collections::weak_bounded_vec::WeakBoundedVec", + derive = "Clone" + ) +)] +pub mod pezkuwi {} +``` + +We introduced a `recursive` flag for custom derives and attributes that automatically inserts the specified derives on all child types: + +```rust +#[subxt::subxt( + runtime_metadata_path = "metadata.scale", + derive_for_type(path = "xcm::v2::multilocation::MultiLocation", derive = "Clone", recursive), +)] +pub mod pezkuwi {} +``` + +### Subxt CLI - New features and usability improvements ([#1290](https://github.com/pezkuwichain/subxt/pull/1290), [#1336](https://github.com/pezkuwichain/subxt/pull/1336), and [#1379](https://github.com/pezkuwichain/subxt/pull/1379)) + +Our CLI tool now allows you to explore runtime APIs and events ([#1290](https://github.com/pezkuwichain/subxt/pull/1290)). 
We also fully integrated with [`scale-typegen-description`](https://github.com/pezkuwichain/scale-typegen/tree/master/description), a crate that can describe types in a friendly way and provide type examples. The output is also color-coded to be easier on the eyes. Get started with these commands: + +```sh +# Show details about a runtime API call: +subxt explore --url wss://zagros-rpc.pezkuwi.io api StakingAPI nominations_quota +# Execute a runtime API call from the CLI: +subxt explore --url wss://zagros-rpc.pezkuwi.io api core version -e +# Discover what events a pallet can emit: +subxt explore --url wss://zagros-rpc.pezkuwi.io pallet Balances events +``` + +All CLI commands that take some metadata via `--file` or `--url` can now also read the metadata directly from `stdin` with `--file -` ([#1336](https://github.com/pezkuwichain/subxt/pull/1336)). +This allows you to pipe in metadata from other processes like in this command chain: +```sh +parachain-node export-metadata | subxt codegen --file - | rustfmt > main.rs +``` + +Similar to the macro, the `subxt codegen` command can now also use `recursive` flags: +```sh +subxt codegen --derive-for-type xcm::v2::multilocation::MultiLocation=Clone,recursive +subxt codegen --attributes-for-type "xcm::v2::multilocation::MultiLocation=#[myerror],recursive" +``` + +### Minor changes and things to be aware of + +- Using insecure connections is now an explicit opt-in in many places ([#1309](https://github.com/pezkuwichain/subxt/pull/1309)) +- When decoding extrinsics from a block into a static type, we now return its details (e.g. signature, signed extensions, raw bytes) alongside the statically decoded extrinsic itself ([#1376](https://github.com/pezkuwichain/subxt/pull/1376)) + +We also made a few fixes and improvements around the unstable backend and the lightclient, preparing them for more stable usage in the future.
+ +### Added + +- Errors for misspecified type paths + suggestions ([#1339](https://github.com/pezkuwichain/subxt/pull/1339)) +- CLI: Recursive derives and attributes ([#1379](https://github.com/pezkuwichain/subxt/pull/1379)) +- CLI: Explore runtime APIs and events, colorized outputs, scale-typegen integration for examples ([#1290](https://github.com/pezkuwichain/subxt/pull/1290)) +- Add chainflip to real world usage section of README ([#1351](https://github.com/pezkuwichain/subxt/pull/1351)) +- CLI: Allow using `--file -` to read metadata from stdin ([#1336](https://github.com/pezkuwichain/subxt/pull/1336)) +- Codegen: Generate type aliases for better API ergonomics ([#1249](https://github.com/pezkuwichain/subxt/pull/1249)) + +### Changed + +- Return Pending rather than loop around if no new finalized hash in submit_transaction ([#1378](https://github.com/pezkuwichain/subxt/pull/1378)) +- Return `ExtrinsicDetails` alongside decoded static extrinsics ([#1376](https://github.com/pezkuwichain/subxt/pull/1376)) +- Improve Signed Extension and Block Decoding Examples/Book ([#1357](https://github.com/pezkuwichain/subxt/pull/1357)) +- Use `scale-typegen` as a backend for the codegen ([#1260](https://github.com/pezkuwichain/subxt/pull/1260)) +- Using insecure connections is now opt-in ([#1309](https://github.com/pezkuwichain/subxt/pull/1309)) + +### Fixed + +- Ensure lightclient chainSpec is at least one block old ([#1372](https://github.com/pezkuwichain/subxt/pull/1372)) +- Typo fix in docs ([#1370](https://github.com/pezkuwichain/subxt/pull/1370)) +- Don't unpin blocks that may show up again ([#1368](https://github.com/pezkuwichain/subxt/pull/1368)) +- Runtime upgrades in unstable backend ([#1348](https://github.com/pezkuwichain/subxt/pull/1348)) +- Generate docs for feature gated items ([#1332](https://github.com/pezkuwichain/subxt/pull/1332)) +- Backend: Remove only finalized blocks from the event window ([#1356](https://github.com/pezkuwichain/subxt/pull/1356)) +- 
Runtime updates: wait until upgrade on chain ([#1321](https://github.com/pezkuwichain/subxt/pull/1321)) +- Cache extrinsic events ([#1327](https://github.com/pezkuwichain/subxt/pull/1327)) + +## [0.33.0] - 2023-12-06 + +This release makes a bunch of small QoL improvements and changes. Let's look at the main ones. + +### Add support for configuring multiple chains ([#1238](https://github.com/pezkuwichain/subxt/pull/1238)) + +The light client support previously provided a high level interface for connecting to single chains (ie relay chains). This PR exposes a "low level" interface which allows smoldot (the light client implementation we use) to be configured somewhat more arbitrarily, and then converted into a valid subxt `OnlineClient` to be used. + +See [this example](https://github.com/pezkuwichain/subxt/blob/418c3afc923cacd17501f374fdee0d8f588e14fd/subxt/examples/light_client_parachains.rs) for more on how to do this. + +We'll likely refine this over time and add a slightly higher level interface to make common operations much easier to do. + +### Support decoding signed extensions ([#1209](https://github.com/pezkuwichain/subxt/pull/1209) and [#1235](https://github.com/pezkuwichain/subxt/pull/1235)) + +This PR makes it possible to decode the signed extensions in extrinsics. 
This looks something like:
+
+```rust
+let api = OnlineClient::<PezkuwiConfig>::new().await?;
+
+// Get blocks; here we just subscribe to them:
+let mut blocks_sub = api.blocks().subscribe_finalized().await?;
+
+while let Some(block) = blocks_sub.next().await {
+    let block = block?;
+
+    // Fetch the extrinsics in the block:
+    let extrinsics = block.extrinsics().await?;
+
+    // Iterate over them:
+    for extrinsic in extrinsics.iter() {
+
+        // Returns None if the extrinsic isn't signed, so no signed extensions:
+        let Some(signed_exts) = extrinsic.signed_extensions() else {
+            continue;
+        };
+
+        // We can ask for a couple of common values, None if not found:
+        println!("Tip: {:?}", signed_exts.tip());
+        println!("Nonce: {:?}", signed_exts.nonce());
+
+        // Or we can find and decode into a static signed extension type
+        // (Err if we hit a decode error first, then None if it's not found):
+        if let Ok(Some(era)) = signed_exts.find::<CheckMortality<PezkuwiConfig>>() {
+            println!("Era: {era:?}");
+        }
+
+        // Or we can iterate over the signed extensions to work with them:
+        for signed_ext in signed_exts {
+            println!("Signed Extension name: {}", signed_ext.name());
+
+            // We can try to statically decode each one:
+            if let Ok(Some(era)) = signed_ext.as_signed_extension::<CheckMortality<PezkuwiConfig>>() {
+                println!("Era: {era:?}");
+            }
+
+            // Or we can dynamically decode it into a `scale_value::Value`:
+            if let Ok(value) = signed_ext.value() {
+                println!("Decoded extension: {value}");
+            }
+        }
+    }
+}
+```
+
+See the API docs for more.
+
+### ChargeAssetTxPayment: Add support for generic AssetId
+
+Still on the topic of signed extensions, the `ChargeAssetTxPayment` extension was previously not able to be used with a generic AssetId, which prohibited it from being used on the Asset Hub (which uses a `MultiLocation` instead). To address this, we added an `AssetId` type to our `subxt::Config`, which can now be configured.
+
+One example of doing that [can be found here](https://github.com/pezkuwichain/subxt/blob/master/subxt/examples/setup_config_custom.rs). 
+ +This example uses a generated `MultiLocation` type to be used as the `AssetId`. Currently it requires a rather hideous set of manual clones like so: + +```rust +#[subxt::subxt( + runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", + derive_for_type(path = "xcm::v2::multilocation::MultiLocation", derive = "Clone"), + derive_for_type(path = "xcm::v2::multilocation::Junctions", derive = "Clone"), + derive_for_type(path = "xcm::v2::junction::Junction", derive = "Clone"), + derive_for_type(path = "xcm::v2::NetworkId", derive = "Clone"), + derive_for_type(path = "xcm::v2::BodyId", derive = "Clone"), + derive_for_type(path = "xcm::v2::BodyPart", derive = "Clone"), + derive_for_type( + path = "bounded_collections::weak_bounded_vec::WeakBoundedVec", + derive = "Clone" + ) +)] +``` + +This is something we plan to address in the next version of Subxt. + +### Change SignedExtension matching logic ([#1283](https://github.com/pezkuwichain/subxt/pull/1283)) + +Before this release, each signed extension had a unique name (`SignedExtension::NAME`). We'd use this name to figure out which signed extensions to apply for a given chain inside the `signed_extensions::AnyOf` type. + +However, we recently ran into a new signed extension in Bizinikiwi called `SkipCheckIfFeeless`. This extension would wrap another signed extension, but maintained its own name. It has since been "hidden" from the public Bizinikiwi interface again, but a result of encountering this is that we have generalised the way that we "match" on signed extensions, so that we can be smarter about it going forwards. 
+ +So now, for a given signed extension, we go from: + +```rust +impl SignedExtension for ChargeAssetTxPayment { + const NAME: &'static str = "ChargeAssetTxPayment"; + type Decoded = Self; +} +``` + +To: + +```rust +impl SignedExtension for ChargeAssetTxPayment { + type Decoded = Self; + fn matches(identifier: &str, type_id: u32, types: &PortableRegistry) -> bool { + identifier == "ChargeAssetTxPayment" + } +} +``` + +On the whole, we continue matching by name, as in the example above, but this allows an author to inspect the type of the signed extension (and subtypes of it) too if they want the signed extension to match (and thus be used) only in certain cases. + +### Remove `wait_for_in_block` helper method ([#1237](https://github.com/pezkuwichain/subxt/pull/1237)) + +One can no longer use `tx.wait_for_in_block` to wait for a transaction to enter a block. The reason for this removal is that, especially when we migrate to the new `chainHead` APIs, we will no longer be able to reliably obtain any details about the block that the transaction made it into. + +In other words, the following sort of thing would often fail: + +```rust +tx.wait_for_in_block() + .await? + .wait_for_success() + .await?; +``` + +The reason for this is that the block announced in the transaction status may not have been "pinned" yet in the new APIs. In the old APIs, errors would occasionally be encountered because the block announced may have been pruned by the time we ask for details for it. Overall; having an "unreliable" higher level API felt like a potential foot gun. + +That said, you can still achieve the same via the lower level APIs like so: + +```rust +while let Some(status) = tx.next().await { + match status? 
{ + TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => { + // now, we can attempt to work with the block, eg: + tx_in_block.wait_for_success().await?; + }, + TxStatus::Error { message } | TxStatus::Invalid { message } | TxStatus::Dropped { message } => { + // Handle any errors: + println!("Error submitting tx: {message}"); + }, + // Continue otherwise: + _ => continue, + } +} +``` + +### Subxt-codegen: Tidy crate interface ([#1225](https://github.com/pezkuwichain/subxt/pull/1225)) + +The `subxt-codegen` crate has always been a bit of a mess because it wasn't really supposed to be used outside of the subxt crates, which had led to issues like https://github.com/pezkuwichain/subxt/issues/1211. + +This PR tidies up the interface to that crate so that it's much easier now to programmatically generate the Subxt interface. Now, we have three properly supported ways to do this, depending on your needs: + +1. Using the `#[subxt]` macro. +2. Using the `subxt codegen` CLI command. +3. Programmatically via the `subxt-codegen` crate. + +Each method aims to expose a similar and consistent set of options. + +If you were previously looking to use parts of the type generation logic to, for instance, generate runtime types but not the rest of the Subxt interface, then the https://github.com/pezkuwichain/scale-typegen crate will aim to fill this role eventually. + +That sums up the most significant changes. 
A summary of all of the relevant changes is as follows: + +### Added + +- CLI: Add command to fetch chainSpec and optimize its size ([#1278](https://github.com/pezkuwichain/subxt/pull/1278)) +- Add legacy RPC usage example ([#1279](https://github.com/pezkuwichain/subxt/pull/1279)) +- impl RpcClientT for `Arc` and `Box` ([#1277](https://github.com/pezkuwichain/subxt/pull/1277)) +- RPC: Implement legacy RPC system_account_next_index ([#1250](https://github.com/pezkuwichain/subxt/pull/1250)) +- Lightclient: Add support for configuring multiple chains ([#1238](https://github.com/pezkuwichain/subxt/pull/1238)) +- Extrinsics: Allow static decoding of signed extensions ([#1235](https://github.com/pezkuwichain/subxt/pull/1235)) +- Extrinsics: Support decoding signed extensions ([#1209](https://github.com/pezkuwichain/subxt/pull/1209)) +- ChargeAssetTxPayment: Add support for generic AssetId (eg `u32` or `MultiLocation`) ([#1227](https://github.com/pezkuwichain/subxt/pull/1227)) +- Add Clone + Debug on Payloads/Addresses, and compare child storage results ([#1203](https://github.com/pezkuwichain/subxt/pull/1203)) + +### Changed + +- Lightclient: Update smoldot to `0.14.0` and smoldot-light to `0.12.0` ([#1307](https://github.com/pezkuwichain/subxt/pull/1307)) +- Cargo: Switch to workspace lints ([#1299](https://github.com/pezkuwichain/subxt/pull/1299)) +- Update bizinikiwi-* and signer-related dependencies ([#1297](https://github.com/pezkuwichain/subxt/pull/1297)) +- Change SignedExtension matching logic and remove SkipCheckIfFeeless bits ([#1283](https://github.com/pezkuwichain/subxt/pull/1283)) +- Update the README with the new location of node-cli ([#1282](https://github.com/pezkuwichain/subxt/pull/1282)) +- Generalize `bizinikiwi-compat` impls to accept any valid hasher/header impl ([#1265](https://github.com/pezkuwichain/subxt/pull/1265)) +- Extrinsics: Remove `wait_for_in_block` helper method ([#1237](https://github.com/pezkuwichain/subxt/pull/1237)) +- Subxt-codegen: 
Tidy crate interface ([#1225](https://github.com/pezkuwichain/subxt/pull/1225)) +- Lightclient: Update usage docs ([#1223](https://github.com/pezkuwichain/subxt/pull/1223)) +- Wee tidy to subxt-signer flags ([#1200](https://github.com/pezkuwichain/subxt/pull/1200)) +- Batch fetching storage values again to improve performance ([#1199](https://github.com/pezkuwichain/subxt/pull/1199)) +- Add `subxt` feature in `subxt-signer` crate to default features ([#1193](https://github.com/pezkuwichain/subxt/pull/1193)) + +### Fixed + +- Trimmed metadata hash comparison fails in `is_codegen_valid_for` ([#1306](https://github.com/pezkuwichain/subxt/pull/1306)) +- Sync tx submission with chainHead_follow ([#1305](https://github.com/pezkuwichain/subxt/pull/1305)) +- Storage: Fix partial key storage iteration ([#1298](https://github.com/pezkuwichain/subxt/pull/1298)) +- Lightclient: Fix wasm socket closure called after being dropped ([#1289](https://github.com/pezkuwichain/subxt/pull/1289)) +- Fix parachain example ([#1228](https://github.com/pezkuwichain/subxt/pull/1228)) + +## [0.32.1] - 2023-10-05 + +This is a patch release, mainly to deploy the fix [#1191](https://github.com/pezkuwichain/subxt/pull/1191), which resolves an issue around codegen when runtime API definitions have an argument name "_". + +We also expose an API, `api.blocks().at(block_hash).account_nonce(account_id)`, which allows one to obtain the account nonce for some account at any block hash, and not just at the latest finalized block hash as is possible via `api.tx().account_nonce(..)`. 
+ +The main changes are: + +- fix for when runtime API field name is _ ([#1191](https://github.com/pezkuwichain/subxt/pull/1191)) +- allow getting account nonce at arbitrary blocks, too ([#1182](https://github.com/pezkuwichain/subxt/pull/1182)) +- chore: improve some error messages ([#1183](https://github.com/pezkuwichain/subxt/pull/1183)) + +## [0.32.0] - 2023-09-27 + +This is a big release that adds quite a lot, and also introduces some slightly larger breaking changes. Let's look at the main changes: + +### The `Backend` trait and the `UnstableBackend` and `LegacyBackend` impls. + +See [#1126](https://github.com/pezkuwichain/subxt/pull/1126), [#1137](https://github.com/pezkuwichain/subxt/pull/1137) and [#1161](https://github.com/pezkuwichain/subxt/pull/1161) for more information. + +The overarching idea here is that we want Subxt to be able to continue to support talking to nodes/light-clients using the "legacy" RPC APIs that are currently available, but we _also_ want to be able to support using only [the new RPC APIs](https://pezkuwichain.github.io/json-rpc-interface-spec/) once they are stabilized. + +Until now, the higher level APIs in Subxt all had access to the RPCs and could call whatever they needed. Now, we've abstracted away which RPCs are called (or even that RPCs are used at all) behind a `subxt::backend::Backend` trait. Higher level APIs no longer have access to RPC methods and instead have access to the current `Backend` implementation. We then added two `Backend` implementations: + +- `subxt::backend::legacy::LegacyBackend`: This uses the "legacy" RPCs, as we've done to date, to obtain the information we need. This is still the default backend that Subxt will use. +- `subxt::backend::unstable::UnstableBackend`: This backend relies on the new (and currently still unstable) `chainHead` based RPC APIs to obtain the information we need. This could break at any time as the RPC methods update, until they are fully stabilized. 
One day, this will be the default backend.
+
+One of the significant differences between backends is that the `UnstableBackend` can only fetch further information about blocks that are "pinned", ie that we have signalled are still in use. To that end, the backend now hands back `BlockRef`s instead of plain block hashes. As long as a `BlockRef` exists for some block, the backend (and node) will attempt to keep it available. Thus, Subxt will keep hold of these internally as needed, and also allows you to obtain them from a `Block` with `block.reference()`, in case you need to try and hold on to any blocks for longer.
+
+One of the main breaking changes here is in how you can access and call RPC methods.
+
+Previously, you could access them directly from the Subxt client, since it exposed the RPC methods itself, eg:
+
+```rust
+let genesis_hash = client.rpc().genesis_hash().await?;
+```
+
+Now, the client only knows about a `Backend` (ie it has a `.backend()` method instead of `.rpc()`), and doesn't know about RPCs, but you can still manually create an `RpcClient` to call RPC methods like so:
+
+```rust
+use subxt::{
+    config::BizinikiwConfig,
+    backend::rpc::RpcClient,
+    backend::legacy::LegacyRpcMethods,
+};
+
+// Instantiate an RPC client pointing at some URL.
+let rpc_client = RpcClient::from_url("ws://localhost:9944").await?;
+
+// We could also call unstable RPCs with `backend::unstable::UnstableRpcMethods`:
+let rpc_methods = LegacyRpcMethods::<BizinikiwConfig>::new(rpc_client);
+
+// Use it to make RPC calls, here calling the legacy genesis_hash method.
+let genesis_hash = rpc_methods.genesis_hash().await?;
+```
+
+If you'd like to share a single client for RPCs and Subxt usage, you can clone this RPC client and run `OnlineClient::<BizinikiwConfig>::from_rpc_client(rpc_client)` to create a Subxt client using it.
+
+Another side effect of this change is that RPC related things have moved from `subxt::rpc::*` to `subxt::backend::rpc::*` and some renaming has happened along the way. 
+
+A number of smaller breaking changes have also been made in order to expose details that are compatible with both sets of RPCs, and to generally move Subxt towards working well with the new APIs and exposing things in a consistent way:
+
+- The storage method `fetch_keys` is renamed to `fetch_raw_keys` (this is just for consistency with `fetch_raw`).
+- The storage method `iter` no longer accepts a `page_size` argument, and each item returned is now an `Option<Result<T>>` instead of a `Result<Option<T>>` (we now return a valid `Stream` implementation for storage entry iteration). See [this example](https://github.com/pezkuwichain/subxt/blob/cd5060a5a08c9bd73477477cd2cadc16015e77bf/subxt/examples/storage_iterating.rs#L18).
+- The events returned when you manually watch a transaction have changed in order to be consistent with the new RPC APIs (the new events [can be seen here](https://github.com/pezkuwichain/subxt/blob/cd5060a5a08c9bd73477477cd2cadc16015e77bf/subxt/src/tx/tx_progress.rs#L203)), and `next_item` => `next`. If you rely on higher level calls like `sign_and_submit_then_watch`, nothing has changed.
+- Previously, using `.at_latest()` in various places would mean that calls would run against the latest _best_ block. Now, all such calls will run against the latest _finalized_ block. The latest best block is subject to changing or being pruned entirely, and can differ between nodes.
+- `.at(block_hash)` should continue to work as-is, but can now also accept a `BlockRef`, to keep the relevant block around while you're using the associated APIs.
+- To fetch the extrinsics in a block, you used to call `block.body().await?.extrinsics()`. This has now been simplified to `block.extrinsics().await?`.
+
+### Making `ExtrinsicParams` more flexible with `SignedExtension`s.
+
+See [#1107](https://github.com/pezkuwichain/subxt/pull/1107) for more information. 
+ +When configuring Subxt to work against a given chain, you needed to configure the `ExtrinsicParams` associated type to encode exactly what was required by the chain when submitting transactions. This could be difficult to get right. + +Now, we have "upgraded" the `ExtrinsicParams` trait to give it access to metadata, so that it can be smarter about how to encode the correct values. We've also added a `subxt::config::SignedExtension` trait, and provided implementations of it for all of the "standard" signed extensions (though [we have a little work to do still](https://github.com/pezkuwichain/subxt/issues/1162)). + +How can you use `SignedExtension`s? Well, `subxt::config::signed_extensions::AnyOf` is a type which can accept any tuple of `SignedExtension`s, and itself implements `ExtrinsicParams`. It's smart, and will use the metadata to know which of the signed extensions that you provided to actually use on a given chain. So, `AnyOf` makes it easy to compose whichever `SignedExtension`s you need to work with a chain. + +Finally, we expose `subxt::config::{ DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder }`; the former just uses `AnyOf` to automatically use any of the "standard" signed extensions as needed, and the latter provided a nice builder interface to configure any parameters for them. This is now the default type used in `BizinikiwConfig` and `PezkuwiConfig`, so long story short: those configurations (and particularly their `ExtrinsicParams`) are more likely to _Just Work_ now across default chains. + +[See this example](https://github.com/pezkuwichain/subxt/blob/cd5060a5a08c9bd73477477cd2cadc16015e77bf/subxt/examples/setup_config_signed_extension.rs) for how to create and use custom signed extensions, or [this example](https://github.com/pezkuwichain/subxt/blob/cd5060a5a08c9bd73477477cd2cadc16015e77bf/subxt/examples/setup_config_custom.rs) for how to implement custom `ExtrinsicParams` if you'd prefer to ignore `SignedExtension`s entirely. 
+
+As a result of using the new `DefaultExtrinsicParams` in `BizinikiwConfig` and `PezkuwiConfig`, the interface to configure transactions has changed (and in fact been generally simplified). Configuring a mortal transaction with a small tip for instance used to look like:
+
+```rust
+use subxt::config::pezkuwi::{Era, PlainTip, PezkuwiExtrinsicParamsBuilder as Params};
+
+let tx_params = Params::new()
+    .tip(PlainTip::new(1_000))
+    .era(Era::mortal(32, latest_block.header().number()), latest_block.header().hash());
+
+let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?;
+```
+
+And now it will look like this:
+
+```rust
+use subxt::config::pezkuwi::PezkuwiExtrinsicParamsBuilder as Params;
+
+let tx_params = Params::new()
+    .tip(1_000)
+    .mortal(latest_block.header(), 32)
+    .build();
+
+let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?;
+```
+
+Check the docs for `PezkuwiExtrinsicParamsBuilder` and the `ExtrinsicParams` trait for more information.
+
+### Storage: Allow iterating storage entries at different depths
+
+See ([#1079](https://github.com/pezkuwichain/subxt/pull/1079)) for more information.
+
+Previously, we could statically iterate over the root of some storage map using something like:
+
+```rust
+// Build a storage query to iterate over account information.
+let storage_query = pezkuwi::storage().system().account_root();
+
+// Get back an iterator of results (here, we are fetching 10 items at
+// a time from the node, but we always iterate over one at a time).
+let mut results = api.storage().at_latest().await?.iter(storage_query, 10).await?;
+```
+
+Now, the suffix `_root` has been renamed to `_iter`, and if the storage entry is for instance a double map (or greater depth), we'll also now generate `_iter2`, `_iter3` and so on, each accepting the keys needed to access the map at that depth to iterate the remainder. The above example now becomes:
+
+```rust
+// Build a storage query to iterate over account information. 
+let storage_query = pezkuwi::storage().system().account_iter();
+
+// Get back an iterator of results
+let mut results = api.storage().at_latest().await?.iter(storage_query).await?;
+```
+
+Note also that the pagination size no longer needs to be provided; that's handled internally by the relevant `Backend`.
+
+### Custom values
+
+This is not a breaking change, but just a noteworthy addition; see [#1106](https://github.com/pezkuwichain/subxt/pull/1106), [#1117](https://github.com/pezkuwichain/subxt/pull/1117) and [#1147](https://github.com/pezkuwichain/subxt/pull/1147) for more information.
+
+V15 metadata allows chains to insert arbitrary information into a new "custom values" hashmap ([see this](https://github.com/pezkuwichain/frame-metadata/blob/0e90489c8588d48b55779f1c6b93216346ecc8a9/frame-metadata/src/v15.rs#L306)). Subxt has now added APIs to allow accessing these custom values a little like how constants can be accessed.
+
+Dynamically accessing custom values looks a bit like this:
+
+```rust
+// Obtain the raw bytes for some entry:
+let custom_value_bytes: Vec<u8> = client.custom_values().bytes_at("custom-value-name")?;
+
+// Obtain a representation of the value that we can attempt to decode:
+let custom_value = client.custom_values().at("custom-value-name")?;
+
+// Decode it into a runtime Value if possible:
+let value: Value = custom_value.to_value()?;
+// Or attempt to decode it into a specific type:
+let value: Foo = custom_value.as_type()?;
+```
+
+We can also use codegen to statically access values, which makes use of validation and returns a known type whenever possible, for the added compile time safety this brings:
+
+```rust
+#[subxt::subxt(runtime_metadata_path = "metadata.scale")]
+pub mod runtime {}
+
+// The generated interface also exposes any custom values with known types and sensible names:
+let value_addr = runtime::custom().custom_value_name();
+
+// We can use this address to access and decode the relevant value from metadata:
+let 
static_value = client.custom_values().at(&value_addr)?; +// Or just ask for the bytes for it: +let static_value_bytes = client.custom_values().bytes_at(&value_addr)?; +``` + +That sums up the most significant changes. All of the key commits in this release can be found here: + +### Added + +- `UnstableBackend`: Add a chainHead based backend implementation ([#1161](https://github.com/pezkuwichain/subxt/pull/1161)) +- `UnstableBackend`: Expose the chainHead RPCs ([#1137](https://github.com/pezkuwichain/subxt/pull/1137)) +- Introduce Backend trait to allow different RPC (or other) backends to be implemented ([#1126](https://github.com/pezkuwichain/subxt/pull/1126)) +- Custom Values: Fixes and tests for "custom values" ([#1147](https://github.com/pezkuwichain/subxt/pull/1147)) +- Custom Values: Add generated APIs to statically access custom values in metadata ([#1117](https://github.com/pezkuwichain/subxt/pull/1117)) +- Custom Values: Support dynamically accessing custom values in metadata ([#1106](https://github.com/pezkuwichain/subxt/pull/1106)) +- Add `storage_version()` and `runtime_wasm_code()` to storage ([#1111](https://github.com/pezkuwichain/subxt/pull/1111)) +- Make ExtrinsicParams more flexible, and introduce signed extensions ([#1107](https://github.com/pezkuwichain/subxt/pull/1107)) + +### Changed + +- `subxt-codegen`: Add "web" feature for WASM compilation that works with `jsonrpsee` ([#1175](https://github.com/pezkuwichain/subxt/pull/1175)) +- `subxt-codegen`: support compiling to WASM ([#1154](https://github.com/pezkuwichain/subxt/pull/1154)) +- CI: Use composite action to avoid dupe use-bizinikiwi code ([#1177](https://github.com/pezkuwichain/subxt/pull/1177)) +- Add custom `Debug` impl for `DispatchError` to avoid huge metadata output ([#1153](https://github.com/pezkuwichain/subxt/pull/1153)) +- Remove unused start_key that new RPC API may not be able to support ([#1148](https://github.com/pezkuwichain/subxt/pull/1148)) +- refactor(rpc): Use the 
default port if one isn't provided ([#1122](https://github.com/pezkuwichain/subxt/pull/1122)) +- Storage: Support iterating over NMaps with partial keys ([#1079](https://github.com/pezkuwichain/subxt/pull/1079)) + +### Fixed + +- metadata: Generate runtime outer enums if not present in V14 ([#1174](https://github.com/pezkuwichain/subxt/pull/1174)) +- Remove "std" feature from `sp-arithmetic` to help bizinikiwi compat. ([#1155](https://github.com/pezkuwichain/subxt/pull/1155)) +- integration-tests: Increase the number of events we'll wait for ([#1152](https://github.com/pezkuwichain/subxt/pull/1152)) +- allow 'latest' metadata to be returned from the fallback code ([#1127](https://github.com/pezkuwichain/subxt/pull/1127)) +- chainHead: Propagate results on the `chainHead_follow` ([#1116](https://github.com/pezkuwichain/subxt/pull/1116)) + + +## [0.31.0] - 2023-08-02 + +This is a small release whose primary goal is to bump the versions of `scale-encode`, `scale-decode` and `scale-value` being used, to benefit from recent changes in those crates. + +`scale-decode` changes how compact values are decoded as part of [#1103](https://github.com/pezkuwichain/subxt/pull/1103). A compact encoded struct should now be properly decoded into a struct of matching shape (which implements `DecodeAsType`). This will hopefully resolve issues around structs like `Perbill`. When decoding the SCALE bytes for such types into `scale_value::Value`, the `Value` will now be a composite type wrapping a value, and not just the value. + +We've also figured out how to sign extrinsics using browser wallets when a Subxt app is compiled to WASM; see [#1067](https://github.com/pezkuwichain/subxt/pull/1067) for more on that! 
+
+The key commits:
+
+### Added
+
+- Add browser extension signing example ([#1067](https://github.com/pezkuwichain/subxt/pull/1067))
+
+### Changed
+
+- Bump to latest scale-encode/decode/value and fix test running ([#1103](https://github.com/pezkuwichain/subxt/pull/1103))
+- Set minimum supported `rust-version` to `1.70` ([#1097](https://github.com/pezkuwichain/subxt/pull/1097))
+
+### Fixed
+
+- Tests: support 'bizinikiwi-node' too and allow multiple binary paths ([#1102](https://github.com/pezkuwichain/subxt/pull/1102))
+
+
+## [0.30.1] - 2023-07-25
+
+This patch release fixes a small issue whereby using `runtime_metadata_url` in the Subxt macro would still attempt to download unstable metadata, which can fail at the moment if the chain has not updated to stable V15 metadata yet (which has a couple of changes from the last unstable version). Note that you're generally encouraged to use `runtime_metadata_path` instead, which does not have this issue.
+
+### Fixes
+
+- codegen: Fetch and decode metadata version then fallback ([#1092](https://github.com/pezkuwichain/subxt/pull/1092))
+
+
+## [0.30.0] - 2023-07-24
+
+This release brings with it a number of exciting additions. Let's cover a few of the most significant ones:
+
+### Light client support (unstable)
+
+This release adds support for light clients using Smoldot, both when compiling native binaries and when compiling to WASM to run in a browser environment. This is unstable for now while we continue testing it and work on making use of the new RPC APIs. 
+
+Here's how to use it:
+
+```rust
+use subxt::{
+    client::{LightClient, LightClientBuilder},
+    PezkuwiConfig
+};
+use subxt_signer::sr25519::dev;
+
+// Create a light client:
+let api = LightClient::<PezkuwiConfig>::builder()
+    // You can also pass a chain spec directly using `build`, which is preferred:
+    .build_from_url("ws://127.0.0.1:9944")
+    .await?;
+
+// Working with the interface is then the same as before:
+let dest = dev::bob().public_key().into();
+let balance_transfer_tx = pezkuwi::tx().balances().transfer(dest, 10_000);
+let events = api
+    .tx()
+    .sign_and_submit_then_watch_default(&balance_transfer_tx, &dev::alice())
+    .await?
+    .wait_for_finalized_success()
+    .await?;
+```
+
+At the moment you may encounter certain things that don't work; please file an issue if you do!
+
+### V15 Metadata
+
+This release stabilizes the metadata V15 interface, which brings a few changes but primarily allows you to interact with Runtime APIs via an ergonomic Subxt interface:
+
+```rust
+// We can use the static interface to interact in a type safe way:
+#[subxt::subxt(runtime_metadata_path = "path/to/metadata.scale")]
+pub mod pezkuwi {}
+
+let runtime_call = pezkuwi::apis()
+    .metadata()
+    .metadata_versions();
+
+// Or we can use the dynamic interface like so:
+use subxt::dynamic::Value;
+
+let runtime_call = subxt::dynamic::runtime_api_call(
+    "Metadata",
+    "metadata_versions",
+    Vec::<Value<()>>::new()
+);
+```
+
+This is no longer behind a feature flag, but if the chain you're connecting to doesn't use V15 metadata yet then the above will be unavailable.
+
+### `subxt-signer`
+
+The new `subxt-signer` crate provides the ability to sign transactions using either sr25519 or ECDSA. It's WASM compatible, and brings in fewer dependencies than using `sp_core`/`sp_keyring` does, while having an easy to use interface. 
Here's an example of signing a transaction using it:
+
+```rust
+use subxt::{OnlineClient, PezkuwiConfig};
+use subxt_signer::sr25519::dev;
+
+let api = OnlineClient::<PezkuwiConfig>::new().await?;
+
+// Build the extrinsic; a transfer to bob:
+let dest = dev::bob().public_key().into();
+let balance_transfer_tx = pezkuwi::tx().balances().transfer(dest, 10_000);
+
+// Sign and submit the balance transfer extrinsic from Alice:
+let from = dev::alice();
+let events = api
+    .tx()
+    .sign_and_submit_then_watch_default(&balance_transfer_tx, &from)
+    .await?
+    .wait_for_finalized_success()
+    .await?;
+```
+
+Dev keys should only be used for tests since they are publicly known. Actual keys can be generated from URIs, phrases or raw entropy, and derived using soft/hard junctions:
+
+```rust
+use subxt_signer::{ SecretUri, sr25519::Keypair };
+use std::str::FromStr;
+
+// From a phrase (see `bip39` crate on generating phrases):
+let phrase = bip39::Mnemonic::parse(phrase).unwrap();
+let keypair = Keypair::from_phrase(&phrase, Some("Password")).unwrap();
+
+// Or from a URI:
+let uri = SecretUri::from_str("//Alice").unwrap();
+let keypair = Keypair::from_uri(&uri).unwrap();
+
+// Deriving a new key from an existing one:
+let keypair = keypair.derive([
+    DeriveJunction::hard("Alice"),
+    DeriveJunction::soft("stash")
+]);
+```
+
+### Breaking changes
+
+A few small breaking changes have occurred:
+
+- There is no longer a need for an `Index` associated type in your `Config` implementations; we now work it out dynamically where needed.
+- The "bizinikiwi-compat" feature flag is no longer enabled by default. `subxt-signer` added native signing support and can be used instead of bringing in Bizinikiwi dependencies to sign transactions now. You can still enable this feature flag as before to make use of them if needed.
+  - **Note:** Be aware that Bizinikiwi crates haven't been published in a while and have fallen out of date, though. 
This will be addressed eventually, and when it is we can bring the Bizinikiwi crates back uptodate here. + +For anything else that crops up, the compile errors and API docs will hopefully point you in the right direction, but please raise an issue if not. + +For a full list of changes, see below: + +### Added + +- Example: How to connect to parachain ([#1043](https://github.com/pezkuwichain/subxt/pull/1043)) +- ECDSA Support in signer ([#1064](https://github.com/pezkuwichain/subxt/pull/1064)) +- Add `subxt_signer` crate for native & WASM compatible signing ([#1016](https://github.com/pezkuwichain/subxt/pull/1016)) +- Add light client platform WASM compatible ([#1026](https://github.com/pezkuwichain/subxt/pull/1026)) +- light-client: Add experimental light-client support ([#965](https://github.com/pezkuwichain/subxt/pull/965)) +- Add `diff` command to CLI tool to visualize metadata changes ([#1015](https://github.com/pezkuwichain/subxt/pull/1015)) +- CLI: Allow output to be written to file ([#1018](https://github.com/pezkuwichain/subxt/pull/1018)) + +### Changed + +- Remove `bizinikiwi-compat` default feature flag ([#1078](https://github.com/pezkuwichain/subxt/pull/1078)) +- runtime API: Substitute `UncheckedExtrinsic` with custom encoding ([#1076](https://github.com/pezkuwichain/subxt/pull/1076)) +- Remove `Index` type from Config trait ([#1074](https://github.com/pezkuwichain/subxt/pull/1074)) +- Utilize Metadata V15 ([#1041](https://github.com/pezkuwichain/subxt/pull/1041)) +- chain_getBlock extrinsics encoding ([#1024](https://github.com/pezkuwichain/subxt/pull/1024)) +- Make tx payload details public ([#1014](https://github.com/pezkuwichain/subxt/pull/1014)) +- CLI tool tests ([#977](https://github.com/pezkuwichain/subxt/pull/977)) +- Support NonZero numbers ([#1012](https://github.com/pezkuwichain/subxt/pull/1012)) +- Get account nonce via state_call ([#1002](https://github.com/pezkuwichain/subxt/pull/1002)) +- add `#[allow(rustdoc::broken_intra_doc_links)]` 
to subxt-codegen ([#998](https://github.com/pezkuwichain/subxt/pull/998)) + +### Fixed + +- remove parens in hex output for CLI tool ([#1017](https://github.com/pezkuwichain/subxt/pull/1017)) +- Prevent bugs when reusing type ids in hashing ([#1075](https://github.com/pezkuwichain/subxt/pull/1075)) +- Fix invalid generation of types with >1 generic parameters ([#1023](https://github.com/pezkuwichain/subxt/pull/1023)) +- Fix jsonrpsee web features ([#1025](https://github.com/pezkuwichain/subxt/pull/1025)) +- Fix codegen validation when Runtime APIs are stripped ([#1000](https://github.com/pezkuwichain/subxt/pull/1000)) +- Fix hyperlink ([#994](https://github.com/pezkuwichain/subxt/pull/994)) +- Remove invalid redundant clone warning ([#996](https://github.com/pezkuwichain/subxt/pull/996)) + +## [0.29.0] - 2023-06-01 + +This is another big release for Subxt with a bunch of awesome changes. Let's talk about some of the notable ones: + +### A new guide + +This release will come with overhauled documentation and examples which is much more comprehensive than before, and goes into much more detail on each of the main areas that Subxt can work in. + +Check out [the documentation](https://docs.rs/subxt/latest/subxt/) for more. We'll continue to build on this with some larger examples, too, going forwards. ([#968](https://github.com/pezkuwichain/subxt/pull/968)) is particularly cool as it's our first example showcasing Subxt working with Yew and WASM; it'll be extended with more documentation and things in the next release. + +### A more powerful CLI tool: an `explore` command. + +The CLI tool has grown a new command, `explore`. Point it at a node and use `explore` to get information about the calls, constants and storage of a node, with a helpful interface that allows you to progressively dig into each of these areas! 
+ +### Support for (unstable) V15 metadata and generating a Runtime API interface + +One of the biggest changes in this version is that, given (unstable) V15 metadata, Subxt can now generate a nice interface to make working with Runtime APIs as easy as building extrinsics or storage queries. This is currently unstable until the V15 metadata format is stabilised, and so will break as we introduce more tweaks to the metadata format. We hope to stabilise V15 metadata soon; [see this](https://forum.pezkuwi.network/t/stablising-v15-metadata/2819) for more information. At this point, we'll stabilize support in Subxt. + +### Support for decoding extrinsics + +Up until now, you were able to retrieve the bytes for extrinsics, but weren't able to use Subxt to do much with those bytes. + +Now, we expose several methods to decode extrinsics that work much like decoding events: + +```rust +#[subxt::subxt(runtime_metadata_path = "pezkuwi_metadata.scale")] +pub mod pezkuwi {} + +// Get some block: +let block = api.blocks().at_latest().await?; + +// Find and decode a specific extrinsic in the block: +let remark = block.find::()?; + +// Iterate the extrinsics in the block: +for ext in block.iter() { + // Decode a specific extrinsic into the call data: + let remark = ext.as_extrinsic::()?; + // Decode any extrinsic into an enum containing the call data: + let extrinsic = ext.as_root_extrinsic::()?; +} +``` + +### New Metadata Type ([#974](https://github.com/pezkuwichain/subxt/pull/974)) + +Previously, the `subxt_metadata` crate was simply a collection of functions that worked directly on `frame_metadata` types. Then, in `subxt`, we had a custom metadata type which wrapped this to provide the interface needed by various Subxt internals and traits. + +Now, the `subxt_metadata` crate exposes our own `Metadata` type which can be decoded from the same wire format as the `frame_metadata` types we used to use. 
This type is now used throughout Subxt, as well as in the `codegen` stuff, and provides a single unified interface for working with metadata that is independent of the actual underlying metadata version we're using. + +This shouldn't lead to breakages in most code, but if you need to load metadata for an `OfflineClient` you might previously have done this: + +```rust +use subxt::ext::frame_metadata::RuntimeMetadataPrefixed; +use subxt::metadata::Metadata; + +let metadata = RuntimeMetadataPrefixed::decode(&mut &*bytes).unwrap(); +let metadata = Metadata::try_from(metadata).unwrap(); +``` + +But now you'd do this: + +```rust +use subxt::metadata::Metadata; + +let metadata = Metadata::decode(&mut &*bytes).unwrap(); +``` + +Otherwise, if you implement traits like `TxPayload` directly, you'll need to tweak the implementations to use the new `Metadata` type, which exposes everything you used to be able to get hold of but behind a slightly different interface. + +### Removing `as_pallet_event` method ([#953](https://github.com/pezkuwichain/subxt/pull/953)) + +In an effort to simplify the number of ways we have to decode events, `as_pallet_event` was removed. You can achieve a similar thing by calling `as_root_event`, which will decode _any_ event that the static interface knows about into an outer enum of pallet names to event names. if you only care about a specific event, you can match on this enum to look for events from a specific pallet. + +Another reason that `as_pallet_event` was removed was that it could potentially decode events from the wrong pallets into what you're looking for, if the event shapes happened to line up, which was a potential foot gun. + +### Added `as_root_error` for decoding errors. + +Much like we can call `as_root_extrinsic` or `as_root_event` to decode extrinsics and events into a top level enum, we've also added `as_root_error` to do the same for errors and help to make this interface consistent across the board. 
+ +Beyond these, there's a bunch more that's been added, fixed and changes. A full list of the notable changes in this release are as follows: + +### Added + +- Add topics to `EventDetails` ([#989](https://github.com/pezkuwichain/subxt/pull/989)) +- Yew Subxt WASM examples ([#968](https://github.com/pezkuwichain/subxt/pull/968)) +- CLI subxt explore commands ([#950](https://github.com/pezkuwichain/subxt/pull/950)) +- Retain specific runtime APIs ([#961](https://github.com/pezkuwichain/subxt/pull/961)) +- Subxt Guide ([#890](https://github.com/pezkuwichain/subxt/pull/890)) +- Partial fee estimates for SubmittableExtrinsic ([#910](https://github.com/pezkuwichain/subxt/pull/910)) +- Add ability to opt out from default derives and attributes ([#925](https://github.com/pezkuwichain/subxt/pull/925)) +- add no_default_substitutions to the macro and cli ([#936](https://github.com/pezkuwichain/subxt/pull/936)) +- extrinsics: Decode extrinsics from blocks ([#929](https://github.com/pezkuwichain/subxt/pull/929)) +- Metadata V15: Generate Runtime APIs ([#918](https://github.com/pezkuwichain/subxt/pull/918)) and ([#947](https://github.com/pezkuwichain/subxt/pull/947)) +- impl Header and Hasher for some bizinikiwi types behind the "bizinikiwi-compat" feature flag ([#934](https://github.com/pezkuwichain/subxt/pull/934)) +- add `as_root_error` for helping to decode ModuleErrors ([#930](https://github.com/pezkuwichain/subxt/pull/930)) + +### Changed + +- Update scale-encode, scale-decode and scale-value to latest ([#991](https://github.com/pezkuwichain/subxt/pull/991)) +- restrict sign_with_address_and_signature interface ([#988](https://github.com/pezkuwichain/subxt/pull/988)) +- Introduce Metadata type ([#974](https://github.com/pezkuwichain/subxt/pull/974)) and ([#978](https://github.com/pezkuwichain/subxt/pull/978)) +- Have a pass over metadata validation ([#959](https://github.com/pezkuwichain/subxt/pull/959)) +- remove as_pallet_extrinsic and as_pallet_event 
([#953](https://github.com/pezkuwichain/subxt/pull/953))
+- speed up ui tests. ([#944](https://github.com/pezkuwichain/subxt/pull/944))
+- cli: Use WS by default instead of HTTP ([#954](https://github.com/pezkuwichain/subxt/pull/954))
+- Upgrade to `syn 2.0` ([#875](https://github.com/pezkuwichain/subxt/pull/875))
+- Move all deps to workspace toml ([#932](https://github.com/pezkuwichain/subxt/pull/932))
+- Speed up CI ([#928](https://github.com/pezkuwichain/subxt/pull/928)) and ([#926](https://github.com/pezkuwichain/subxt/pull/926))
+- metadata: Use v15 internally ([#912](https://github.com/pezkuwichain/subxt/pull/912))
+- Factor bizinikiwi node runner into separate crate ([#913](https://github.com/pezkuwichain/subxt/pull/913))
+- Remove need to import parity-scale-codec to use subxt macro ([#907](https://github.com/pezkuwichain/subxt/pull/907))
+
+### Fixed
+
+- use blake2 for extrinsic hashing ([#921](https://github.com/pezkuwichain/subxt/pull/921))
+- Ensure unique types in codegen ([#967](https://github.com/pezkuwichain/subxt/pull/967))
+- use unit type in pezkuwi config ([#943](https://github.com/pezkuwichain/subxt/pull/943))
+
+
+## [0.28.0] - 2023-04-11
+
+This is a fairly significant change; what follows is a description of the main changes to be aware of:
+
+### Unify how we encode and decode static and dynamic types ([#842](https://github.com/pezkuwichain/subxt/pull/842))
+
+Prior to this, static types generated by codegen (ie subxt macro) would implement `Encode` and `Decode` from the `parity-scale-codec` library. This meant that they would be encoded-to and decoded-from based on their shape. Dynamic types (eg the `subxt::dynamic::Value` type) would be encoded and decoded based on the node metadata instead.
+
+This change makes use of the new `scale-encode` and `scale-decode` crates to auto-implement `EncodeAsType` and `DecodeAsType` on all of our static types. 
These traits allow types to take the node metadata into account when working out how best to encode and decode into them. By using metadata, we can be much more flexible/robust about how to encode/decode various types (as an example, nested transactions will now be portable across runtimes). Additionally, we can merge our codepaths for static and dynamic encoding/decoding, since both static and dynamic types can implement these traits. Read [the PR description](https://github.com/pezkuwichain/subxt/pull/842) for more info. + +A notable impact of this is that any types you wish to substitute when performing codegen (via the CLI tool or `#[subxt]` macro) must also implement `EncodeAsType` and `DecodeAsType` too. Bizinikiwi types, for instance, generally do not. To work around this, [#886](https://github.com/pezkuwichain/subxt/pull/886) introduces a `Static` type and enhances the type substitution logic so that you're able to wrap any types which only implement `Encode` and `Decode` to work (note that you lose out on the improvements from `EncodeAsType` and `DecodeAsType` when you do this): + +```rust +#[subxt::subxt( + runtime_metadata_path = "/path/to/metadata.scale", + substitute_type( + type = "sp_runtime::multiaddress::MultiAddress", + with = "::subxt::utils::Static<::sp_runtime::multiaddress::MultiAddress>" + ) +)] +pub mod node_runtime {} +``` + +So, if you want to substitute in Bizinikiwi types, wrap them in `::subxt::utils::Static` in the type substitution, as above. [#886](https://github.com/pezkuwichain/subxt/pull/886) also generally improves type substitution so that you can substitute the generic params in nested types, since it's required in the above. + +Several types have been renamed as a result of this unification (though they aren't commonly made explicit use of). 
Additionally, to obtain the bytes from a storage address, instead of doing:
+
+```rust
+let addr_bytes = storage_address.to_bytes()
+```
+
+You must now do:
+
+```rust
+let addr_bytes = cxt.client().storage().address_bytes(&storage_address).unwrap();
+```
+
+This is because the address on its own no longer requires as much static information, and relies more heavily now on the node metadata to encode it to bytes.
+
+### Expose Signer payload ([#861](https://github.com/pezkuwichain/subxt/pull/861))
+
+This is not a breaking change, but notable in that it adds `create_partial_signed_with_nonce` and `create_partial_signed` to the `TxClient` to allow you to break extrinsic creation into two steps:
+
+1. building a payload, and then
+2. when a signature is provided, getting back an extrinsic ready to be submitted.
+
+This allows a signer payload to be obtained from Subxt, handed off to some external application, and then once a signature has been obtained, that can be passed back to Subxt to complete the creation of an extrinsic. This opens the door to using browser wallet extensions, for instance, to sign Subxt payloads.
+
+### Stripping unneeded pallets from metadata ([#879](https://github.com/pezkuwichain/subxt/pull/879))
+
+This is not a breaking change, but adds the ability to use the Subxt CLI tool to strip out all but some named list of pallets from a metadata bundle. Aside from allowing you to store a significantly smaller metadata bundle with only the APIs you need in it, it will also lead to faster codegen, since there's much less of it to do.
+
+Use a command like `subxt metadata --pallets Balances,System` to select specific pallets. You can provide an existing metadata file to take that and strip it, outputting a smaller bundle. Alternately it will grab the metadata from a local node and strip that before outputting. 
+
+### Dispatch error changes ([#878](https://github.com/pezkuwichain/subxt/pull/878))
+
+The `DispatchError` returned from either attempting to submit an extrinsic, or from calling `.dry_run()` has changed. It's now far more complete with respect to the information it returns in each case, and the interface has been tidied up. Changes include:
+
+- For `ModuleError`'s, instead of `err.pallet` and `err.error`, you can obtain error details using `let details = err.details()?` and then `details.pallet()` and `details.error()`.
+- `DryRunResult` is now a custom enum with 3 states, `Success`, `DispatchError` or `TransactionValidityError`. The middle of these contains much more information than previously.
+- Errors in general have been marked `#[non_exhaustive]` since they could grow and change at any time. (Owing to our use of `scale-decode` internally, we are not so constrained when it comes to having precise variant indexes or anything now, and can potentially deprecate rather than remove old variants as needed).
+- On a lower level, the `rpc.dry_run()` RPC call now returns the raw dry run bytes which can then be decoded with the help of metadata into our `DryRunResult`.
+
+### Extrinsic submission changes ([#897](https://github.com/pezkuwichain/subxt/pull/897))
+
+It was found by [@furoxr](https://github.com/furoxr) that Bizinikiwi nodes will stop sending transaction progress events under more circumstances than we originally expected. Thus, now calls like `wait_for_finalized()` and `wait_for_in_block()` will stop waiting for events when any of the following is sent from the node:
+
+- `Usurped`
+- `Finalized`
+- `FinalityTimeout`
+- `Invalid`
+- `Dropped`
+
+Previously we'd only close the subscription and stop waiting when we saw a `Finalized` or `FinalityTimeout` event. Thanks for digging into this [@furoxr](https://github.com/furoxr)! 
+ +### Add `at_latest()` method ([#900](https://github.com/pezkuwichain/subxt/pull/900) and [#904](https://github.com/pezkuwichain/subxt/pull/904)) + +A small breaking change; previously we had `.at(None)` or `.at(Some(block_hash))` methods in a few places to obtain things at either the latest block or some specific block hash. + +This API has been clarified; we now have `.at_latest()` to obtain the thing at the latest block, or `.at(block_hash)` (note; no more option) to obtain the thing at some fixed block hash. In a few instances this has allowed us to ditch the `async` from the `.at()` call. + +That covers the larger changes in this release. For more details, have a look at all of the notable PRs since the last release here: + +### Added + +- added at_latest ([#900](https://github.com/pezkuwichain/subxt/pull/900) and [#904](https://github.com/pezkuwichain/subxt/pull/904)) +- Metadata: Retain a subset of metadata pallets ([#879](https://github.com/pezkuwichain/subxt/pull/879)) +- Expose signer payload to allow external signing ([#861](https://github.com/pezkuwichain/subxt/pull/861)) +- Add ink! 
as a user of `subxt` ([#837](https://github.com/pezkuwichain/subxt/pull/837)) +- codegen: Add codegen error ([#841](https://github.com/pezkuwichain/subxt/pull/841)) +- codegen: allow documentation to be opted out of ([#843](https://github.com/pezkuwichain/subxt/pull/843)) +- re-export `sp_core` and `sp_runtime` ([#853](https://github.com/pezkuwichain/subxt/pull/853)) +- Allow generating only runtime types in subxt macro ([#845](https://github.com/pezkuwichain/subxt/pull/845)) +- Add 'Static' type and improve type substitution codegen to accept it ([#886](https://github.com/pezkuwichain/subxt/pull/886)) + +### Changed + +- Improve Dispatch Errors ([#878](https://github.com/pezkuwichain/subxt/pull/878)) +- Use scale-encode and scale-decode to encode and decode based on metadata ([#842](https://github.com/pezkuwichain/subxt/pull/842)) +- For smoldot: support deserializing block number in header from hex or number ([#863](https://github.com/pezkuwichain/subxt/pull/863)) +- Bump Bizinikiwi dependencies to latest ([#905](https://github.com/pezkuwichain/subxt/pull/905)) + +### Fixed + +- wait_for_finalized behavior if the tx dropped, usurped or invalid ([#897](https://github.com/pezkuwichain/subxt/pull/897)) + + +## [0.27.1] - 2023-02-15 + +### Added + +- Add `find_last` for block types ([#825](https://github.com/pezkuwichain/subxt/pull/825)) + +## [0.27.0] - 2023-02-13 + +This is a fairly small release, primarily to bump bizinikiwi dependencies to their latest versions. + +The main breaking change is fairly small: [#804](https://github.com/pezkuwichain/subxt/pull/804). Here, the `BlockNumber` associated type has been removed from `Config` entirely, since it wasn't actually needed anywhere in Subxt. Additionally, the constraints on each of those associated types in `Config` were made more precise, primarily to tidy things up (but this should result in types more easily being able to meet the requirements here). 
If you use custom `Config`, the fix is simply to remove the `BlockNumber` type. If you also use the `Config` trait in your own functions and depend on those constraints, you may be able to define a custom `MyConfig` type which builds off `Config` and adds back any additional bounds that you want. + +Note worthy PRs merged since the last release: + +### Added + +- Add find last function ([#821](https://github.com/pezkuwichain/subxt/pull/821)) +- Doc: first item is current version comment ([#817](https://github.com/pezkuwichain/subxt/pull/817)) + +### Changed + +- Remove unneeded Config bounds and BlockNumber associated type ([#804](https://github.com/pezkuwichain/subxt/pull/804)) + + +## [0.26.0] - 2023-01-24 + +This release adds a number of improvements, most notably: + +- We make Bizinikiwi dependencies optional ([#760](https://github.com/pezkuwichain/subxt/pull/760)), which makes WASM builds both smaller and more reliable. To do this, we re-implement some core types like `AccountId32`, `MultiAddress` and `MultiSignature` internally. +- Allow access to storage entries ([#774](https://github.com/pezkuwichain/subxt/pull/774)) and runtime API's ([#777](https://github.com/pezkuwichain/subxt/pull/777)) from some block. This is part of a move towards a more "block centric" interface, which will better align with the newly available `chainHead` style RPC interface. +- Add RPC methods for the new `chainHead` style interface (see https://pezkuwichain.github.io/json-rpc-interface-spec/). These are currently unstable, but will allow users to start experimenting with this new API if their nodes support it. +- More advanced type substitution is now possible in the codegen interface ([#735](https://github.com/pezkuwichain/subxt/pull/735)). + +This release introduces a number of breaking changes that can be generally be fixed with mechanical tweaks to your code. The notable changes are described below. 
+
+### Make Storage API more Block-centric
+
+See [#774](https://github.com/pezkuwichain/subxt/pull/774). This PR makes the Storage API more consistent with the Events API, and allows access to it from a given block as part of a push to provide a more block centric API that will hopefully be easier to understand, and will align with the new RPC `chainHead` style RPC interface.
+
+Before, your code will look like:
+
+```rust
+let a = api.storage().fetch(&staking_bonded, None).await?;
+```
+
+After, it should look like:
+
+```rust
+let a = api.storage().at(None).await?.fetch(&staking_bonded).await?;
+```
+
+Essentially, the final parameter for choosing which block to call some method at has been moved out of the storage method itself and is now provided to instantiate the storage API, either explicitly via an `.at(optional_block_hash)` as above, or implicitly when calling `block.storage()` to access the same storage methods for some block.
+
+An alternate way to access the same storage (primarily used if you have subscribed to blocks or otherwise are working with some block) now is:
+
+```rust
+let block = api.blocks().at(None).await?
+let a = block.storage().fetch(&staking_bonded, None).await?;
+```
+
+### More advanced type substitution in codegen
+
+See [#735](https://github.com/pezkuwichain/subxt/pull/735). Previously, you could perform basic type substitution like this:
+
+```rust
+#[subxt::subxt(runtime_metadata_path = "../pezkuwi_metadata.scale")]
+pub mod node_runtime {
+    #[subxt::subxt(substitute_type = "sp_arithmetic::per_things::Foo")]
+    use crate::Foo;
+}
+```
+
+This example would use `crate::Foo` every time an `sp_arithmetic::per_things::Foo` was encountered in the codegen. However, this was limited; the substitute type had to have the same number and order of generic parameters for this to work. 
+ +We've changed the interface above into: + +```rust +#[subxt::subxt( + runtime_metadata_path = "../pezkuwi_metadata.scale", + substitute_type( + type = "sp_arithmetic::per_things::Foo", + with = "crate::Foo" + ) +)] +pub mod node_runtime {} +``` + +In this example, we can (optionally) specify the generic parameters we expect to see on the original type ("type"), and then of those, decide which should be present on the substitute type ("with"). If no parameters are provided at all, we'll get the same behaviour as before. This allows much more flexibility when defining substitute types. + +### Optional Bizinikiwi dependencies + +See [#760](https://github.com/pezkuwichain/subxt/pull/760). Subxt now has a "bizinikiwi-compat" feature (enabled by default, and disabled for WASM builds). At present, enabling this feature simply exposes the `PairSigner` (which was always available before), allowing transactions to be signed via Bizinikiwi signer logic (as before). When disabled, you (currently) must bring your own signer implementation, but in return we can avoid bringing in a substantial number of Bizinikiwi dependencies in the process. + +Regardless, this change also tidied up and moved various bits and pieces around to be consistent with this goal. 
To address some common moves, previously we'd have: + +```rust +use subxt::{ + ext::{ + sp_core::{ sr25519, Pair }, + sp_runtime::{ AccountId32, generic::Header }, + }, + tx::{ + Era, + PlainTip, + PezkuwiExtrinsicParamsBuilder + } +}; +``` + +And now this would look more like: + +```rust +// `sp_core` and `sp_runtime` are no longer exposed via `ext`; add the crates yourself at matching versions to use: +use sp_core::{ + sr25519, + Pair, +}; +use subxt::{ + // You'll often want to use the "built-in" `AccountId32` now instead of the `sp_runtime` version: + utils::AccountId32, + // traits used in our `Config` trait are now provided directly in this module: + config::Header, + // Pezkuwi and Bizinikiwi specific Config types are now in the relevant Config section: + config::pezkuwi::{ + Era, + PlainTip, + PezkuwiExtrinsicParamsBuilder + } +} +``` + +Additionally, the `type Hashing` in the `Config` trait is now called `Hasher`, to clarify what it is, and types returned directly from the RPC calls now all live in `crate::rpc::types`, rather than sometimes living in Bizinikiwi crates. 
+ +Some other note worthy PRs that were merged since the last release: + +### Added + +- Add block-centric Storage API ([#774](https://github.com/pezkuwichain/subxt/pull/774)) +- Add `chainHead` RPC methods ([#766](https://github.com/pezkuwichain/subxt/pull/766)) +- Allow for remapping type parameters in type substitutions ([#735](https://github.com/pezkuwichain/subxt/pull/735)) +- Add ability to set custom metadata etc on OnlineClient ([#794](https://github.com/pezkuwichain/subxt/pull/794)) +- Add `Cargo.lock` for deterministic builds ([#795](https://github.com/pezkuwichain/subxt/pull/795)) +- Add API to execute runtime calls ([#777](https://github.com/pezkuwichain/subxt/pull/777)) +- Add bitvec-like generic support to the scale-bits type for use in codegen ([#718](https://github.com/pezkuwichain/subxt/pull/718)) +- Add `--derive-for-type` to cli ([#708](https://github.com/pezkuwichain/subxt/pull/708)) + +### Changed + +- rename subscribe_to_updates() to updater() ([#792](https://github.com/pezkuwichain/subxt/pull/792)) +- Expose `Update` ([#791](https://github.com/pezkuwichain/subxt/pull/791)) +- Expose version info in CLI tool with build-time obtained git hash ([#787](https://github.com/pezkuwichain/subxt/pull/787)) +- Implement deserialize on AccountId32 ([#773](https://github.com/pezkuwichain/subxt/pull/773)) +- Codegen: Preserve attrs and add #[allow(clippy::all)] ([#784](https://github.com/pezkuwichain/subxt/pull/784)) +- make ChainBlockExtrinsic cloneable ([#778](https://github.com/pezkuwichain/subxt/pull/778)) +- Make sp_core and sp_runtime dependencies optional, and bump to latest ([#760](https://github.com/pezkuwichain/subxt/pull/760)) +- Make verbose rpc error display ([#758](https://github.com/pezkuwichain/subxt/pull/758)) +- rpc: Expose the `subscription ID` for `RpcClientT` ([#733](https://github.com/pezkuwichain/subxt/pull/733)) +- events: Fetch metadata at arbitrary blocks ([#727](https://github.com/pezkuwichain/subxt/pull/727)) + +### Fixed + +- 
Fix decoding events via `.as_root_event()` and add test ([#767](https://github.com/pezkuwichain/subxt/pull/767)) +- Retain Rust code items from `mod` decorated with `subxt` attribute ([#721](https://github.com/pezkuwichain/subxt/pull/721)) + + +## [0.25.0] - 2022-11-16 + +This release resolves the `parity-util-mem crate` several version guard by updating bizinikiwi related dependencies which makes +it possible to have other bizinikiwi dependencies in tree again along with subxt. + +In addition the release has several API improvements in the dynamic transaction API along with that subxt now compiles down to WASM. + +Notable PRs merged: + +### Added + +- Add getters for `Module` ([#697](https://github.com/pezkuwichain/subxt/pull/697)) +- add wasm support ([#700](https://github.com/pezkuwichain/subxt/pull/700)) +- Extend the new `api.blocks()` to be the primary way to subscribe and fetch blocks/extrinsics/events ([#691](https://github.com/pezkuwichain/subxt/pull/691)) +- Add runtime_metadata_url to pull metadata directly from a node ([#689](https://github.com/pezkuwichain/subxt/pull/689)) +- Implement `BlocksClient` for working with blocks ([#671](https://github.com/pezkuwichain/subxt/pull/671)) +- Allow specifying the `subxt` crate path for generated code ([#664](https://github.com/pezkuwichain/subxt/pull/664)) +- Allow taking out raw bytes from a SubmittableExtrinsic ([#683](https://github.com/pezkuwichain/subxt/pull/683)) +- Add DecodedValueThunk to allow getting bytes back from dynamic queries ([#680](https://github.com/pezkuwichain/subxt/pull/680)) + +### Changed + +- Update bizinikiwi crates ([#709](https://github.com/pezkuwichain/subxt/pull/709)) +- Make working with nested queries a touch easier ([#714](https://github.com/pezkuwichain/subxt/pull/714)) +- Upgrade to scale-info 2.3 and fix errors ([#704](https://github.com/pezkuwichain/subxt/pull/704)) +- No need to entangle Signer and nonce now ([#702](https://github.com/pezkuwichain/subxt/pull/702)) +- error: 
`RpcError` with custom client error ([#694](https://github.com/pezkuwichain/subxt/pull/694)) +- into_encoded() for consistency ([#685](https://github.com/pezkuwichain/subxt/pull/685)) +- make subxt::Config::Extrinsic Send ([#681](https://github.com/pezkuwichain/subxt/pull/681)) +- Refactor CLI tool to give room for growth ([#667](https://github.com/pezkuwichain/subxt/pull/667)) +- expose jsonrpc-core client ([#672](https://github.com/pezkuwichain/subxt/pull/672)) +- Upgrade clap to v4 ([#678](https://github.com/pezkuwichain/subxt/pull/678)) + + +## [0.24.0] - 2022-09-22 + +This release has a bunch of smaller changes and fixes. The breaking changes are fairly minor and should be easy to address if encountered. Notable additions are: +- Allowing the underlying RPC implementation to be swapped out ([#634](https://github.com/pezkuwichain/subxt/pull/634)). This makes `jsonrpsee` an optional dependency, and opens the door for Subxt to be integrated into things like light clients, since we can decide how to handle RPC calls. +- A low level "runtime upgrade" API is exposed, giving more visibility into when node updates happen in case your application needs to handle them. +- `scale-value` and `scale-decode` dependencies are bumped. The main effect of this is that `bitvec` is no longer used under the hood in the core of Subxt, which helps to remove one hurdle on the way to being able to compile it to WASM. 
+ +Notable PRs merged: + +### Added + +- feat: add low-level `runtime upgrade API` ([#657](https://github.com/pezkuwichain/subxt/pull/657)) +- Add accessor for `StaticTxPayload::call_data` ([#660](https://github.com/pezkuwichain/subxt/pull/660)) +- Store type name of a field in event metadata, and export EventFieldMetadata ([#656](https://github.com/pezkuwichain/subxt/pull/656) and [#654](https://github.com/pezkuwichain/subxt/pull/654)) +- Allow generalising over RPC implementation ([#634](https://github.com/pezkuwichain/subxt/pull/634)) +- Add conversion and default functions for `NumberOrHex` ([#636](https://github.com/pezkuwichain/subxt/pull/636)) +- Allow creating/submitting unsigned transactions, too. ([#625](https://github.com/pezkuwichain/subxt/pull/625)) +- Add Staking Miner and Introspector to usage list ([#647](https://github.com/pezkuwichain/subxt/pull/647)) + +### Changed + +- Bump scale-value and scale-decode ([#659](https://github.com/pezkuwichain/subxt/pull/659)) +- Tweak 0.23 notes and add another test for events ([#618](https://github.com/pezkuwichain/subxt/pull/618)) +- Specialize metadata errors ([#633](https://github.com/pezkuwichain/subxt/pull/633)) +- Simplify the TxPayload trait a little ([#638](https://github.com/pezkuwichain/subxt/pull/638)) +- Remove unnecessary `async` ([#645](https://github.com/pezkuwichain/subxt/pull/645)) +- Use 'sp_core::Hxxx' for all hash types ([#623](https://github.com/pezkuwichain/subxt/pull/623)) + +### Fixed + +- Fix `history_depth` testing ([#662](https://github.com/pezkuwichain/subxt/pull/662)) +- Fix codegen for `codec::Compact` as type parameters ([#651](https://github.com/pezkuwichain/subxt/pull/651)) +- Support latest bizinikiwi release ([#653](https://github.com/pezkuwichain/subxt/pull/653)) + + +## [0.23.0] - 2022-08-11 + +This is one of the most significant releases to date in Subxt, and carries with it a number of significant breaking changes, but in exchange, a number of significant improvements. 
The most significant PR is [#593](https://github.com/pezkuwichain/subxt/pull/593); the fundamental change that this makes is to separate creating a query/transaction/address from submitting it. This gives us flexibility when creating queries; they can be either dynamically or statically generated, but also flexibility in our client, enabling methods to be exposed for online or offline use. + +The best place to look to get a feel for what's changed, aside from the documentation itself, is the `examples` folder. What follows are some examples of the changes you'll need to make, which all follow a similar pattern: + +### Submitting a transaction + +Previously, we'd build a client which is tied to the static codegen, and then use the client to build and submit a transaction like so: + +```rust +let api = ClientBuilder::new() + .build() + .await? + .to_runtime_api::>>(); + +let balance_transfer = api + .tx() + .balances() + .transfer(dest, 10_000)? + .sign_and_submit_then_watch_default(&signer) + .await? + .wait_for_finalized_success() + .await?; +``` + +Now, we build a transaction separately (in this case, using static codegen to guide us as before) and then submit it to a client like so: + +``` rust +let api = OnlineClient::::new().await?; + +let balance_transfer_tx = pezkuwi::tx().balances().transfer(dest, 10_000); + +let balance_transfer = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &signer) + .await? + .wait_for_finalized_success() + .await?; +``` + +See the `examples/examples/submit_and_watch.rs` example for more. + +### Fetching a storage entry + +Previously, we build and submit a storage query in one step: + +```rust +let api = ClientBuilder::new() + .build() + .await? 
+    .to_runtime_api::<pezkuwi::RuntimeApi<DefaultConfig, PezkuwiExtrinsicParams<DefaultConfig>>>();
+
+let entry = api.storage().staking().bonded(&addr, None).await;
+```
+
+Now, we build the storage query separately and submit it to the client:
+
+```rust
+let api = OnlineClient::<PezkuwiConfig>::new().await?;
+
+let staking_bonded = pezkuwi::storage().staking().bonded(&addr);
+
+let entry = api.storage().fetch(&staking_bonded, None).await;
+```
+
+Note that previously, the generated code would do the equivalent of `fetch_or_default` if possible, or `fetch` if no default existed. You must now decide whether to:
+- fetch an entry, returning `None` if it's not found (`api.storage().fetch(..)`), or
+- fetch an entry, returning the default if it's not found (`api.storage().fetch_or_default(..)`).
+
+The static types will protect you against using `fetch_or_default` when no such default exists, and so the recommendation is to try changing all storage requests to use `fetch_or_default`, falling back to using `fetch` where doing so leads to compile errors.
+
+See `examples/examples/concurrent_storage_requests.rs` for an example of fetching entries.
+
+### Iterating over storage entries
+
+Previously:
+
+```rust
+let api = ClientBuilder::new()
+    .build()
+    .await?
+    .to_runtime_api::<pezkuwi::RuntimeApi<DefaultConfig, PezkuwiExtrinsicParams<DefaultConfig>>>();
+
+let mut iter = api
+    .storage()
+    .xcm_pallet()
+    .version_notifiers_iter(None)
+    .await?;
+
+while let Some((key, value)) = iter.next().await? {
+    // ...
+}
+```
+
+Now, as before, building the storage query to iterate over is separate from using it:
+
+```rust
+let api = OnlineClient::<PezkuwiConfig>::new().await?;
+
+let key_addr = pezkuwi::storage()
+    .xcm_pallet()
+    .version_notifiers_root();
+
+let mut iter = api
+    .storage()
+    .iter(key_addr, 10, None).await?;
+
+while let Some((key, value)) = iter.next().await? {
+    // ...
+}
+```
+
+Note that the `_root()` suffix on generated storage queries accesses the root entry at that address,
+and is available when the address is a map that can be iterated over. 
By not appending `_root()`, you'll +be asked to provide the values needed to access a specific entry in the map. + +See the `examples/examples/storage_iterating.rs` example for more. + +### Accessing constants + +Before, we'd build a client and use the client to select and query a constant: + +```rust +let api = ClientBuilder::new() + .build() + .await? + .to_runtime_api::>>(); + +let existential_deposit = api + .constants() + .balances() + .existential_deposit()?; +``` + +Now, similar to the other examples, we separately build a constant _address_ and provide that address to the client to look it up: + +```rust +let api = OnlineClient::::new().await?; + +let address = pezkuwi::constants() + .balances() + .existential_deposit(); + +let existential_deposit = api.constants().at(&address)?; +``` + +See the `examples/examples/fetch_constants.rs` example for more. + +### Subscribing to events + +Event subscriptions themselves are relatively unchanged (although the data you can access/get back has changed a little). Before: + +```rust +let api = ClientBuilder::new() + .build() + .await? + .to_runtime_api::>>(); + +let mut event_sub = api.events().subscribe().await?; + +while let Some(events) = event_sub.next().await { + // ... +} +``` + +Now, we simply swap the client out for our new one, and the rest is similar: + +```rust +let api = OnlineClient::::new().await?; + +let mut event_sub = api.events().subscribe().await?; + +while let Some(events) = event_sub.next().await { + // ... +} +``` + +Note that when working with a single event, the method `event.bytes()` previously returned just the bytes associated with the event fields. Now, `event.bytes()` returns _all_ of the bytes associated with the event. There is a separate method, `event.field_bytes()`, that returns the bytes for just the fields in the event. This change will **not** lead to a compile error, and so it's worth keeping an eye out for any uses of `.bytes()` to update them to `.field_bytes()`. 
+ +See the `examples/examples/subscribe_all_events.rs` example for more. + + +The general pattern, as seen above, is that we break apart constructing a query/address and using it. You can now construct queries dynamically instead and forego all static codegen by using the functionality exposed in the `subxt::dynamic` module instead. + +Other smaller breaking changes have happened, but they should be easier to address by following compile errors. + +For more details about all of the changes, the full commit history since the last release is as follows: + +### Added + +- Expose the extrinsic hash from TxProgress ([#614](https://github.com/pezkuwichain/subxt/pull/614)) +- Add support for `ws` in `subxt-cli` ([#579](https://github.com/pezkuwichain/subxt/pull/579)) +- Expose the SCALE encoded call data of an extrinsic ([#573](https://github.com/pezkuwichain/subxt/pull/573)) +- Validate absolute path for `substitute_type` ([#577](https://github.com/pezkuwichain/subxt/pull/577)) + +### Changed + +- Rework Subxt API to support offline and dynamic transactions ([#593](https://github.com/pezkuwichain/subxt/pull/593)) +- Use scale-decode to help optimise event decoding ([#607](https://github.com/pezkuwichain/subxt/pull/607)) +- Decode raw events using scale_value and return the decoded Values, too ([#576](https://github.com/pezkuwichain/subxt/pull/576)) +- dual license ([#590](https://github.com/pezkuwichain/subxt/pull/590)) +- Don't hash constant values; only their types ([#587](https://github.com/pezkuwichain/subxt/pull/587)) +- metadata: Exclude `field::type_name` from metadata validation ([#595](https://github.com/pezkuwichain/subxt/pull/595)) +- Bump Swatinem/rust-cache from 1.4.0 to 2.0.0 ([#597](https://github.com/pezkuwichain/subxt/pull/597)) +- Update jsonrpsee requirement from 0.14.0 to 0.15.1 ([#603](https://github.com/pezkuwichain/subxt/pull/603)) + + +## [0.22.0] - 2022-06-20 + +With this release, subxt can subscribe to the node's runtime upgrades to ensure that 
the metadata is updated and +extrinsics are properly constructed. + +We have also made some slight API improvements to make in the area of storage keys, and thanks to an external contribution we now support dry running transactions before submitting them. + +This release also improves the documentation, adds UI tests, and defaults the `subxt-cli` to return metadata +bytes instead of the JSON format. + +### Fixed + +- Handle `StorageEntry` empty keys ([#565](https://github.com/pezkuwichain/subxt/pull/565)) +- Fix documentation examples ([#568](https://github.com/pezkuwichain/subxt/pull/568)) +- Fix cargo clippy ([#548](https://github.com/pezkuwichain/subxt/pull/548)) +- fix: Find bizinikiwi port on different log lines ([#536](https://github.com/pezkuwichain/subxt/pull/536)) + +### Added + +- Followup test for checking propagated documentation ([#514](https://github.com/pezkuwichain/subxt/pull/514)) +- feat: refactor signing in order to more easily be able to dryrun ([#547](https://github.com/pezkuwichain/subxt/pull/547)) +- Add subxt documentation ([#546](https://github.com/pezkuwichain/subxt/pull/546)) +- Add ability to iterate over N map storage keys ([#537](https://github.com/pezkuwichain/subxt/pull/537)) +- Subscribe to Runtime upgrades for proper extrinsic construction ([#513](https://github.com/pezkuwichain/subxt/pull/513)) + +### Changed +- Move test crates into a "testing" folder and add a ui (trybuild) test and ui-test helpers ([#567](https://github.com/pezkuwichain/subxt/pull/567)) +- Update jsonrpsee requirement from 0.13.0 to 0.14.0 ([#566](https://github.com/pezkuwichain/subxt/pull/566)) +- Make storage futures only borrow client, not self, for better ergonomics ([#561](https://github.com/pezkuwichain/subxt/pull/561)) +- Bump actions/checkout from 2 to 3 ([#557](https://github.com/pezkuwichain/subxt/pull/557)) +- Deny unused crate dependencies ([#549](https://github.com/pezkuwichain/subxt/pull/549)) +- Implement `Clone` for the generated `RuntimeApi` 
([#544](https://github.com/pezkuwichain/subxt/pull/544)) +- Update color-eyre requirement from 0.5.11 to 0.6.1 ([#540](https://github.com/pezkuwichain/subxt/pull/540)) +- Update jsonrpsee requirement from 0.12.0 to 0.13.0 ([#541](https://github.com/pezkuwichain/subxt/pull/541)) +- Update artifacts and pezkuwi.rs and change CLI to default bytes ([#533](https://github.com/pezkuwichain/subxt/pull/533)) +- Replace `log` with `tracing` and record extrinsic info ([#535](https://github.com/pezkuwichain/subxt/pull/535)) +- Bump jsonrpsee ([#528](https://github.com/pezkuwichain/subxt/pull/528)) + + +## [0.21.0] - 2022-05-02 + +This release adds static metadata validation, via comparing the statically generated API with the target node's runtime +metadata. This implies a breaking change in the subxt API, as the user receives an error when interacting with an +incompatible API at the storage, call, and constant level. + +The `subxt-cli` can check the compatibility of multiple runtime nodes, either full metadata compatibility or +compatibility at the pallet level. + +Users can define custom derives for specific generated types of the API via adding the `derive_for_type` configuration +to the `subxt` attribute. + +The metadata documentation is propagated to the statically generated API. + +Previously developers wanting to build the subxt crate needed the `bizinikiwi` binary dependency in their local +environment. This restriction is removed via moving the integration tests to a dedicated crate. + +The number of dependencies is reduced for individual subxt crates. 
+ +### Fixed +- test-runtime: Add exponential backoff ([#518](https://github.com/pezkuwichain/subxt/pull/518)) + +### Added + +- Add custom derives for specific generated types ([#520](https://github.com/pezkuwichain/subxt/pull/520)) +- Static Metadata Validation ([#478](https://github.com/pezkuwichain/subxt/pull/478)) +- Propagate documentation to runtime API ([#511](https://github.com/pezkuwichain/subxt/pull/511)) +- Add `tidext` in real world usage ([#508](https://github.com/pezkuwichain/subxt/pull/508)) +- Add system health rpc ([#510](https://github.com/pezkuwichain/subxt/pull/510)) + +### Changed +- Put integration tests behind feature flag ([#515](https://github.com/pezkuwichain/subxt/pull/515)) +- Use minimum amount of dependencies for crates ([#524](https://github.com/pezkuwichain/subxt/pull/524)) +- Export `BaseExtrinsicParams` ([#516](https://github.com/pezkuwichain/subxt/pull/516)) +- bump jsonrpsee to v0.10.1 ([#504](https://github.com/pezkuwichain/subxt/pull/504)) + + +## [0.20.0] - 2022-04-06 + +The most significant change in this release is how we create and sign extrinsics, and how we manage the +"additional" and "extra" data that is attached to them. See https://github.com/pezkuwichain/subxt/issues/477, and the +associated PR https://github.com/pezkuwichain/subxt/pull/490 for a more detailed look at the code changes. + +If you're targeting a node with compatible additional and extra transaction data to Bizinikiwi or Pezkuwi, the main +change you'll have to make is to import and use `subxt::PezkuwiExtrinsicParams` or `subxt::BizinikiwiExtrinsicParams` +instead of `subxt::DefaultExtra` (depending on what node you're compatible with), and then use `sign_and_submit_default` +instead of `sign_and_submit` when making a call. Now, `sign_and_submit` accepts a second argument which allows these +parameters (such as mortality and tip payment) to be customized. See `examples/balance_transfer_with_params.rs` for a +small usage example. 
+ +If you're targeting a node which involves custom additional and extra transaction data, you'll need to implement the +trait `subxt::extrinsic::ExtrinsicParams`, which determines the parameters that can be provided to `sign_and_submit`, as +well as how to encode these into the "additional" and "extra" data needed for a transaction. Have a look at +`subxt/src/extrinsic/params.rs` for the trait definition and Bizinikiwi/Pezkuwi implementations. The aim with this change +is to make it easier to customise this for your own chains, and provide a simple way to provide values at runtime. + +### Fixed + +- Test utils: parse port from bizinikiwi binary output to avoid races ([#501](https://github.com/pezkuwichain/subxt/pull/501)) +- Rely on the kernel for port allocation ([#498](https://github.com/pezkuwichain/subxt/pull/498)) + +### Changed + +- Export ModuleError for downstream matching ([#499](https://github.com/pezkuwichain/subxt/pull/499)) +- Bump jsonrpsee to v0.9.0 ([#496](https://github.com/pezkuwichain/subxt/pull/496)) +- Use tokio instead of async-std in tests/examples ([#495](https://github.com/pezkuwichain/subxt/pull/495)) +- Read constants from metadata at runtime ([#494](https://github.com/pezkuwichain/subxt/pull/494)) +- Handle `sp_runtime::ModuleError` bizinikiwi updates ([#492](https://github.com/pezkuwichain/subxt/pull/492)) +- Simplify creating and signing extrinsics ([#490](https://github.com/pezkuwichain/subxt/pull/490)) +- Add `dev_getBlockStats` RPC ([#489](https://github.com/pezkuwichain/subxt/pull/489)) +- scripts: Hardcode github subxt pull link for changelog consistency ([#482](https://github.com/pezkuwichain/subxt/pull/482)) + + +## [0.19.0] - 2022-03-21 + +### Changed + +- Return events from blocks skipped over during Finalization, too ([#473](https://github.com/pezkuwichain/subxt/pull/473)) +- Use RPC call to get account nonce ([#476](https://github.com/pezkuwichain/subxt/pull/476)) +- Add script to generate release changelog based on commits 
([#465](https://github.com/pezkuwichain/subxt/pull/465))
+- README updates ([#472](https://github.com/pezkuwichain/subxt/pull/472))
+- Make EventSubscription and FilterEvents Send-able ([#471](https://github.com/pezkuwichain/subxt/pull/471))
+
+
+## [0.18.1] - 2022-03-04
+
+### Fixed
+
+- Remove unused `sp_version` dependency to fix duplicate `parity-scale-codec` deps ([#466](https://github.com/pezkuwichain/subxt/pull/466))
+
+
+## [0.18.0] - 2022-03-02
+
+### Added
+
+- Expose method to fetch nonce via `Client` ([#451](https://github.com/pezkuwichain/subxt/pull/451))
+
+### Changed
+
+- Reference key storage api ([#447](https://github.com/pezkuwichain/subxt/pull/447))
+- Filter one or multiple events by type from an EventSubscription ([#461](https://github.com/pezkuwichain/subxt/pull/461))
+- New Event Subscription API ([#442](https://github.com/pezkuwichain/subxt/pull/442))
+- Distinct handling for N fields + 1 hasher vs N fields + N hashers ([#458](https://github.com/pezkuwichain/subxt/pull/458))
+- Update scale-info and parity-scale-codec requirements ([#462](https://github.com/pezkuwichain/subxt/pull/462))
+- Substitute BTreeMap/BTreeSet generated types for Vec ([#459](https://github.com/pezkuwichain/subxt/pull/459))
+- Obtain DispatchError::Module info dynamically ([#453](https://github.com/pezkuwichain/subxt/pull/453))
+- Add hardcoded override to ElectionScore ([#455](https://github.com/pezkuwichain/subxt/pull/455))
+- DispatchError::Module is now a tuple variant in latest Bizinikiwi ([#439](https://github.com/pezkuwichain/subxt/pull/439))
+- Fix flaky event subscription test ([#450](https://github.com/pezkuwichain/subxt/pull/450))
+- Improve documentation ([#449](https://github.com/pezkuwichain/subxt/pull/449))
+- Export `codegen::TypeGenerator` ([#444](https://github.com/pezkuwichain/subxt/pull/444))
+- Fix conversion of `Call` struct names to UpperCamelCase ([#441](https://github.com/pezkuwichain/subxt/pull/441))
+- Update release documentation with 
dry-run ([#435](https://github.com/pezkuwichain/subxt/pull/435)) + + +## [0.17.0] - 2022-02-04 + +### Added + +- introduce jsonrpsee client abstraction + kill HTTP support. ([#341](https://github.com/pezkuwichain/subxt/pull/341)) +- Get event context on EventSubscription ([#423](https://github.com/pezkuwichain/subxt/pull/423)) + +### Changed + +- Add more tests for events.rs/decode_and_consume_type ([#430](https://github.com/pezkuwichain/subxt/pull/430)) +- Update bizinikiwi dependencies ([#429](https://github.com/pezkuwichain/subxt/pull/429)) +- export RuntimeError struct ([#427](https://github.com/pezkuwichain/subxt/pull/427)) +- remove unused PalletError struct ([#425](https://github.com/pezkuwichain/subxt/pull/425)) +- Move Subxt crate into a subfolder ([#424](https://github.com/pezkuwichain/subxt/pull/424)) +- Add release checklist ([#418](https://github.com/pezkuwichain/subxt/pull/418)) + + +## [0.16.0] - 2022-02-01 + +*Note*: This is a significant release which introduces support for V14 metadata and macro based codegen, as well as making many breaking changes to the API. 
+ +### Changed + +- Log debug message for JSON-RPC response ([#415](https://github.com/pezkuwichain/subxt/pull/415)) +- Only convert struct names to camel case for Call variant structs ([#412](https://github.com/pezkuwichain/subxt/pull/412)) +- Parameterize AccountData ([#409](https://github.com/pezkuwichain/subxt/pull/409)) +- Allow decoding Events containing BitVecs ([#408](https://github.com/pezkuwichain/subxt/pull/408)) +- Custom derive for cli ([#407](https://github.com/pezkuwichain/subxt/pull/407)) +- make storage-n-map fields public too ([#404](https://github.com/pezkuwichain/subxt/pull/404)) +- add constants api to codegen ([#402](https://github.com/pezkuwichain/subxt/pull/402)) +- Expose transaction::TransactionProgress as public ([#401](https://github.com/pezkuwichain/subxt/pull/401)) +- add interbtc-clients to real world usage section ([#397](https://github.com/pezkuwichain/subxt/pull/397)) +- Make own version of RuntimeVersion to avoid mismatches ([#395](https://github.com/pezkuwichain/subxt/pull/395)) +- Use the generated DispatchError instead of the hardcoded Bizinikiwi one ([#394](https://github.com/pezkuwichain/subxt/pull/394)) +- Remove bounds on Config trait that aren't strictly necessary ([#389](https://github.com/pezkuwichain/subxt/pull/389)) +- add crunch to readme ([#388](https://github.com/pezkuwichain/subxt/pull/388)) +- fix remote example ([#386](https://github.com/pezkuwichain/subxt/pull/386)) +- fetch system chain, name and version ([#385](https://github.com/pezkuwichain/subxt/pull/385)) +- Fix compact event field decoding ([#384](https://github.com/pezkuwichain/subxt/pull/384)) +- fix: use index override when decoding enums in events ([#382](https://github.com/pezkuwichain/subxt/pull/382)) +- Update to jsonrpsee 0.7 and impl Stream on TransactionProgress ([#380](https://github.com/pezkuwichain/subxt/pull/380)) +- Add links to projects using subxt ([#376](https://github.com/pezkuwichain/subxt/pull/376)) +- Use released bizinikiwi 
dependencies ([#375](https://github.com/pezkuwichain/subxt/pull/375)) +- Configurable Config and Extra types ([#373](https://github.com/pezkuwichain/subxt/pull/373)) +- Implement pre_dispatch for SignedExtensions ([#370](https://github.com/pezkuwichain/subxt/pull/370)) +- Export TransactionEvents ([#363](https://github.com/pezkuwichain/subxt/pull/363)) +- Rebuild test-runtime if bizinikiwi binary is updated ([#362](https://github.com/pezkuwichain/subxt/pull/362)) +- Expand the subscribe_and_watch example ([#361](https://github.com/pezkuwichain/subxt/pull/361)) +- Add TooManyConsumers variant to track latest sp-runtime addition ([#360](https://github.com/pezkuwichain/subxt/pull/360)) +- Implement new API for sign_and_submit_then_watch ([#354](https://github.com/pezkuwichain/subxt/pull/354)) +- Simpler dependencies ([#353](https://github.com/pezkuwichain/subxt/pull/353)) +- Refactor type generation, remove code duplication ([#352](https://github.com/pezkuwichain/subxt/pull/352)) +- Make system properties an arbitrary JSON object, plus CI fixes ([#349](https://github.com/pezkuwichain/subxt/pull/349)) +- Fix a couple of CI niggles ([#344](https://github.com/pezkuwichain/subxt/pull/344)) +- Add timestamp pallet test ([#340](https://github.com/pezkuwichain/subxt/pull/340)) +- Add nightly CI check against latest bizinikiwi. 
([#335](https://github.com/pezkuwichain/subxt/pull/335)) +- Ensure metadata is in sync with running node during tests ([#333](https://github.com/pezkuwichain/subxt/pull/333)) +- Update to jsonrpsee 0.5.1 ([#332](https://github.com/pezkuwichain/subxt/pull/332)) +- Update bizinikiwi and hardcoded default ChargeAssetTxPayment extension ([#330](https://github.com/pezkuwichain/subxt/pull/330)) +- codegen: fix compact unnamed fields ([#327](https://github.com/pezkuwichain/subxt/pull/327)) +- Check docs and run clippy on PRs ([#326](https://github.com/pezkuwichain/subxt/pull/326)) +- Additional parameters for SignedExtra ([#322](https://github.com/pezkuwichain/subxt/pull/322)) +- fix: also processess initialize and finalize events in event subscription ([#321](https://github.com/pezkuwichain/subxt/pull/321)) +- Release initial versions of subxt-codegen and subxt-cli ([#320](https://github.com/pezkuwichain/subxt/pull/320)) +- Add some basic usage docs to README. ([#319](https://github.com/pezkuwichain/subxt/pull/319)) +- Update jsonrpsee ([#317](https://github.com/pezkuwichain/subxt/pull/317)) +- Add missing cargo metadata fields for new crates ([#311](https://github.com/pezkuwichain/subxt/pull/311)) +- fix: keep processing a block's events after encountering a dispatch error ([#310](https://github.com/pezkuwichain/subxt/pull/310)) +- Codegen: enum variant indices ([#308](https://github.com/pezkuwichain/subxt/pull/308)) +- fix extrinsics retracted ([#307](https://github.com/pezkuwichain/subxt/pull/307)) +- Add utility pallet tests ([#300](https://github.com/pezkuwichain/subxt/pull/300)) +- fix metadata constants ([#299](https://github.com/pezkuwichain/subxt/pull/299)) +- Generate runtime API from metadata ([#294](https://github.com/pezkuwichain/subxt/pull/294)) +- Add NextKeys and QueuedKeys for session module ([#291](https://github.com/pezkuwichain/subxt/pull/291)) +- deps: update jsonrpsee 0.3.0 ([#289](https://github.com/pezkuwichain/subxt/pull/289)) +- deps: update 
jsonrpsee 0.2.0 ([#285](https://github.com/pezkuwichain/subxt/pull/285)) +- deps: Reorg the order of deps ([#284](https://github.com/pezkuwichain/subxt/pull/284)) +- Expose the rpc client in Client ([#267](https://github.com/pezkuwichain/subxt/pull/267)) +- update jsonrpsee to 0.2.0-alpha.6 ([#266](https://github.com/pezkuwichain/subxt/pull/266)) +- Remove funty pin, upgrade codec ([#265](https://github.com/pezkuwichain/subxt/pull/265)) +- Use async-trait ([#264](https://github.com/pezkuwichain/subxt/pull/264)) +- [jsonrpsee http client]: support tokio1 & tokio02. ([#263](https://github.com/pezkuwichain/subxt/pull/263)) +- impl `From>` and `From>` ([#257](https://github.com/pezkuwichain/subxt/pull/257)) +- update jsonrpsee ([#251](https://github.com/pezkuwichain/subxt/pull/251)) +- return none if subscription returns early ([#250](https://github.com/pezkuwichain/subxt/pull/250)) + + +## [0.15.0] - 2021-03-15 + +### Added +- implement variant of subscription that returns finalized storage changes - [#237](https://github.com/pezkuwichain/subxt/pull/237) +- implement session handling for unsubscribe in subxt-client - [#242](https://github.com/pezkuwichain/subxt/pull/242) + +### Changed +- update jsonrpsee [#251](https://github.com/pezkuwichain/subxt/pull/251) +- return none if subscription returns early [#250](https://github.com/pezkuwichain/subxt/pull/250) +- export ModuleError and RuntimeError for downstream usage - [#246](https://github.com/pezkuwichain/subxt/pull/246) +- rpc client methods should be public for downstream usage - [#240](https://github.com/pezkuwichain/subxt/pull/240) +- re-export WasmExecutionMethod for downstream usage - [#239](https://github.com/pezkuwichain/subxt/pull/239) +- integration with jsonrpsee v2 - [#214](https://github.com/pezkuwichain/subxt/pull/214) +- expose wasm execution method on subxt client config - [#230](https://github.com/pezkuwichain/subxt/pull/230) +- Add hooks to register event types for decoding - 
[#227](https://github.com/pezkuwichain/subxt/pull/227)
+- Bizinikiwi 3.0 - [#232](https://github.com/pezkuwichain/subxt/pull/232)
+
+
+## [0.14.0] - 2021-02-05
+
+- Refactor event type decoding and declaration [#221](https://github.com/pezkuwichain/subxt/pull/221)
+- Add Balances Locks [#197](https://github.com/pezkuwichain/subxt/pull/197)
+- Add event Phase::Initialization [#215](https://github.com/pezkuwichain/subxt/pull/215)
+- Make type explicit [#217](https://github.com/pezkuwichain/subxt/pull/217)
+- Upgrade dependencies, bumps bizinikiwi to 2.0.1 [#219](https://github.com/pezkuwichain/subxt/pull/219)
+- Export extra types [#212](https://github.com/pezkuwichain/subxt/pull/212)
+- Enable retrieval of constants from runtime metadata [#207](https://github.com/pezkuwichain/subxt/pull/207)
+- register type sizes for u64 and u128 [#200](https://github.com/pezkuwichain/subxt/pull/200)
+- Remove some bizinikiwi dependencies to improve compile time [#194](https://github.com/pezkuwichain/subxt/pull/194)
+- propagate 'RuntimeError's to 'decode_raw_bytes' caller [#189](https://github.com/pezkuwichain/subxt/pull/189)
+- Derive `Clone` for `PairSigner` [#184](https://github.com/pezkuwichain/subxt/pull/184)
+
+
+## [0.13.0]
+
+- Make the contract call extrinsic work [#165](https://github.com/pezkuwichain/subxt/pull/165)
+- Update to Bizinikiwi 2.0.0 [#173](https://github.com/pezkuwichain/subxt/pull/173)
+- Display RawEvent data in hex [#168](https://github.com/pezkuwichain/subxt/pull/168)
+- Add SudoUncheckedWeightCall [#167](https://github.com/pezkuwichain/subxt/pull/167)
+- Add SetCodeWithoutChecksCall [#166](https://github.com/pezkuwichain/subxt/pull/166)
+- Improve contracts pallet tests [#163](https://github.com/pezkuwichain/subxt/pull/163)
+- Make Metadata types public [#162](https://github.com/pezkuwichain/subxt/pull/162)
+- Fix option decoding and add basic sanity test [#161](https://github.com/pezkuwichain/subxt/pull/161)
+- Add staking support 
[#160](https://github.com/pezkuwichain/subxt/pull/160)
+- Decode option event arg [#158](https://github.com/pezkuwichain/subxt/pull/158)
+- Remove unnecessary Sync bound [#172](https://github.com/pezkuwichain/subxt/pull/172)
+
+
+## [0.12.0]
+
+- Only return an error if the extrinsic failed. [#156](https://github.com/pezkuwichain/subxt/pull/156)
+- Update to rc6. [#155](https://github.com/pezkuwichain/subxt/pull/155)
+- Different assert. [#153](https://github.com/pezkuwichain/subxt/pull/153)
+- Add a method to fetch an unhashed key, close #100 [#152](https://github.com/pezkuwichain/subxt/pull/152)
+- Fix port number. [#151](https://github.com/pezkuwichain/subxt/pull/151)
+- Implement the `concat` in `twox_64_concat` [#150](https://github.com/pezkuwichain/subxt/pull/150)
+- Storage map iter [#148](https://github.com/pezkuwichain/subxt/pull/148)
+
+
+## [0.11.0]
+
+- Fix build error, wabt 0.9.2 is yanked [#146](https://github.com/pezkuwichain/subxt/pull/146)
+- Rc5 [#143](https://github.com/pezkuwichain/subxt/pull/143)
+- Refactor: extract functions and types for creating extrinsics [#138](https://github.com/pezkuwichain/subxt/pull/138)
+- event subscription example [#140](https://github.com/pezkuwichain/subxt/pull/140)
+- Document the `Call` derive macro [#137](https://github.com/pezkuwichain/subxt/pull/137)
+- Document the #[module] macro [#135](https://github.com/pezkuwichain/subxt/pull/135)
+- Support authors api. [#134](https://github.com/pezkuwichain/subxt/pull/134)
+
+
+## [0.10.1] - 2020-06-19
+
+- Release client v0.2.0 [#133](https://github.com/pezkuwichain/subxt/pull/133)
+
+
+## [0.10.0] - 2020-06-19
+
+- Upgrade to bizinikiwi rc4 release [#131](https://github.com/pezkuwichain/subxt/pull/131)
+- Support unsigned extrinsics. [#130](https://github.com/pezkuwichain/subxt/pull/130)
+
+
+## [0.9.0] - 2020-06-25
+
+- Events sub [#126](https://github.com/pezkuwichain/subxt/pull/126)
+- Improve error handling in proc-macros, handle DispatchError etc. 
[#123](https://github.com/pezkuwichain/subxt/pull/123) +- Support embedded full/light node clients. [#91](https://github.com/pezkuwichain/subxt/pull/91) +- Zero sized types [#121](https://github.com/pezkuwichain/subxt/pull/121) +- Fix optional store items. [#120](https://github.com/pezkuwichain/subxt/pull/120) +- Make signing fallable and asynchronous [#119](https://github.com/pezkuwichain/subxt/pull/119) + + +## [0.8.0] - 2020-05-26 + +- Update to Bizinikiwi release candidate [#116](https://github.com/pezkuwichain/subxt/pull/116) +- Update to alpha.8 [#114](https://github.com/pezkuwichain/subxt/pull/114) +- Refactors the api [#113](https://github.com/pezkuwichain/subxt/pull/113) + + +## [0.7.0] - 2020-05-13 + +- Split subxt [#102](https://github.com/pezkuwichain/subxt/pull/102) +- Add support for RPC `state_getReadProof` [#106](https://github.com/pezkuwichain/subxt/pull/106) +- Update to bizinikiwi alpha.7 release [#105](https://github.com/pezkuwichain/subxt/pull/105) +- Double map and plain storage support, introduce macros [#93](https://github.com/pezkuwichain/subxt/pull/93) +- Raw payload return SignedPayload struct [#92](https://github.com/pezkuwichain/subxt/pull/92) + + +## [0.6.0] - 2020-04-15 + +- Raw extrinsic payloads in Client [#83](https://github.com/pezkuwichain/subxt/pull/83) +- Custom extras [#89](https://github.com/pezkuwichain/subxt/pull/89) +- Wrap and export BlockNumber [#87](https://github.com/pezkuwichain/subxt/pull/87) +- All bizinikiwi dependencies upgraded to `alpha.6` + + +## [0.5.0] - 2020-03-25 + +- First release +- All bizinikiwi dependencies upgraded to `alpha.5` diff --git a/vendor/pezkuwi-subxt/CODEOWNERS b/vendor/pezkuwi-subxt/CODEOWNERS new file mode 100644 index 00000000..4f6bce81 --- /dev/null +++ b/vendor/pezkuwi-subxt/CODEOWNERS @@ -0,0 +1,23 @@ +# Lists some code owners. +# +# A codeowner just oversees some part of the codebase. If an owned file is changed then the +# corresponding codeowner receives a review request. 
An approval of the codeowner might be +# required for merging a PR (depends on repository settings). +# +# For details about syntax, see: +# https://help.github.com/en/articles/about-code-owners +# But here are some important notes: +# +# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` +# which can be everywhere. +# - Multiple owners are supported. +# - Either handle (e.g, @github_user or @github_org/team) or email can be used. Keep in mind, +# that handles might work better because they are more recognizable on GitHub, +# you can use them for mentioning unlike an email. +# - The latest matching rule, if multiple, takes precedence. + +# main codeowner +* @paritytech/subxt-team + +# CI +/.github/ @paritytech/ci @paritytech/subxt-team diff --git a/vendor/pezkuwi-subxt/Cargo.toml b/vendor/pezkuwi-subxt/Cargo.toml new file mode 100644 index 00000000..af3d8627 --- /dev/null +++ b/vendor/pezkuwi-subxt/Cargo.toml @@ -0,0 +1,5 @@ +# This is a virtual manifest for the vendored pezkuwi-subxt crates +# Individual crates are managed by the main pezkuwi-sdk workspace + +[workspace] +# Empty workspace - crates are part of parent workspace diff --git a/vendor/pezkuwi-subxt/FILE_TEMPLATE b/vendor/pezkuwi-subxt/FILE_TEMPLATE new file mode 100644 index 00000000..d4fdb3cb --- /dev/null +++ b/vendor/pezkuwi-subxt/FILE_TEMPLATE @@ -0,0 +1,3 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. diff --git a/vendor/pezkuwi-subxt/LICENSE b/vendor/pezkuwi-subxt/LICENSE new file mode 100644 index 00000000..fdfe42be --- /dev/null +++ b/vendor/pezkuwi-subxt/LICENSE @@ -0,0 +1,17 @@ +Copyright 2019-2025 Parity Technologies (UK) Ltd. 
+ +This program is free software: you can redistribute it and/or modify +it under the terms of (at your option) either the Apache License, +Version 2.0, or the GNU General Public License as published by the +Free Software Foundation, either version 3 of the License, or (at your +option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +For details and specific language governing permissions and +limitations, see either + + - http://www.gnu.org/licenses/ for the GNU GPL + - http://www.apache.org/licenses/LICENSE-2.0 for the Apache license \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/README.md b/vendor/pezkuwi-subxt/README.md new file mode 100644 index 00000000..d1e69e1a --- /dev/null +++ b/vendor/pezkuwi-subxt/README.md @@ -0,0 +1,79 @@ +# subxt · [![build](https://github.com/pezkuwichain/subxt/actions/workflows/rust.yml/badge.svg)](https://github.com/pezkuwichain/subxt/actions/workflows/rust.yml) [![Latest Version](https://img.shields.io/crates/v/subxt.svg)](https://crates.io/crates/subxt) [![Documentation](https://docs.rs/subxt/badge.svg)](https://docs.rs/subxt) + +Subxt is a library for interacting with [Bizinikiwi](https://github.com/pezkuwichain/pezkuwi-sdk) based nodes in Rust and WebAssembly. It can: + +- Submit Extrinsics (this is where the name comes from). +- Subscribe to blocks, reading the extrinsics and associated events from them. +- Read and iterate over storage values. +- Read constants and custom values from the metadata. +- Call runtime APIs, returning the results. +- Do all of the above via a safe, statically typed interface or via a dynamic one when you need the flexibility. +- Compile to WASM and run entirely in the browser. +- Do a bunch of things in a `#[no_std]` environment via the `subxt-core` crate. +- Use a built-in light client (`smoldot`) to interact with chains. 
+ +## Usage + +Take a look in the [examples](./subxt/examples) folder or the [examples](./examples) folder for various smaller or +larger `subxt` usage examples, or [read the guide](https://docs.rs/subxt/latest/subxt/book/index.html) to learn more. + +### Downloading metadata from a Bizinikiwi node + +Use the [`subxt-cli`](./cli) tool to download the metadata for your target runtime from a node. + +1. Install: + +```bash +cargo install subxt-cli +``` + +2. Save the encoded metadata to a file: + +```bash +subxt metadata -f bytes > metadata.scale +``` + +This defaults to querying the metadata of a locally running node on the default `http://localhost:9933/`. If querying +a different node then the `metadata` command accepts a `--url` argument. + +## Subxt Documentation + +For more details regarding utilizing subxt, please visit the [documentation](https://docs.rs/subxt/latest/subxt/). + +## Integration Testing + +Most tests require a running bizinikiwi node to communicate with. This is done by spawning an instance of the +bizinikiwi node per test. It requires an up-to-date `bizinikiwi` executable on your path. + +This can be installed from source via cargo: + +```bash +cargo install --git https://github.com/pezkuwichain/pezkuwi-sdk staging-node-cli --force +``` + +## Real world usage + +Please add your project to this list via a PR. + +- [cargo-contract](https://github.com/pezkuwichain/cargo-contract/) CLI for interacting with Wasm smart contracts. +- [xcm-cli](https://github.com/ascjones/xcm-cli) CLI for submitting XCM messages. +- [phala-pherry](https://github.com/Phala-Network/phala-blockchain/tree/master/standalone/pherry) The relayer between Phala blockchain and the off-chain Secure workers. +- [crunch](https://github.com/turboflakes/crunch) CLI to claim staking rewards in batch every Era or X hours for bizinikiwi-based chains. 
+- [interbtc-clients](https://github.com/interlay/interbtc-clients) Client implementations for the interBTC parachain; notably the Vault / Relayer and Oracle. +- [tidext](https://github.com/tidelabs/tidext) Tidechain client with Stronghold signer. +- [staking-miner-v2](https://github.com/pezkuwichain/staking-miner-v2) Submit NPos election solutions and get rewards. +- [pezkuwi-introspector](https://github.com/pezkuwichain/pezkuwi-introspector) Tools for monitoring Pezkuwi nodes. +- [ink!](https://github.com/pezkuwichain/ink) Smart contract language that uses `subxt` for allowing developers to conduct [End-to-End testing](https://use.ink/basics/contract-testing/end-to-end-e2e-testing) of their contracts. +- [Chainflip](https://github.com/chainflip-io/chainflip-backend) A decentralised exchange for native cross-chain swaps. +- [Hyperbridge](https://github.com/polytope-labs/hyperbridge) A hyperscalable coprocessor for verifiable cross-chain interoperability. +- [pop CLI](https://github.com/r0gue-io/pop-cli) The all-in-one tool for Pezkuwi development. + +**Alternatives** + +[bizinikiwi-api-client](https://github.com/scs/bizinikiwi-api-client) provides similar functionality. + +#### License + +The entire code within this repository is dual licensed under the _GPL-3.0_ or _Apache-2.0_ licenses. See [the LICENSE](./LICENSE) file for more details. + +Please contact us if you have questions about the licensing of our products. diff --git a/vendor/pezkuwi-subxt/RELEASING.md b/vendor/pezkuwi-subxt/RELEASING.md new file mode 100644 index 00000000..21499a2f --- /dev/null +++ b/vendor/pezkuwi-subxt/RELEASING.md @@ -0,0 +1,108 @@ +# Release Checklist + +These steps assume that you've checked out the Subxt repository and are in the root directory of it. + +We also assume that ongoing work done is being merged directly to the `master` branch. + +1. Ensure that everything you'd like to see released is on the `master` branch. + +2. 
Create a release branch off `master`, for example `release-v0.17.0`. Decide how far the version needs to be bumped based + on the changes to date. If unsure what to bump the version to (e.g. is it a major, minor or patch release), check with the + Parity Tools team. + +3. Check that you're happy with the current documentation. + + ``` + cargo doc --open + ``` + + CI checks for broken internal links at the moment. Optionally you can also confirm that any external links + are still valid like so: + + ``` + cargo install cargo-deadlinks + cargo deadlinks --check-http + ``` + + If there are minor issues with the documentation, they can be fixed in the release branch. + +4. Bump the crate versions in the root `Cargo.toml` to whatever was decided in step 2 (basically a find and replace from old version to new version in this file should do the trick). + +5. Ensure the `Cargo.lock` file is up to date. + + ``` + cargo generate-lockfile + ``` + +6. Update `CHANGELOG.md` to reflect the difference between this release and the last. If you're unsure of + what to add, check with the Tools team. See the `CHANGELOG.md` file for details of the format it follows. + + First, if there have been any significant changes, add a description of those changes to the top of the + changelog entry for this release. + + Next, you can use the following script to generate the merged PRs between releases: + + ``` + ./scripts/generate_changelog.sh + ``` + + Ensure that the script picked the latest published release tag (e.g. if releasing `v0.17.0`, the script should + provide `[+] Latest release tag: v0.16.0` ). Then group the PRs into "Fixed", "Added" and "Changed" sections, and make any + other adjustments that you feel are necessary for clarity. + +7. If any of the differences impact the minimum version of `rustc` that the code will run on, please update the `rust-version` + field in the root `Cargo.toml` accordingly. + +8. 
Commit any of the above changes to the release branch and open a PR in GitHub with a base of `master`. + +9. Once the branch has been reviewed and passes CI, merge it. + +10. Now, we're ready to publish the release to crates.io. + + 1. Checkout `master`, ensuring we're looking at the latest merge (`git pull`). + + ``` + git checkout master && git pull + ``` + + 2. Perform a final sanity check that everything looks ok. + + ``` + cargo test --all-targets + ``` + + 3. Run the following command to publish each crate in the required order (allowing + a little time in between each to let crates.io catch up with what we've published). + + ``` + (cd utils/strip-metadata && cargo publish) && \ + (cd metadata && cargo publish) && \ + (cd lightclient && cargo publish) && \ + (cd utils/fetch-metadata && cargo publish) && \ + (cd codegen && cargo publish) && \ + (cd macro && cargo publish); + ``` + + Now, remove the dev dependencies from `subxt-core` (to avoid circular deps), and then run: + + ``` + (cd core && cargo publish) && \ + (cd rpcs && cargo publish) && \ + (cd subxt && cargo publish) && \ + (cd signer && cargo publish) && \ + (cd cli && cargo publish); + ``` + + Finally, put back the dev dependencies in `subxt-core`. + +11. If the release was successful, tag the commit that we released in the `master` branch with the + version that we just released, for example: + + ``` + git tag -s v0.17.0 # use the version number you've just published to crates.io, not this one + git push --tags + ``` + + Once this is pushed, go along to [the releases page on GitHub](https://github.com/pezkuwichain/subxt/releases) + and draft a new release which points to the tag you just pushed to `master` above. Copy the changelog comments + for the current release into the release description. 
diff --git a/vendor/pezkuwi-subxt/clippy.toml b/vendor/pezkuwi-subxt/clippy.toml new file mode 100644 index 00000000..288718a8 --- /dev/null +++ b/vendor/pezkuwi-subxt/clippy.toml @@ -0,0 +1,3 @@ +# result_large_err lint complains if error variant is 128 bytes or more by default. +# Our error is. Let's up this limit a bit for now to avoid lots of warnings. +large-error-threshold = 512 diff --git a/vendor/pezkuwi-subxt/codegen/Cargo.toml b/vendor/pezkuwi-subxt/codegen/Cargo.toml new file mode 100644 index 00000000..436c6d2c --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "pezkuwi-subxt-codegen" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +documentation = "https://docs.rs/pezkuwi-subxt-codegen" +homepage.workspace = true +description = "Generate an API for interacting with a Pezkuwi/Bizinikiwi node from FRAME metadata" + +[features] +default = [] +web = ["getrandom/js"] + +[dependencies] +codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] } +frame-metadata = { workspace = true, optional = true } +heck = { workspace = true } +pezkuwi-subxt-metadata = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } +scale-info = { workspace = true } +scale-typegen = { workspace = true } +syn = { workspace = true } +thiserror = { workspace = true } + +# Included if "web" feature is enabled, to enable its js feature. 
+getrandom = { workspace = true, optional = true } + +[dev-dependencies] +frame-metadata = { workspace = true } +scale-info = { workspace = true, features = ["bit-vec"] } + +[package.metadata.docs.rs] +features = ["default"] +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/codegen/src/api/calls.rs b/vendor/pezkuwi-subxt/codegen/src/api/calls.rs new file mode 100644 index 00000000..d6fb0508 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/calls.rs @@ -0,0 +1,143 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::CodegenError; +use heck::{ToSnakeCase as _, ToUpperCamelCase as _}; +use pezkuwi_subxt_metadata::PalletMetadata; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use scale_typegen::{ + TypeGenerator, + typegen::ir::{ToTokensWithSettings, type_ir::CompositeIRKind}, +}; + +/// Generate calls from the provided pallet's metadata. Each call returns a `StaticPayload` +/// that can be passed to the subxt client to submit/sign/encode. +/// +/// # Arguments +/// +/// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the +/// runtime metadata. +/// - `pallet` - Pallet metadata from which the calls are generated. +/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. +/// `::pezkuwi_subxt::ext::pezkuwi_subxt_core` when using subxt as a dependency. +pub fn generate_calls( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + // Early return if the pallet has no calls. 
+ let Some(call_ty) = pallet.call_ty_id() else { + return Ok(quote!()); + }; + + let variant_names_and_struct_defs = super::generate_structs_from_variants( + type_gen, + call_ty, + |name| name.to_upper_camel_case().into(), + "Call", + )?; + let (call_structs, call_fns): (Vec<_>, Vec<_>) = variant_names_and_struct_defs + .into_iter() + .map(|var| { + let (call_fn_args, call_args): (Vec<_>, Vec<_>) = match &var.composite.kind { + CompositeIRKind::Named(named_fields) => named_fields + .iter() + .map(|(name, field)| { + // Note: fn_arg_type this is relative the type path of the type alias when + // prefixed with `types::`, e.g. `set_max_code_size::New` + let fn_arg_type = field.type_path.to_token_stream(type_gen.settings()); + let call_arg = if field.is_boxed { + quote! { #name: #crate_path::alloc::boxed::Box::new(#name) } + } else { + quote! { #name } + }; + (quote!( #name: types::#fn_arg_type ), call_arg) + }) + .unzip(), + CompositeIRKind::NoFields => Default::default(), + CompositeIRKind::Unnamed(_) => { + return Err(CodegenError::InvalidCallVariant(call_ty)); + }, + }; + + let pallet_name = pallet.name(); + let call_name = &var.variant_name; + let struct_name = &var.composite.name; + let Some(call_hash) = pallet.call_hash(call_name) else { + return Err(CodegenError::MissingCallMetadata( + pallet_name.into(), + call_name.to_string(), + )); + }; + let fn_name = format_ident!("{}", var.variant_name.to_snake_case()); + // Propagate the documentation just to `TransactionApi` methods, while + // draining the documentation of inner call structures. + let docs = &var.composite.docs; + + // this converts the composite into a full struct type. No Type Parameters needed here. + let struct_def = + type_gen.upcast_composite(&var.composite).to_token_stream(type_gen.settings()); + let alias_mod = var.type_alias_mod; + // The call structure's documentation was stripped above. + let call_struct = quote! 
{ + #struct_def + #alias_mod + + impl #crate_path::blocks::StaticExtrinsic for #struct_name { + const PALLET: &'static str = #pallet_name; + const CALL: &'static str = #call_name; + } + }; + + let client_fn = quote! { + #docs + pub fn #fn_name( + &self, + #( #call_fn_args, )* + ) -> #crate_path::tx::payload::StaticPayload { + #crate_path::tx::payload::StaticPayload::new_static( + #pallet_name, + #call_name, + types::#struct_name { #( #call_args, )* }, + [#(#call_hash,)*] + ) + } + }; + + Ok((call_struct, client_fn)) + }) + .collect::, _>>()? + .into_iter() + .unzip(); + + let call_type = type_gen.resolve_type_path(call_ty)?.to_token_stream(type_gen.settings()); + let call_ty = type_gen.resolve_type(call_ty)?; + let docs = type_gen.docs_from_scale_info(&call_ty.docs); + + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! { + #docs + pub type Call = #call_type; + pub mod calls { + use super::root_mod; + use super::#types_mod_ident; + + type DispatchError = ::pezsp_runtime::DispatchError; + + pub mod types { + use super::#types_mod_ident; + + #( #call_structs )* + } + + pub struct TransactionApi; + + impl TransactionApi { + #( #call_fns )* + } + } + }) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/constants.rs b/vendor/pezkuwi-subxt/codegen/src/api/constants.rs new file mode 100644 index 00000000..d8b021b5 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/constants.rs @@ -0,0 +1,95 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use heck::ToSnakeCase as _; +use pezkuwi_subxt_metadata::PalletMetadata; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use scale_typegen::{TypeGenerator, typegen::ir::ToTokensWithSettings}; + +use super::CodegenError; + +/// Generate constants from the provided pallet's metadata. +/// +/// The function creates a new module named `constants` under the pallet's module. 
+/// ```rust,ignore +/// pub mod PalletName { +/// pub mod constants { +/// ... +/// } +/// } +/// ``` +/// +/// The constants are exposed via the `ConstantsApi` wrapper. +/// +/// Although the constants are defined in the provided static metadata, the API +/// ensures that the constants are returned from the runtime metadata of the node. +/// This ensures that if the node's constants change value, we'll always see the latest values. +/// +/// # Arguments +/// +/// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the +/// runtime metadata. +/// - `pallet` - Pallet metadata from which the constants are generated. +/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. +/// `::pezkuwi_subxt::ext::pezkuwi_subxt_core` when using subxt as a dependency. +pub fn generate_constants( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + // Early return if the pallet has no constants. + if pallet.constants().len() == 0 { + return Ok(quote!()); + } + + let constant_fns = pallet + .constants() + .map(|constant| { + let fn_name = format_ident!("{}", constant.name().to_snake_case()); + let pallet_name = pallet.name(); + let constant_name = constant.name(); + let Some(constant_hash) = pallet.constant_hash(constant_name) else { + return Err(CodegenError::MissingConstantMetadata( + constant_name.into(), + pallet_name.into(), + )); + }; + + let return_ty = + type_gen.resolve_type_path(constant.ty())?.to_token_stream(type_gen.settings()); + let docs = constant.docs(); + let docs = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + + Ok(quote! 
{ + #docs + pub fn #fn_name(&self) -> #crate_path::constants::address::StaticAddress<#return_ty> { + #crate_path::constants::address::StaticAddress::new_static( + #pallet_name, + #constant_name, + [#(#constant_hash,)*] + ) + } + }) + }) + .collect::, _>>()?; + + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! { + pub mod constants { + use super::#types_mod_ident; + + pub struct ConstantsApi; + + impl ConstantsApi { + #(#constant_fns)* + } + } + }) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/custom_values.rs b/vendor/pezkuwi-subxt/codegen/src/api/custom_values.rs new file mode 100644 index 00000000..7fad41ba --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/custom_values.rs @@ -0,0 +1,75 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use heck::ToSnakeCase as _; +use pezkuwi_subxt_metadata::{CustomValueMetadata, Metadata}; +use scale_typegen::{TypeGenerator, typegen::ir::ToTokensWithSettings}; +use std::collections::HashSet; + +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +/// Generate the custom values mod, if there are any custom values in the metadata. Else returns +/// None. +pub fn generate_custom_values( + metadata: &Metadata, + type_gen: &TypeGenerator, + crate_path: &syn::Path, +) -> TokenStream2 { + let mut fn_names_taken = HashSet::new(); + let custom = metadata.custom(); + let custom_values_fns = custom.iter().filter_map(|custom_value| { + generate_custom_value_fn(custom_value, type_gen, crate_path, &mut fn_names_taken) + }); + + quote! { + pub struct CustomValuesApi; + + impl CustomValuesApi { + #(#custom_values_fns)* + } + } +} + +/// Generates runtime functions for the given API metadata. +/// Returns None, if the name would not make for a valid identifier. 
+fn generate_custom_value_fn( + custom_value: CustomValueMetadata, + type_gen: &TypeGenerator, + crate_path: &syn::Path, + fn_names_taken: &mut HashSet, +) -> Option { + // names are transformed to snake case to make for good function identifiers. + let name = custom_value.name(); + let fn_name = name.to_snake_case(); + if fn_names_taken.contains(&fn_name) { + return None; + } + // if the fn_name would be an invalid ident, return None: + let fn_name_ident = syn::parse_str::(&fn_name).ok()?; + fn_names_taken.insert(fn_name); + + let custom_value_hash = custom_value.hash(); + + // for custom values it is important to check if the type id is actually in the metadata: + let type_is_valid = custom_value.types().resolve(custom_value.type_id()).is_some(); + + let (return_ty, decodable) = if type_is_valid { + let return_ty = type_gen + .resolve_type_path(custom_value.type_id()) + .expect("type is in metadata; qed") + .to_token_stream(type_gen.settings()); + let decodable = quote!(#crate_path::utils::Maybe); + (return_ty, decodable) + } else { + // if type registry does not contain the type, we can just return the Encoded scale bytes. + (quote!(()), quote!(#crate_path::utils::No)) + }; + + Some(quote!( + pub fn #fn_name_ident(&self) -> #crate_path::custom_values::address::StaticAddress<#return_ty, #decodable> { + #crate_path::custom_values::address::StaticAddress::new_static(#name, [#(#custom_value_hash,)*]) + } + )) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/errors.rs b/vendor/pezkuwi-subxt/codegen/src/api/errors.rs new file mode 100644 index 00000000..08927e30 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/errors.rs @@ -0,0 +1,34 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use pezkuwi_subxt_metadata::PalletMetadata; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use scale_typegen::TypeGenerator; + +use super::CodegenError; +use scale_typegen::typegen::ir::ToTokensWithSettings; + +/// Generate error type alias from the provided pallet metadata. +pub fn generate_error_type_alias( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, +) -> Result { + let Some(error_ty) = pallet.error_ty_id() else { + return Ok(quote!()); + }; + + let error_type = type_gen.resolve_type_path(error_ty)?.to_token_stream(type_gen.settings()); + let error_ty = type_gen.resolve_type(error_ty)?; + let docs = &error_ty.docs; + let docs = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + Ok(quote! { + #docs + pub type Error = #error_type; + }) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/events.rs b/vendor/pezkuwi-subxt/codegen/src/api/events.rs new file mode 100644 index 00000000..66816e4a --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/events.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::CodegenError; +use pezkuwi_subxt_metadata::PalletMetadata; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use scale_typegen::{TypeGenerator, typegen::ir::ToTokensWithSettings}; + +/// Generate events from the provided pallet metadata. +/// +/// The function creates a new module named `events` under the pallet's module. +/// +/// ```rust,ignore +/// pub mod PalletName { +/// pub mod events { +/// ... +/// } +/// } +/// ``` +/// +/// The function generates the events as rust structs that implement the `pezkuwi_subxt::event::StaticEvent` +/// trait to uniquely identify the event's identity when creating the extrinsic. 
+/// +/// ```rust,ignore +/// pub struct EventName { +/// pub event_param: type, +/// } +/// impl ::pezkuwi_subxt::events::StaticEvent for EventName { +/// ... +/// } +/// ``` +/// +/// # Arguments +/// +/// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the +/// runtime metadata. +/// - `pallet` - Pallet metadata from which the events are generated. +/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. +/// `::pezkuwi_subxt::ext::pezkuwi_subxt_core` when using subxt as a dependency. +pub fn generate_events( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + // Early return if the pallet has no events. + let Some(event_ty) = pallet.event_ty_id() else { + return Ok(quote!()); + }; + + let variant_names_and_struct_defs = + super::generate_structs_from_variants(type_gen, event_ty, |name| name.into(), "Event")?; + + let event_structs = variant_names_and_struct_defs.into_iter().map(|var| { + let pallet_name = pallet.name(); + let event_struct_name = &var.composite.name; + let event_name = var.variant_name; + let alias_mod = var.type_alias_mod; + let struct_def = + type_gen.upcast_composite(&var.composite).to_token_stream(type_gen.settings()); + quote! { + #struct_def + #alias_mod + + impl #crate_path::events::StaticEvent for #event_struct_name { + const PALLET: &'static str = #pallet_name; + const EVENT: &'static str = #event_name; + } + } + }); + + let event_type = type_gen.resolve_type_path(event_ty)?.to_token_stream(type_gen.settings()); + let event_ty = type_gen.resolve_type(event_ty)?; + let docs = &event_ty.docs; + let docs = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! 
{ + #docs + pub type Event = #event_type; + pub mod events { + use super::#types_mod_ident; + #( #event_structs )* + } + }) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/mod.rs b/vendor/pezkuwi-subxt/codegen/src/api/mod.rs new file mode 100644 index 00000000..3536c290 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/mod.rs @@ -0,0 +1,458 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Generate code for submitting extrinsics and query storage of a Bizinikiwi runtime. + +mod calls; +mod constants; +mod custom_values; +mod errors; +mod events; +mod pallet_view_functions; +mod runtime_apis; +mod storage; + +use pezkuwi_subxt_metadata::Metadata; +use scale_typegen::{ + TypeGenerator, + typegen::{ + ir::{ + ToTokensWithSettings, + type_ir::{CompositeFieldIR, CompositeIR, CompositeIRKind}, + }, + type_params::TypeParameters, + type_path::TypePath, + }, +}; +use syn::{Ident, parse_quote}; + +use crate::{ + api::custom_values::generate_custom_values, error::CodegenError, ir, subxt_type_gen_settings, +}; + +use heck::{ToSnakeCase as _, ToUpperCamelCase}; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; + +/// Create the API for interacting with a Bizinikiwi runtime. +pub struct RuntimeGenerator { + metadata: Metadata, +} + +impl RuntimeGenerator { + /// Create a new runtime generator from the provided metadata. + /// + /// **Note:** If you have the metadata path, URL or bytes to hand, prefer to use + /// `GenerateRuntimeApi` for generating the runtime API from that. + /// + /// # Panics + /// + /// Panics if the runtime metadata version is not supported. + /// + /// Supported versions: v14 and v15. 
+ pub fn new(mut metadata: Metadata) -> Self { + scale_typegen::utils::ensure_unique_type_paths(metadata.types_mut()) + .expect("Duplicate type paths in metadata; this is bug please file an issue."); + RuntimeGenerator { metadata } + } + + /// Generate the API for interacting with a Bizinikiwi runtime. + /// + /// # Arguments + /// + /// * `item_mod` - The module declaration for which the API is implemented. + /// * `derives` - Provide custom derives for the generated types. + /// * `type_substitutes` - Provide custom type substitutes. + /// * `crate_path` - Path to the `subxt` crate. + /// * `should_gen_docs` - True if the generated API contains the documentation from the + /// metadata. + pub fn generate_runtime_types( + &self, + item_mod: syn::ItemMod, + derives: scale_typegen::DerivesRegistry, + type_substitutes: scale_typegen::TypeSubstitutes, + crate_path: syn::Path, + should_gen_docs: bool, + ) -> Result { + let item_mod_attrs = item_mod.attrs.clone(); + let item_mod_ir = ir::ItemMod::try_from(item_mod)?; + + let settings = + subxt_type_gen_settings(derives, type_substitutes, &crate_path, should_gen_docs); + + let type_gen = TypeGenerator::new(self.metadata.types(), &settings); + let types_mod = type_gen.generate_types_mod()?.to_token_stream(type_gen.settings()); + let mod_ident = &item_mod_ir.ident; + let rust_items = item_mod_ir.rust_items(); + + Ok(quote! { + #( #item_mod_attrs )* + #[allow(dead_code, unused_imports, non_camel_case_types, unreachable_patterns)] + #[allow(clippy::all)] + #[allow(rustdoc::broken_intra_doc_links)] + pub mod #mod_ident { + // Preserve any Rust items that were previously defined in the adorned module + #( #rust_items ) * + + // Make it easy to access the root items via `root_mod` at different levels + // without reaching out of this module. + #[allow(unused_imports)] + mod root_mod { + pub use super::*; + } + + #types_mod + } + }) + } + + /// Generate the API for interacting with a Bizinikiwi runtime. 
+ /// + /// # Arguments + /// + /// * `item_mod` - The module declaration for which the API is implemented. + /// * `derives` - Provide custom derives for the generated types. + /// * `type_substitutes` - Provide custom type substitutes. + /// * `crate_path` - Path to the `subxt` crate. + /// * `should_gen_docs` - True if the generated API contains the documentation from the + /// metadata. + pub fn generate_runtime( + &self, + item_mod: syn::ItemMod, + derives: scale_typegen::DerivesRegistry, + type_substitutes: scale_typegen::TypeSubstitutes, + crate_path: syn::Path, + should_gen_docs: bool, + ) -> Result { + let item_mod_attrs = item_mod.attrs.clone(); + let item_mod_ir = ir::ItemMod::try_from(item_mod)?; + + let settings = + subxt_type_gen_settings(derives, type_substitutes, &crate_path, should_gen_docs); + + let type_gen = TypeGenerator::new(self.metadata.types(), &settings); + let types_mod = type_gen.generate_types_mod()?.to_token_stream(type_gen.settings()); + let types_mod_ident = type_gen.types_mod_ident(); + let pallets_with_mod_names = self + .metadata + .pallets() + .map(|pallet| (pallet, format_ident!("{}", pallet.name().to_string().to_snake_case()))) + .collect::>(); + + // Pallet names and their length are used to create PALLETS array. + // The array is used to identify the pallets composing the metadata for + // validation of just those pallets. 
+ let pallet_names: Vec<_> = self.metadata.pallets().map(|pallet| pallet.name()).collect(); + let pallet_names_len = pallet_names.len(); + + let runtime_api_names: Vec<_> = + self.metadata.runtime_api_traits().map(|api| api.name().to_string()).collect(); + let runtime_api_names_len = runtime_api_names.len(); + + let modules = pallets_with_mod_names + .iter() + .map(|(pallet, mod_name)| { + let calls = calls::generate_calls(&type_gen, pallet, &crate_path)?; + + let event = events::generate_events(&type_gen, pallet, &crate_path)?; + + let storage_mod = storage::generate_storage(&type_gen, pallet, &crate_path)?; + + let constants_mod = constants::generate_constants(&type_gen, pallet, &crate_path)?; + + let errors = errors::generate_error_type_alias(&type_gen, pallet)?; + + let view_functions = pallet_view_functions::generate_pallet_view_functions( + &type_gen, + pallet, + &crate_path, + )?; + + Ok(quote! { + pub mod #mod_name { + use super::root_mod; + use super::#types_mod_ident; + #errors + #calls + #view_functions + #event + #storage_mod + #constants_mod + } + }) + }) + .collect::, CodegenError>>()?; + + let mod_ident = &item_mod_ir.ident; + let pallets_with_constants: Vec<_> = pallets_with_mod_names + .iter() + .filter_map(|(pallet, pallet_mod_name)| { + pallet.constants().next().is_some().then_some(pallet_mod_name) + }) + .collect(); + + let pallets_with_storage: Vec<_> = pallets_with_mod_names + .iter() + .filter_map(|(pallet, pallet_mod_name)| pallet.storage().map(|_| pallet_mod_name)) + .collect(); + + let pallets_with_calls: Vec<_> = pallets_with_mod_names + .iter() + .filter_map(|(pallet, pallet_mod_name)| pallet.call_ty_id().map(|_| pallet_mod_name)) + .collect(); + + let pallets_with_view_functions: Vec<_> = pallets_with_mod_names + .iter() + .filter(|(pallet, _pallet_mod_name)| pallet.has_view_functions()) + .map(|(_, pallet_mod_name)| pallet_mod_name) + .collect(); + + let rust_items = item_mod_ir.rust_items(); + + let apis_mod = 
runtime_apis::generate_runtime_apis( + &self.metadata, + &type_gen, + types_mod_ident, + &crate_path, + )?; + + // Fetch the paths of the outer enums. + // Bizinikiwi exposes those under `kitchensink_runtime`, while Pezkuwi under + // `pezkuwi_runtime`. + let call_path = type_gen + .resolve_type_path(self.metadata.outer_enums().call_enum_ty())? + .to_token_stream(type_gen.settings()); + let event_path = type_gen + .resolve_type_path(self.metadata.outer_enums().event_enum_ty())? + .to_token_stream(type_gen.settings()); + let error_path = type_gen + .resolve_type_path(self.metadata.outer_enums().error_enum_ty())? + .to_token_stream(type_gen.settings()); + + let metadata_hash = self.metadata.hasher().hash(); + + let custom_values = generate_custom_values(&self.metadata, &type_gen, &crate_path); + + Ok(quote! { + #( #item_mod_attrs )* + #[allow(dead_code, unused_imports, non_camel_case_types, unreachable_patterns)] + #[allow(clippy::all)] + #[allow(rustdoc::broken_intra_doc_links)] + pub mod #mod_ident { + // Preserve any Rust items that were previously defined in the adorned module. + #( #rust_items ) * + + // Make it easy to access the root items via `root_mod` at different levels + // without reaching out of this module. + #[allow(unused_imports)] + mod root_mod { + pub use super::*; + } + + // Identify the pallets composing the static metadata by name. + pub static PALLETS: [&str; #pallet_names_len] = [ #(#pallet_names,)* ]; + + // Runtime APIs in the metadata by name. + pub static RUNTIME_APIS: [&str; #runtime_api_names_len] = [ #(#runtime_api_names,)* ]; + + /// The error type that is returned when there is a runtime issue. + pub type DispatchError = ::pezsp_runtime::DispatchError; + + /// The outer event enum. + pub type Event = #event_path; + + /// The outer extrinsic enum. + pub type Call = #call_path; + + /// The outer error enum represents the DispatchError's Module variant. 
+ pub type Error = #error_path; + + pub fn constants() -> ConstantsApi { + ConstantsApi + } + + pub fn storage() -> StorageApi { + StorageApi + } + + pub fn tx() -> TransactionApi { + TransactionApi + } + + pub fn apis() -> runtime_apis::RuntimeApi { + runtime_apis::RuntimeApi + } + + #apis_mod + + pub fn view_functions() -> ViewFunctionsApi { + ViewFunctionsApi + } + + pub fn custom() -> CustomValuesApi { + CustomValuesApi + } + + #custom_values + + pub struct ConstantsApi; + impl ConstantsApi { + #( + pub fn #pallets_with_constants(&self) -> #pallets_with_constants::constants::ConstantsApi { + #pallets_with_constants::constants::ConstantsApi + } + )* + } + + pub struct StorageApi; + impl StorageApi { + #( + pub fn #pallets_with_storage(&self) -> #pallets_with_storage::storage::StorageApi { + #pallets_with_storage::storage::StorageApi + } + )* + } + + pub struct TransactionApi; + impl TransactionApi { + #( + pub fn #pallets_with_calls(&self) -> #pallets_with_calls::calls::TransactionApi { + #pallets_with_calls::calls::TransactionApi + } + )* + } + + pub struct ViewFunctionsApi; + impl ViewFunctionsApi { + #( + pub fn #pallets_with_view_functions(&self) -> #pallets_with_view_functions::view_functions::ViewFunctionsApi { + #pallets_with_view_functions::view_functions::ViewFunctionsApi + } + )* + } + + /// check whether the metadata provided is aligned with this statically generated code. + pub fn is_codegen_valid_for(metadata: &#crate_path::Metadata) -> bool { + let runtime_metadata_hash = metadata + .hasher() + .only_these_pallets(&PALLETS) + .only_these_runtime_apis(&RUNTIME_APIS) + .hash(); + runtime_metadata_hash == [ #(#metadata_hash,)* ] + } + + #( #modules )* + #types_mod + } + }) + } +} + +/// Return a vector of tuples of variant names and corresponding struct definitions. 
+pub fn generate_structs_from_variants<F>(
+    type_gen: &TypeGenerator,
+    type_id: u32,
+    variant_to_struct_name: F,
+    error_message_type_name: &str,
+) -> Result<Vec<StructFromVariant>, CodegenError>
+where
+    F: Fn(&str) -> std::borrow::Cow<'_, str>,
+{
+    let ty = type_gen.resolve_type(type_id)?;
+
+    let scale_info::TypeDef::Variant(variant) = &ty.type_def else {
+        return Err(CodegenError::InvalidType(error_message_type_name.into()));
+    };
+
+    variant
+        .variants
+        .iter()
+        .map(|var| {
+            let mut type_params = TypeParameters::from_scale_info(&[]);
+            let composite_ir_kind =
+                type_gen.create_composite_ir_kind(&var.fields, &mut type_params)?;
+            let struct_name = variant_to_struct_name(&var.name);
+            let mut composite = CompositeIR::new(
+                syn::parse_str(&struct_name).expect("enum variant is a valid ident; qed"),
+                composite_ir_kind,
+                type_gen.docs_from_scale_info(&var.docs),
+            );
+
+            let type_alias_mod = generate_type_alias_mod(&mut composite, type_gen);
+            Ok(StructFromVariant { variant_name: var.name.to_string(), composite, type_alias_mod })
+        })
+        .collect()
+}
+
+pub struct StructFromVariant {
+    variant_name: String,
+    composite: CompositeIR,
+    type_alias_mod: TokenStream2,
+}
+
+/// Modifies the composite, by replacing its types with references to the generated type alias
+/// module. Returns the TokenStream of the type alias module.
+///
+/// E.g. a struct like this:
+///
+/// ```rust,ignore
+/// pub struct SetMaxCodeSize {
+///     pub new: ::core::primitive::u32,
+/// }
+/// ```
+///
+/// will be made into this:
+///
+/// ```rust,ignore
+/// pub struct SetMaxCodeSize {
+///     pub new: set_max_code_size::New,
+/// }
+/// ```
+///
+/// And the type alias module will look like this:
+///
+/// ```rust,ignore
+/// pub mod set_max_code_size {
+///     use super::runtime_types;
+///     pub type New = ::core::primitive::u32;
+/// }
+/// ```
+pub fn generate_type_alias_mod(
+    composite: &mut CompositeIR,
+    type_gen: &TypeGenerator,
+) -> TokenStream2 {
+    let mut aliases: Vec<TokenStream2> = vec![];
+    let alias_mod_name: Ident = syn::parse_str(&composite.name.to_string().to_snake_case())
+        .expect("composite name in snake_case should be a valid identifier");
+
+    let mut modify_field_to_be_type_alias = |field: &mut CompositeFieldIR, alias_name: Ident| {
+        let type_path = field.type_path.to_token_stream(type_gen.settings());
+        aliases.push(quote!(pub type #alias_name = #type_path;));
+
+        let type_alias_path: syn::Path = parse_quote!(#alias_mod_name::#alias_name);
+        field.type_path = TypePath::from_syn_path(type_alias_path);
+    };
+
+    match &mut composite.kind {
+        CompositeIRKind::NoFields => {
+            return quote!(); // no types mod generated for unit structs.
+ }, + CompositeIRKind::Named(named) => + for (name, field) in named.iter_mut() { + let alias_name = format_ident!("{}", name.to_string().to_upper_camel_case()); + modify_field_to_be_type_alias(field, alias_name); + }, + CompositeIRKind::Unnamed(unnamed) => + for (i, field) in unnamed.iter_mut().enumerate() { + let alias_name = format_ident!("Field{}", i); + modify_field_to_be_type_alias(field, alias_name); + }, + }; + + let types_mod_ident = type_gen.types_mod_ident(); + quote!(pub mod #alias_mod_name { + use super::#types_mod_ident; + #( #aliases )* + }) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/pallet_view_functions.rs b/vendor/pezkuwi-subxt/codegen/src/api/pallet_view_functions.rs new file mode 100644 index 00000000..0340fa4c --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/pallet_view_functions.rs @@ -0,0 +1,183 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use heck::ToUpperCamelCase as _; + +use crate::CodegenError; +use pezkuwi_subxt_metadata::{PalletMetadata, ViewFunctionMetadata}; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use scale_typegen::{TypeGenerator, typegen::ir::ToTokensWithSettings}; +use std::collections::HashSet; + +pub fn generate_pallet_view_functions( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + if !pallet.has_view_functions() { + // If there are no view functions in this pallet, we + // don't generate anything. + return Ok(quote! {}); + } + + let view_functions: Vec<_> = pallet + .view_functions() + .map(|vf| generate_pallet_view_function(pallet.name(), vf, type_gen, crate_path)) + .collect::>()?; + + let view_functions_types = view_functions.iter().map(|(apis, _)| apis); + let view_functions_methods = view_functions.iter().map(|(_, getters)| getters); + + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! 
{ + pub mod view_functions { + use super::root_mod; + use super::#types_mod_ident; + + pub struct ViewFunctionsApi; + + impl ViewFunctionsApi { + #( #view_functions_methods )* + } + + #( #view_functions_types )* + } + }) +} + +fn generate_pallet_view_function( + pallet_name: &str, + view_function: ViewFunctionMetadata<'_>, + type_gen: &TypeGenerator, + crate_path: &syn::Path, +) -> Result<(TokenStream2, TokenStream2), CodegenError> { + let types_mod_ident = type_gen.types_mod_ident(); + + let view_function_name_str = view_function.name(); + let view_function_name_ident = format_ident!("{view_function_name_str}"); + let validation_hash = view_function.hash(); + + let docs = view_function.docs(); + let docs: TokenStream2 = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + + struct Input { + name: syn::Ident, + type_alias: syn::Ident, + type_path: TokenStream2, + } + + let view_function_inputs: Vec = { + let mut unique_names = HashSet::new(); + let mut unique_aliases = HashSet::new(); + + view_function + .inputs() + .enumerate() + .map(|(idx, input)| { + // These are method names, which can just be '_', but struct field names can't + // just be an underscore, so fix any such names we find to work in structs. + let mut name = input.name.trim_start_matches('_').to_string(); + if name.is_empty() { + name = format!("_{idx}"); + } + while !unique_names.insert(name.clone()) { + name = format!("{name}_param{idx}"); + } + + // The alias type name is based on the name, above. + let mut alias = name.to_upper_camel_case(); + // Note: name is not empty. + if alias.as_bytes()[0].is_ascii_digit() { + alias = format!("Param{alias}"); + } + while !unique_aliases.insert(alias.clone()) { + alias = format!("{alias}Param{idx}"); + } + + // Path to the actual type we'll have generated for this input. 
+ let type_path = type_gen + .resolve_type_path(input.id) + .expect("view function input type is in metadata; qed") + .to_token_stream(type_gen.settings()); + + Input { + name: format_ident!("{name}"), + type_alias: format_ident!("{alias}"), + type_path, + } + }) + .collect() + }; + + let input_tuple_types = view_function_inputs + .iter() + .map(|i| { + let ty = &i.type_alias; + quote!(#view_function_name_ident::#ty) + }) + .collect::>(); + + let input_args = view_function_inputs + .iter() + .map(|i| { + let arg = &i.name; + let ty = &i.type_alias; + quote!(#arg: #view_function_name_ident::#ty) + }) + .collect::>(); + + let input_type_aliases = view_function_inputs.iter().map(|i| { + let ty = &i.type_alias; + let path = &i.type_path; + quote!(pub type #ty = #path;) + }); + + let input_param_names = view_function_inputs.iter().map(|i| &i.name); + + let output_type_path = type_gen + .resolve_type_path(view_function.output_ty())? + .to_token_stream(type_gen.settings()); + + // Define the input and output type bits. + let view_function_types = quote!( + pub mod #view_function_name_ident { + use super::root_mod; + use super::#types_mod_ident; + + #(#input_type_aliases)* + + pub mod output { + use super::#types_mod_ident; + pub type Output = #output_type_path; + } + } + ); + + // Define the getter method that will live on the `ViewFunctionApi` type. 
+ let view_function_method = quote!( + #docs + pub fn #view_function_name_ident( + &self, + #(#input_args),* + ) -> #crate_path::view_functions::payload::StaticPayload< + (#(#input_tuple_types,)*), + #view_function_name_ident::output::Output + > { + #crate_path::view_functions::payload::StaticPayload::new_static( + #pallet_name, + #view_function_name_str, + (#(#input_param_names,)*), + [#(#validation_hash,)*], + ) + } + ); + + Ok((view_function_types, view_function_method)) +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/runtime_apis.rs b/vendor/pezkuwi-subxt/codegen/src/api/runtime_apis.rs new file mode 100644 index 00000000..4a8495e1 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/runtime_apis.rs @@ -0,0 +1,399 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use std::collections::HashSet; + +use heck::{ToSnakeCase as _, ToUpperCamelCase as _}; + +use pezkuwi_subxt_metadata::{Metadata, RuntimeApiMetadata}; +use scale_typegen::{TypeGenerator, typegen::ir::ToTokensWithSettings}; + +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; + +use crate::CodegenError; + +/// Generate the runtime APIs. +pub fn generate_runtime_apis( + metadata: &Metadata, + type_gen: &TypeGenerator, + types_mod_ident: &syn::Ident, + crate_path: &syn::Path, +) -> Result { + let runtime_fns: Vec<_> = metadata + .runtime_api_traits() + .map(|api| generate_runtime_api(api, type_gen, crate_path)) + .collect::>()?; + + let trait_defs = runtime_fns.iter().map(|(apis, _)| apis); + let trait_getters = runtime_fns.iter().map(|(_, getters)| getters); + + Ok(quote! { + pub mod runtime_apis { + use super::root_mod; + use super::#types_mod_ident; + + use #crate_path::ext::codec::Encode; + + pub struct RuntimeApi; + + impl RuntimeApi { + #( #trait_getters )* + } + + #( #trait_defs )* + } + }) +} + +/// Generates runtime functions for the given API metadata. 
+fn generate_runtime_api( + api: RuntimeApiMetadata, + type_gen: &TypeGenerator, + crate_path: &syn::Path, +) -> Result<(TokenStream2, TokenStream2), CodegenError> { + let types_mod_ident = type_gen.types_mod_ident(); + // Trait name must remain as is (upper case) to identify the runtime call. + let trait_name_str = api.name(); + // The snake case for the trait name. + let trait_name_snake = format_ident!("{}", api.name().to_snake_case()); + + let docs = api.docs(); + let docs: TokenStream2 = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + + let types_and_methods = api + .methods() + .map(|method| { + let method_name = format_ident!("{}", method.name()); + let method_name_str = method.name(); + let validation_hash = method.hash(); + + let docs = method.docs(); + let docs: TokenStream2 = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + + struct Input { + name: syn::Ident, + type_alias: syn::Ident, + type_path: TokenStream2, + } + + let runtime_api_inputs: Vec = { + let mut unique_names = HashSet::new(); + let mut unique_aliases = HashSet::new(); + + method + .inputs() + .enumerate() + .map(|(idx, input)| { + // The method argument name is either the input name or the + // index (eg _1, _2 etc) if one isn't provided. + // if we get unlucky we'll end up with param_param1 etc. + let mut name = input.name.trim_start_matches('_').to_string(); + if name.is_empty() { + name = format!("_{idx}"); + } + while !unique_names.insert(name.clone()) { + name = format!("{name}_param{idx}"); + } + + // The alias is either InputName if provided, or Param1, Param2 etc if not. + // If we get unlucky we may even end up with ParamParam1 etc. + let mut alias = name.trim_start_matches('_').to_upper_camel_case(); + // Note: name is not empty. 
+ if alias.as_bytes()[0].is_ascii_digit() { + alias = format!("Param{alias}"); + } + while !unique_aliases.insert(alias.clone()) { + alias = format!("{alias}Param{idx}"); + } + + // Generate alias for runtime type. + let type_path = type_gen + .resolve_type_path(input.id) + .expect("runtime api input type is in metadata; qed") + .to_token_stream(type_gen.settings()); + + Input { + name: format_ident!("{name}"), + type_alias: format_ident!("{alias}"), + type_path, + } + }) + .collect() + }; + + let input_tuple_types = runtime_api_inputs + .iter() + .map(|i| { + let ty = &i.type_alias; + quote!(#method_name::#ty) + }) + .collect::>(); + + let input_args = runtime_api_inputs + .iter() + .map(|i| { + let arg = &i.name; + let ty = &i.type_alias; + quote!(#arg: #method_name::#ty) + }) + .collect::>(); + + let input_param_names = runtime_api_inputs.iter().map(|i| &i.name); + + let input_type_aliases = runtime_api_inputs.iter().map(|i| { + let ty = &i.type_alias; + let path = &i.type_path; + quote!(pub type #ty = #path;) + }); + + let output_type_path = type_gen + .resolve_type_path(method.output_ty())? + .to_token_stream(type_gen.settings()); + + // Define the input and output type bits for the method. + let runtime_api_types = quote! { + pub mod #method_name { + use super::root_mod; + use super::#types_mod_ident; + + #(#input_type_aliases)* + + pub mod output { + use super::#types_mod_ident; + pub type Output = #output_type_path; + } + } + }; + + // Define the getter method that will live on the `ViewFunctionApi` type. 
+ let runtime_api_method = quote!( + #docs + pub fn #method_name( + &self, + #(#input_args),* + ) -> #crate_path::runtime_api::payload::StaticPayload< + (#(#input_tuple_types,)*), + #method_name::output::Output + > { + #crate_path::runtime_api::payload::StaticPayload::new_static( + #trait_name_str, + #method_name_str, + (#(#input_param_names,)*), + [#(#validation_hash,)*], + ) + } + ); + + Ok((runtime_api_types, runtime_api_method)) + }) + .collect::, CodegenError>>()?; + + let trait_name = format_ident!("{}", trait_name_str); + let types = types_and_methods.iter().map(|(types, _)| types); + let methods = types_and_methods.iter().map(|(_, methods)| methods); + + // The runtime API definition and types. + let trait_defs = quote!( + pub mod #trait_name_snake { + use super::root_mod; + use super::#types_mod_ident; + + #docs + pub struct #trait_name; + + impl #trait_name { + #( #methods )* + } + + #( #types )* + } + ); + + // A getter for the `RuntimeApi` to get the trait structure. + let trait_getter = quote!( + pub fn #trait_name_snake(&self) -> #trait_name_snake::#trait_name { + #trait_name_snake::#trait_name + } + ); + + Ok((trait_defs, trait_getter)) +} + +#[cfg(test)] +mod tests { + use crate::RuntimeGenerator; + use frame_metadata::v15::{ + self, RuntimeApiMetadata, RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, + }; + use pezkuwi_subxt_metadata::Metadata; + use quote::quote; + use scale_info::meta_type; + + fn metadata_with_runtime_apis(runtime_apis: Vec) -> Metadata { + let extrinsic_metadata = v15::ExtrinsicMetadata { + version: 0, + signed_extensions: vec![], + address_ty: meta_type::<()>(), + call_ty: meta_type::<()>(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), + }; + + let metadata: Metadata = v15::RuntimeMetadataV15::new( + vec![], + extrinsic_metadata, + meta_type::<()>(), + runtime_apis, + v15::OuterEnums { + call_enum_ty: meta_type::<()>(), + event_enum_ty: meta_type::<()>(), + error_enum_ty: meta_type::<()>(), + }, + 
v15::CustomMetadata { map: Default::default() }, + ) + .try_into() + .expect("can build valid metadata"); + metadata + } + + fn generate_code(runtime_apis: Vec) -> String { + let metadata = metadata_with_runtime_apis(runtime_apis); + let item_mod = syn::parse_quote!( + pub mod api {} + ); + let generator = RuntimeGenerator::new(metadata); + let generated = generator + .generate_runtime( + item_mod, + Default::default(), + Default::default(), + syn::parse_str("::subxt_path").unwrap(), + false, + ) + .expect("should be able to generate runtime"); + generated.to_string() + } + + #[test] + fn unique_param_names() { + let runtime_apis = vec![RuntimeApiMetadata { + name: "Test", + methods: vec![RuntimeApiMethodMetadata { + name: "test", + inputs: vec![ + RuntimeApiMethodParamMetadata { name: "foo", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "bar", ty: meta_type::() }, + ], + output: meta_type::(), + docs: vec![], + }], + + docs: vec![], + }]; + + let code = generate_code(runtime_apis); + + let expected_alias = quote!( + pub mod test { + use super::{root_mod, runtime_types}; + pub type Foo = ::core::primitive::bool; + pub type Bar = ::core::primitive::bool; + pub mod output { + use super::runtime_types; + pub type Output = ::core::primitive::bool; + } + } + ); + + assert!(code.contains(&expected_alias.to_string())); + } + + #[test] + fn duplicate_param_names() { + let runtime_apis = vec![RuntimeApiMetadata { + name: "Test", + methods: vec![RuntimeApiMethodMetadata { + name: "test", + inputs: vec![ + RuntimeApiMethodParamMetadata { name: "_a", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "a", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "__a", ty: meta_type::() }, + ], + output: meta_type::(), + docs: vec![], + }], + + docs: vec![], + }]; + + let code = generate_code(runtime_apis); + + let expected_alias = quote!( + pub mod test { + use super::{root_mod, runtime_types}; + pub type A = ::core::primitive::bool; + pub type 
AParam1 = ::core::primitive::bool; + pub type AParam2 = ::core::primitive::bool; + pub mod output { + use super::runtime_types; + pub type Output = ::core::primitive::bool; + } + } + ); + + assert!(code.contains(&expected_alias.to_string())); + } + + #[test] + fn duplicate_param_and_alias_names() { + let runtime_apis = vec![RuntimeApiMetadata { + name: "Test", + methods: vec![RuntimeApiMethodMetadata { + name: "test", + inputs: vec![ + RuntimeApiMethodParamMetadata { name: "_", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "_a", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "_param_0", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { name: "__", ty: meta_type::() }, + RuntimeApiMethodParamMetadata { + name: "___param_0_param_2", + ty: meta_type::(), + }, + ], + output: meta_type::(), + docs: vec![], + }], + + docs: vec![], + }]; + + let code = generate_code(runtime_apis); + + let expected_alias = quote!( + pub mod test { + use super::{root_mod, runtime_types}; + pub type Param0 = ::core::primitive::bool; + pub type A = ::core::primitive::bool; + pub type Param0Param2 = ::core::primitive::bool; + pub type Param3 = ::core::primitive::bool; + pub type Param0Param2Param4 = ::core::primitive::bool; + pub mod output { + use super::runtime_types; + pub type Output = ::core::primitive::bool; + } + } + ); + + assert!(code.contains(&expected_alias.to_string())); + } +} diff --git a/vendor/pezkuwi-subxt/codegen/src/api/storage.rs b/vendor/pezkuwi-subxt/codegen/src/api/storage.rs new file mode 100644 index 00000000..606fbcbe --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/api/storage.rs @@ -0,0 +1,236 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use heck::ToSnakeCase as _; +use pezkuwi_subxt_metadata::{PalletMetadata, StorageEntryMetadata}; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use scale_typegen::TypeGenerator; + +use super::CodegenError; + +use scale_typegen::typegen::ir::ToTokensWithSettings; + +/// Generate functions which create storage addresses from the provided pallet's metadata. +/// These addresses can be used to access and iterate over storage values. +/// +/// # Arguments +/// +/// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the +/// runtime metadata. +/// - `pallet` - Pallet metadata from which the storage items are generated. +/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. +/// `::pezkuwi_subxt::ext::pezkuwi_subxt_core` when using subxt as a dependency. +pub fn generate_storage( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + crate_path: &syn::Path, +) -> Result { + let Some(storage) = pallet.storage() else { + // If there are no storage entries in this pallet, we + // don't generate anything. + return Ok(quote!()); + }; + + let storage_entries = storage + .entries() + .iter() + .map(|entry| generate_storage_entry_fns(type_gen, pallet, entry, crate_path)) + .collect::, CodegenError>>()?; + + let storage_entry_types = storage_entries.iter().map(|(types, _)| types); + let storage_entry_methods = storage_entries.iter().map(|(_, method)| method); + + let types_mod_ident = type_gen.types_mod_ident(); + + Ok(quote! { + pub mod storage { + use super::root_mod; + use super::#types_mod_ident; + + pub struct StorageApi; + + impl StorageApi { + #( #storage_entry_methods )* + } + + #( #storage_entry_types )* + } + }) +} + +/// Returns storage entry functions and alias modules. 
+fn generate_storage_entry_fns( + type_gen: &TypeGenerator, + pallet: &PalletMetadata, + storage_entry: &StorageEntryMetadata, + crate_path: &syn::Path, +) -> Result<(TokenStream2, TokenStream2), CodegenError> { + let types_mod_ident = type_gen.types_mod_ident(); + + let pallet_name = pallet.name(); + let storage_entry_name_str = storage_entry.name(); + let storage_entry_snake_case_name = storage_entry_name_str.to_snake_case(); + let storage_entry_snake_case_ident = format_ident!("{storage_entry_snake_case_name}"); + let Some(validation_hash) = pallet.storage_hash(storage_entry_name_str) else { + return Err(CodegenError::MissingStorageMetadata( + pallet_name.into(), + storage_entry_name_str.into(), + )); + }; + + let docs = storage_entry.docs(); + let docs: TokenStream2 = type_gen + .settings() + .should_gen_docs + .then_some(quote! { #( #[doc = #docs ] )* }) + .unwrap_or_default(); + + struct Input { + type_alias: syn::Ident, + type_path: TokenStream2, + } + + let storage_key_types: Vec = storage_entry + .keys() + .enumerate() + .map(|(idx, key)| { + // Storage key aliases are just indexes; no names to use. + let type_alias = format_ident!("Param{}", idx); + + // Path to the actual type we'll have generated for this input. + let type_path = type_gen + .resolve_type_path(key.key_id) + .expect("view function input type is in metadata; qed") + .to_token_stream(type_gen.settings()); + + Input { type_alias, type_path } + }) + .collect(); + + let storage_key_tuple_types = storage_key_types + .iter() + .map(|i| { + let ty = &i.type_alias; + quote!(#storage_entry_snake_case_ident::#ty) + }) + .collect::>(); + + let storage_key_type_aliases = storage_key_types + .iter() + .map(|i| { + let ty = &i.type_alias; + let path = &i.type_path; + quote!(pub type #ty = #path;) + }) + .collect::>(); + + let storage_value_type_path = type_gen + .resolve_type_path(storage_entry.value_ty())? 
+ .to_token_stream(type_gen.settings()); + + let is_plain = if storage_entry.keys().len() == 0 { + quote!(#crate_path::utils::Yes) + } else { + quote!(#crate_path::utils::Maybe) + }; + + let storage_entry_types = quote!( + pub mod #storage_entry_snake_case_ident { + use super::root_mod; + use super::#types_mod_ident; + + #(#storage_key_type_aliases)* + + pub mod output { + use super::#types_mod_ident; + pub type Output = #storage_value_type_path; + } + } + ); + + let storage_entry_method = quote!( + #docs + pub fn #storage_entry_snake_case_ident(&self) -> #crate_path::storage::address::StaticAddress< + (#(#storage_key_tuple_types,)*), + #storage_entry_snake_case_ident::output::Output, + #is_plain + > { + #crate_path::storage::address::StaticAddress::new_static( + #pallet_name, + #storage_entry_name_str, + [#(#validation_hash,)*], + ) + } + ); + + Ok((storage_entry_types, storage_entry_method)) +} + +#[cfg(test)] +mod tests { + use frame_metadata::v15; + use pezkuwi_subxt_metadata::Metadata; + use scale_info::{MetaType, meta_type}; + + // TODO: Think about adding tests for storage codegen which can use this sort of function. 
+ #[allow(dead_code)] + fn metadata_with_storage_entries( + storage_entries: impl IntoIterator, + ) -> Metadata { + let storage_entries: Vec = storage_entries + .into_iter() + .map(|(name, key)| v15::StorageEntryMetadata { + name, + modifier: v15::StorageEntryModifier::Optional, + ty: v15::StorageEntryType::Map { + hashers: vec![v15::StorageHasher::Blake2_128Concat], + key, + value: meta_type::(), + }, + default: vec![], + docs: vec![], + }) + .collect(); + + let pallet_1 = v15::PalletMetadata { + name: "Pallet1", + storage: Some(v15::PalletStorageMetadata { + prefix: Default::default(), + entries: storage_entries, + }), + calls: None, + event: None, + constants: vec![], + error: None, + index: 0, + docs: vec![], + }; + + let extrinsic_metadata = v15::ExtrinsicMetadata { + version: 0, + signed_extensions: vec![], + address_ty: meta_type::<()>(), + call_ty: meta_type::<()>(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), + }; + + let metadata: Metadata = v15::RuntimeMetadataV15::new( + vec![pallet_1], + extrinsic_metadata, + meta_type::<()>(), + vec![], + v15::OuterEnums { + call_enum_ty: meta_type::<()>(), + event_enum_ty: meta_type::<()>(), + error_enum_ty: meta_type::<()>(), + }, + v15::CustomMetadata { map: Default::default() }, + ) + .try_into() + .expect("can build valid metadata"); + metadata + } +} diff --git a/vendor/pezkuwi-subxt/codegen/src/error.rs b/vendor/pezkuwi-subxt/codegen/src/error.rs new file mode 100644 index 00000000..359b9ec8 --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/error.rs @@ -0,0 +1,101 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Errors that can be emitted from codegen. + +use proc_macro2::{Span, TokenStream as TokenStream2}; +use scale_typegen::TypegenError; + +/// Error returned when the Codegen cannot generate the runtime API. 
+#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum CodegenError { + /// Cannot decode the metadata bytes. + #[error("Could not decode metadata, only V14 and V15 metadata are supported: {0}")] + Decode(#[from] codec::Error), + /// Out of line modules are not supported. + #[error( + "Out-of-line subxt modules are not supported, make sure you are providing a body to your module: pub mod pezkuwi {{ ... }}" + )] + InvalidModule(Span), + /// Invalid type path. + #[error("Invalid type path {0}: {1}")] + InvalidTypePath(String, syn::Error), + /// Metadata for constant could not be found. + #[error( + "Metadata for constant entry {0}_{1} could not be found. Make sure you are providing a valid bizinikiwi-based metadata" + )] + MissingConstantMetadata(String, String), + /// Metadata for storage could not be found. + #[error( + "Metadata for storage entry {0}_{1} could not be found. Make sure you are providing a valid bizinikiwi-based metadata" + )] + MissingStorageMetadata(String, String), + /// Metadata for call could not be found. + #[error( + "Metadata for call entry {0}_{1} could not be found. Make sure you are providing a valid bizinikiwi-based metadata" + )] + MissingCallMetadata(String, String), + /// Metadata for call could not be found. + #[error( + "Metadata for runtime API entry {0}_{1} could not be found. Make sure you are providing a valid bizinikiwi-based metadata" + )] + MissingRuntimeApiMetadata(String, String), + /// Call variant must have all named fields. + #[error( + "Call variant for type {0} must have all named fields. Make sure you are providing a valid bizinikiwi-based metadata" + )] + InvalidCallVariant(u32), + /// Type should be an variant/enum. + #[error( + "{0} type should be an variant/enum type. Make sure you are providing a valid bizinikiwi-based metadata" + )] + InvalidType(String), + /// Extrinsic call type could not be found. + #[error( + "Extrinsic call type could not be found. 
Make sure you are providing a valid bizinikiwi-based metadata" + )] + MissingCallType, + /// There are too many or too few hashers. + #[error( + "Could not generate functions for storage entry {storage_entry_name}. There are {key_count} keys, but only {hasher_count} hashers. The number of hashers must equal the number of keys or be exactly 1." + )] + InvalidStorageHasherCount { + /// The name of the storage entry + storage_entry_name: String, + /// Number of keys + key_count: usize, + /// Number of hashers + hasher_count: usize, + }, + /// Cannot generate types. + #[error("Type Generation failed: {0}")] + TypeGeneration(#[from] TypegenError), + /// Error when generating metadata from Wasm-runtime + #[error("Failed to generate metadata from wasm file. reason: {0}")] + Wasm(String), + /// Other error. + #[error("Other error: {0}")] + Other(String), +} + +impl CodegenError { + /// Fetch the location for this error. + // Todo: Probably worth storing location outside of the variant, + // so that there's a common way to set a location for some error. + fn get_location(&self) -> Span { + match self { + Self::InvalidModule(span) => *span, + Self::TypeGeneration(TypegenError::InvalidSubstitute(err)) => err.span, + Self::InvalidTypePath(_, err) => err.span(), + _ => proc_macro2::Span::call_site(), + } + } + /// Render the error as an invocation of syn::compile_error!. + pub fn into_compile_error(self) -> TokenStream2 { + let msg = self.to_string(); + let span = self.get_location(); + syn::Error::new(span, msg).into_compile_error() + } +} diff --git a/vendor/pezkuwi-subxt/codegen/src/ir.rs b/vendor/pezkuwi-subxt/codegen/src/ir.rs new file mode 100644 index 00000000..bfbad97c --- /dev/null +++ b/vendor/pezkuwi-subxt/codegen/src/ir.rs @@ -0,0 +1,34 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+
+use crate::error::CodegenError;
+use syn::token;
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ItemMod {
+    vis: syn::Visibility,
+    mod_token: token::Mod,
+    pub ident: syn::Ident,
+    brace: token::Brace,
+    items: Vec<syn::Item>,
+}
+
+impl TryFrom<syn::ItemMod> for ItemMod {
+    type Error = CodegenError;
+
+    fn try_from(module: syn::ItemMod) -> Result<Self, Self::Error> {
+        let (brace, items) = match module.content {
+            Some((brace, items)) => (brace, items),
+            None => return Err(CodegenError::InvalidModule(module.ident.span())),
+        };
+
+        Ok(Self { vis: module.vis, mod_token: module.mod_token, ident: module.ident, brace, items })
+    }
+}
+
+impl ItemMod {
+    pub fn rust_items(&self) -> impl Iterator<Item = &syn::Item> {
+        self.items.iter()
+    }
+}
diff --git a/vendor/pezkuwi-subxt/codegen/src/lib.rs b/vendor/pezkuwi-subxt/codegen/src/lib.rs
new file mode 100644
index 00000000..0d82d298
--- /dev/null
+++ b/vendor/pezkuwi-subxt/codegen/src/lib.rs
@@ -0,0 +1,406 @@
+// Copyright 2019-2025 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! Generate a type safe Subxt interface for a Bizinikiwi runtime from its metadata.
+//! This is used by the `#[subxt]` macro and `subxt codegen` CLI command, but can also
+//! be used directly if preferable.
+
+#![deny(missing_docs)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+mod api;
+pub mod error;
+mod ir;
+
+#[cfg(feature = "web")]
+use getrandom as _;
+
+use api::RuntimeGenerator;
+use proc_macro2::TokenStream as TokenStream2;
+use scale_typegen::{
+    DerivesRegistry, TypeGeneratorSettings, TypeSubstitutes, TypegenError,
+    typegen::settings::{AllocCratePath, substitutes::absolute_path},
+};
+use std::collections::HashMap;
+use syn::parse_quote;
+
+// Part of the public interface, so expose:
+pub use error::CodegenError;
+pub use pezkuwi_subxt_metadata::Metadata;
+pub use syn;
+
+/// Generate a type safe interface to use with `subxt`.
+/// The options exposed here are similar to those exposed via +/// the `#[subxt]` macro or via the `subxt codegen` CLI command. +/// Both use this under the hood. +/// +/// # Example +/// +/// Generating an interface using all of the defaults: +/// +/// ```rust,standalone_crate +/// use codec::Decode; +/// use pezkuwi_subxt_codegen::{ Metadata, CodegenBuilder }; +/// +/// // Get hold of and decode some metadata: +/// let encoded = std::fs::read("../artifacts/pezkuwi_metadata_full.scale").unwrap(); +/// let metadata = Metadata::decode(&mut &*encoded).unwrap(); +/// +/// // Generate a TokenStream representing the code for the interface. +/// // This can be converted to a string, displayed as-is or output from a macro. +/// let token_stream = CodegenBuilder::new().generate(metadata); +/// ```` +pub struct CodegenBuilder { + crate_path: syn::Path, + use_default_derives: bool, + use_default_substitutions: bool, + generate_docs: bool, + runtime_types_only: bool, + item_mod: syn::ItemMod, + extra_global_derives: Vec, + extra_global_attributes: Vec, + type_substitutes: HashMap, + derives_for_type: HashMap>, + attributes_for_type: HashMap>, + derives_for_type_recursive: HashMap>, + attributes_for_type_recursive: HashMap>, +} + +impl Default for CodegenBuilder { + fn default() -> Self { + CodegenBuilder { + crate_path: syn::parse_quote!(::pezkuwi_subxt::ext::pezkuwi_subxt_core), + use_default_derives: true, + use_default_substitutions: true, + generate_docs: true, + runtime_types_only: false, + item_mod: syn::parse_quote!( + pub mod api {} + ), + extra_global_derives: Vec::new(), + extra_global_attributes: Vec::new(), + type_substitutes: HashMap::new(), + derives_for_type: HashMap::new(), + attributes_for_type: HashMap::new(), + derives_for_type_recursive: HashMap::new(), + attributes_for_type_recursive: HashMap::new(), + } + } +} + +impl CodegenBuilder { + /// Construct a builder to configure and generate a type-safe interface for Subxt. 
+ pub fn new() -> Self { + CodegenBuilder::default() + } + + /// Disable the default derives that are applied to all types. + /// + /// # Warning + /// + /// This is not recommended, and is highly likely to break some part of the + /// generated interface. Expect compile errors. + pub fn disable_default_derives(&mut self) { + self.use_default_derives = false; + } + + /// Disable the default type substitutions that are applied to the generated + /// code. + /// + /// # Warning + /// + /// This is not recommended, and is highly likely to break some part of the + /// generated interface. Expect compile errors. + pub fn disable_default_substitutes(&mut self) { + self.use_default_substitutions = false; + } + + /// Disable the output of doc comments associated with the generated types and + /// methods. This can reduce the generated code size at the expense of losing + /// documentation. + pub fn no_docs(&mut self) { + self.generate_docs = false; + } + + /// Only generate the types, and don't generate the rest of the Subxt specific + /// interface. + pub fn runtime_types_only(&mut self) { + self.runtime_types_only = true; + } + + /// Set the additional derives that will be applied to all types. By default, + /// a set of derives required for Subxt are automatically added for all types. + /// + /// # Warning + /// + /// Invalid derives, or derives that cannot be applied to _all_ of the generated + /// types (taking into account that some types are substituted for hand written ones + /// that we cannot add extra derives for) will lead to compile errors in the + /// generated code. + pub fn set_additional_global_derives(&mut self, derives: Vec) { + self.extra_global_derives = derives; + } + + /// Set the additional attributes that will be applied to all types. By default, + /// a set of attributes required for Subxt are automatically added for all types. + /// + /// # Warning + /// + /// Invalid attributes can very easily lead to compile errors in the generated code. 
+ pub fn set_additional_global_attributes(&mut self, attributes: Vec) { + self.extra_global_attributes = attributes; + } + + /// Set additional derives for a specific type at the path given. + /// + /// If you want to set the additional derives on all contained types recursively as well, + /// you can set the `recursive` argument to `true`. If you don't do that, + /// there might be compile errors in the generated code, if the derived trait + /// relies on the fact that contained types also implement that trait. + pub fn add_derives_for_type( + &mut self, + ty: syn::TypePath, + derives: impl IntoIterator, + recursive: bool, + ) { + if recursive { + self.derives_for_type_recursive.entry(ty).or_default().extend(derives); + } else { + self.derives_for_type.entry(ty).or_default().extend(derives); + } + } + + /// Set additional attributes for a specific type at the path given. + /// + /// Setting the `recursive` argument to `true` will additionally add the specified + /// attributes to all contained types recursively. + pub fn add_attributes_for_type( + &mut self, + ty: syn::TypePath, + attributes: impl IntoIterator, + recursive: bool, + ) { + if recursive { + self.attributes_for_type_recursive.entry(ty).or_default().extend(attributes); + } else { + self.attributes_for_type.entry(ty).or_default().extend(attributes); + } + } + + /// Substitute a type at the given path with some type at the second path. During codegen, + /// we will avoid generating the type at the first path given, and instead point any references + /// to that type to the second path given. + /// + /// The substituted type will need to implement the relevant traits to be compatible with the + /// original, and it will need to SCALE encode and SCALE decode in a compatible way. + pub fn set_type_substitute(&mut self, ty: syn::Path, with: syn::Path) { + self.type_substitutes.insert(ty, with); + } + + /// By default, all of the code is generated inside a module `pub mod api {}`. 
We decorate + /// this module with a few attributes to reduce compile warnings and things. You can provide a + /// target module here, allowing you to add additional attributes or inner code items (with the + /// warning that duplicate identifiers will lead to compile errors). + pub fn set_target_module(&mut self, item_mod: syn::ItemMod) { + self.item_mod = item_mod; + } + + /// Set the path to the `subxt` crate. By default, we expect it to be at + /// `::pezkuwi_subxt::ext::pezkuwi_subxt_core`. + /// + /// # Panics + /// + /// Panics if the path provided is not an absolute path. + pub fn set_subxt_crate_path(&mut self, crate_path: syn::Path) { + if absolute_path(crate_path.clone()).is_err() { + // Throw an error here, because otherwise we end up with a harder to comprehend error + // when substitute types don't begin with an absolute path. + panic!( + "The provided crate path must be an absolute path, ie prefixed with '::' or 'crate'" + ); + } + self.crate_path = crate_path; + } + + /// Generate an interface, assuming that the default path to the `subxt` crate is + /// `::pezkuwi_subxt::ext::pezkuwi_subxt_core`. If the `subxt` crate is not available as a top + /// level dependency, use `generate` and provide a valid path to the `subxt` crate. 
+ pub fn generate(self, metadata: Metadata) -> Result { + let crate_path = self.crate_path; + + let mut derives_registry: DerivesRegistry = if self.use_default_derives { + default_derives(&crate_path) + } else { + DerivesRegistry::new() + }; + + derives_registry.add_derives_for_all(self.extra_global_derives); + derives_registry.add_attributes_for_all(self.extra_global_attributes); + + for (ty, derives) in self.derives_for_type { + derives_registry.add_derives_for(ty, derives, false); + } + for (ty, derives) in self.derives_for_type_recursive { + derives_registry.add_derives_for(ty, derives, true); + } + for (ty, attributes) in self.attributes_for_type { + derives_registry.add_attributes_for(ty, attributes, false); + } + for (ty, attributes) in self.attributes_for_type_recursive { + derives_registry.add_attributes_for(ty, attributes, true); + } + + let mut type_substitutes: TypeSubstitutes = if self.use_default_substitutions { + default_substitutes(&crate_path) + } else { + TypeSubstitutes::new() + }; + + for (from, with) in self.type_substitutes { + let abs_path = absolute_path(with).map_err(TypegenError::from)?; + type_substitutes.insert(from, abs_path).map_err(TypegenError::from)?; + } + + let item_mod = self.item_mod; + let generator = RuntimeGenerator::new(metadata); + let should_gen_docs = self.generate_docs; + + if self.runtime_types_only { + generator.generate_runtime_types( + item_mod, + derives_registry, + type_substitutes, + crate_path, + should_gen_docs, + ) + } else { + generator.generate_runtime( + item_mod, + derives_registry, + type_substitutes, + crate_path, + should_gen_docs, + ) + } + } +} + +/// The default [`scale_typegen::TypeGeneratorSettings`], subxt is using for generating code. +/// Useful for emulating subxt's code generation settings from e.g. subxt-explorer. 
+pub fn default_subxt_type_gen_settings() -> TypeGeneratorSettings { + let crate_path: syn::Path = parse_quote!(::pezkuwi_subxt::ext::pezkuwi_subxt_core); + let derives = default_derives(&crate_path); + let substitutes = default_substitutes(&crate_path); + subxt_type_gen_settings(derives, substitutes, &crate_path, true) +} + +fn subxt_type_gen_settings( + derives: scale_typegen::DerivesRegistry, + substitutes: scale_typegen::TypeSubstitutes, + crate_path: &syn::Path, + should_gen_docs: bool, +) -> TypeGeneratorSettings { + // Are we using codec::Encode or codec::Decode derives? + let are_codec_derives_used = derives.default_derives().derives().iter().any(|path| { + let mut segments_backwards = path.segments.iter().rev(); + let ident = segments_backwards.next(); + let module = segments_backwards.next(); + + let is_ident_match = ident.is_some_and(|s| s.ident == "Encode" || s.ident == "Decode"); + let is_module_match = module.is_some_and(|s| s.ident == "codec"); + + is_ident_match && is_module_match + }); + + // If we're inserting the codec derives, we also should use `CompactAs` where necessary. + let compact_as_type_path = + are_codec_derives_used.then(|| parse_quote!(#crate_path::ext::codec::CompactAs)); + + TypeGeneratorSettings { + types_mod_ident: parse_quote!(runtime_types), + should_gen_docs, + derives, + substitutes, + decoded_bits_type_path: Some(parse_quote!(#crate_path::utils::bits::DecodedBits)), + compact_as_type_path, + compact_type_path: Some(parse_quote!(#crate_path::ext::codec::Compact)), + alloc_crate_path: AllocCratePath::Custom(parse_quote!(#crate_path::alloc)), + // Note: even when we don't use codec::Encode and codec::Decode, we need to keep + // #[codec(...)] attributes because `#[codec(skip)]` is still used/important with + // `EncodeAsType` and `DecodeAsType`. + insert_codec_attributes: true, + } +} + +fn default_derives(crate_path: &syn::Path) -> DerivesRegistry { + let encode_crate_path = quote::quote! 
{ #crate_path::ext::scale_encode }.to_string(); + let decode_crate_path = quote::quote! { #crate_path::ext::scale_decode }.to_string(); + + let derives: [syn::Path; 3] = [ + parse_quote!(#crate_path::ext::scale_encode::EncodeAsType), + parse_quote!(#crate_path::ext::scale_decode::DecodeAsType), + parse_quote!(Debug), + ]; + + let attributes: [syn::Attribute; 2] = [ + parse_quote!(#[encode_as_type(crate_path = #encode_crate_path)]), + parse_quote!(#[decode_as_type(crate_path = #decode_crate_path)]), + ]; + + let mut derives_registry = DerivesRegistry::new(); + derives_registry.add_derives_for_all(derives); + derives_registry.add_attributes_for_all(attributes); + derives_registry +} + +fn default_substitutes(crate_path: &syn::Path) -> TypeSubstitutes { + let mut type_substitutes = TypeSubstitutes::new(); + + let defaults: [(syn::Path, syn::Path); 13] = [ + (parse_quote!(bitvec::order::Lsb0), parse_quote!(#crate_path::utils::bits::Lsb0)), + (parse_quote!(bitvec::order::Msb0), parse_quote!(#crate_path::utils::bits::Msb0)), + ( + parse_quote!(pezsp_core::crypto::AccountId32), + parse_quote!(#crate_path::utils::AccountId32), + ), + (parse_quote!(fp_account::AccountId20), parse_quote!(#crate_path::utils::AccountId20)), + ( + parse_quote!(pezsp_runtime::multiaddress::MultiAddress), + parse_quote!(#crate_path::utils::MultiAddress), + ), + (parse_quote!(primitive_types::H160), parse_quote!(#crate_path::utils::H160)), + (parse_quote!(primitive_types::H256), parse_quote!(#crate_path::utils::H256)), + (parse_quote!(primitive_types::H512), parse_quote!(#crate_path::utils::H512)), + ( + parse_quote!(pezframe_support::traits::misc::WrapperKeepOpaque), + parse_quote!(#crate_path::utils::WrapperKeepOpaque), + ), + // BTreeMap and BTreeSet impose an `Ord` constraint on their key types. This + // can cause an issue with generated code that doesn't impl `Ord` by default. 
+ // Decoding them to Vec by default (KeyedVec is just an alias for Vec with + // suitable type params) avoids these issues. + (parse_quote!(BTreeMap), parse_quote!(#crate_path::utils::KeyedVec)), + (parse_quote!(BinaryHeap), parse_quote!(#crate_path::alloc::vec::Vec)), + (parse_quote!(BTreeSet), parse_quote!(#crate_path::alloc::vec::Vec)), + // The `UncheckedExtrinsic(pub Vec)` is part of the runtime API calls. + // The inner bytes represent the encoded extrinsic, however when deriving the + // `EncodeAsType` the bytes would be re-encoded. This leads to the bytes + // being altered by adding the length prefix in front of them. + + // Note: Not sure if this is appropriate or not. The most recent pezkuwi.rs file does not + // have these. + ( + parse_quote!(pezsp_runtime::generic::unchecked_extrinsic::UncheckedExtrinsic), + parse_quote!(#crate_path::utils::UncheckedExtrinsic), + ), + ]; + + let defaults = defaults.into_iter().map(|(from, to)| { + (from, absolute_path(to).expect("default substitutes above are absolute paths; qed")) + }); + type_substitutes + .extend(defaults) + .expect("default substitutes can always be parsed; qed"); + type_substitutes +} diff --git a/vendor/pezkuwi-subxt/core/Cargo.toml b/vendor/pezkuwi-subxt/core/Cargo.toml new file mode 100644 index 00000000..2f96e8e6 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/Cargo.toml @@ -0,0 +1,83 @@ +[package] +name = "pezkuwi-subxt-core" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "A no-std compatible subset of Subxt's functionality" +keywords = ["extrinsic", "no-std", "parity", "subxt"] + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-metadata/std", + "hex/std", + "impl-serde/std", + "pezkuwi-subxt-metadata/std", + "pezsp-core/std", + 
"pezsp-crypto-hashing/std", + "pezsp-keyring/std", + "primitive-types/std", + "scale-info/std", + "serde/std", + "serde_json/std", + "tracing/std", +] + +[dependencies] +codec = { package = "parity-scale-codec", workspace = true, default-features = false, features = ["derive"] } +derive-where = { workspace = true } +frame-decode = { workspace = true } +frame-metadata = { workspace = true, default-features = false } +hashbrown = { workspace = true } +hex = { workspace = true } +pezkuwi-subxt-metadata = { workspace = true, default-features = false } +pezsp-crypto-hashing = { workspace = true } +scale-bits = { workspace = true, default-features = false } +scale-decode = { workspace = true, default-features = false, features = ["derive", "primitive-types"] } +scale-encode = { workspace = true, default-features = false, features = ["bits", "derive", "primitive-types"] } +scale-info = { workspace = true, default-features = false, features = ["bit-vec"] } +scale-value = { workspace = true, default-features = false } +serde = { workspace = true, default-features = false, features = ["derive"] } +serde_json = { workspace = true, default-features = false, features = ["alloc", "raw_value"] } +thiserror = { workspace = true, default-features = false } +tracing = { workspace = true, default-features = false } + +# For ss58 encoding AccountId32 to serialize them properly: +base58 = { workspace = true } +blake2 = { workspace = true } + +# Provides some deserialization, types like U256/H256 and hashing impls like twox/blake256: +impl-serde = { workspace = true, default-features = false } +primitive-types = { workspace = true, default-features = false, features = ["codec", "scale-info", "serde_no_std"] } + +# AccountId20 +keccak-hash = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +bitvec = { workspace = true } +codec = { workspace = true, features = ["bit-vec", "derive"] } +hex = { workspace = true } +pezkuwi-subxt-macro = { workspace = true } 
+pezkuwi-subxt-signer = { workspace = true, features = ["sr25519", "subxt"] } +pezsp-core = { workspace = true } +pezsp-keyring = { workspace = true } + +[package.metadata.docs.rs] +default-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/core/README.md b/vendor/pezkuwi-subxt/core/README.md new file mode 100644 index 00000000..fb25fabf --- /dev/null +++ b/vendor/pezkuwi-subxt/core/README.md @@ -0,0 +1,3 @@ +# Subxt-Core + +This library provides a no-std compatible subset of functionality that `subxt` and `subxt-signer` rely on. \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/core/src/blocks/extrinsic_transaction_extensions.rs b/vendor/pezkuwi-subxt/core/src/blocks/extrinsic_transaction_extensions.rs new file mode 100644 index 00000000..2d5c8124 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/blocks/extrinsic_transaction_extensions.rs @@ -0,0 +1,146 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + Metadata, + config::{ + Config, TransactionExtension, + transaction_extensions::{ChargeAssetTxPayment, ChargeTransactionPayment, CheckNonce}, + }, + dynamic::Value, + error::ExtrinsicError, +}; +use alloc::borrow::ToOwned; +use frame_decode::extrinsics::ExtrinsicExtensions; +use scale_decode::DecodeAsType; + +/// The signed extensions of an extrinsic. 
+#[derive(Debug, Clone)] +pub struct ExtrinsicTransactionExtensions<'a, T: Config> { + bytes: &'a [u8], + metadata: &'a Metadata, + decoded_info: &'a ExtrinsicExtensions<'static, u32>, + _marker: core::marker::PhantomData, +} + +impl<'a, T: Config> ExtrinsicTransactionExtensions<'a, T> { + pub(crate) fn new( + bytes: &'a [u8], + metadata: &'a Metadata, + decoded_info: &'a ExtrinsicExtensions<'static, u32>, + ) -> Self { + Self { bytes, metadata, decoded_info, _marker: core::marker::PhantomData } + } + + /// Returns an iterator over each of the signed extension details of the extrinsic. + pub fn iter(&self) -> impl Iterator> + use<'a, T> { + self.decoded_info.iter().map(|s| ExtrinsicTransactionExtension { + bytes: &self.bytes[s.range()], + ty_id: *s.ty(), + identifier: s.name(), + metadata: self.metadata, + _marker: core::marker::PhantomData, + }) + } + + /// Searches through all signed extensions to find a specific one. + /// If the Signed Extension is not found `Ok(None)` is returned. + /// If the Signed Extension is found but decoding failed `Err(_)` is returned. + pub fn find>(&self) -> Result, ExtrinsicError> { + for ext in self.iter() { + match ext.as_signed_extension::() { + // We found a match; return it: + Ok(Some(e)) => return Ok(Some(e)), + // No error, but no match either; next! + Ok(None) => continue, + // Error? return it + Err(e) => return Err(e), + } + } + Ok(None) + } + + /// The tip of an extrinsic, extracted from the ChargeTransactionPayment or ChargeAssetTxPayment + /// signed extension, depending on which is present. + /// + /// Returns `None` if `tip` was not found or decoding failed. + pub fn tip(&self) -> Option { + // Note: the overhead of iterating multiple time should be negligible. + self.find::() + .ok() + .flatten() + .map(|e| e.tip()) + .or_else(|| self.find::>().ok().flatten().map(|e| e.tip())) + } + + /// The nonce of the account that submitted the extrinsic, extracted from the CheckNonce signed + /// extension. 
+ /// + /// Returns `None` if `nonce` was not found or decoding failed. + pub fn nonce(&self) -> Option { + self.find::().ok()? + } +} + +/// A single signed extension +#[derive(Debug, Clone)] +pub struct ExtrinsicTransactionExtension<'a, T: Config> { + bytes: &'a [u8], + ty_id: u32, + identifier: &'a str, + metadata: &'a Metadata, + _marker: core::marker::PhantomData, +} + +impl<'a, T: Config> ExtrinsicTransactionExtension<'a, T> { + /// The bytes representing this signed extension. + pub fn bytes(&self) -> &'a [u8] { + self.bytes + } + + /// The name of the signed extension. + pub fn name(&self) -> &'a str { + self.identifier + } + + /// The type id of the signed extension. + pub fn type_id(&self) -> u32 { + self.ty_id + } + + /// Signed Extension as a [`scale_value::Value`] + pub fn value(&self) -> Result, ExtrinsicError> { + let value = scale_value::scale::decode_as_type( + &mut &self.bytes[..], + self.ty_id, + self.metadata.types(), + ) + .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { + name: self.identifier.to_owned(), + error: e.into(), + })?; + Ok(value) + } + + /// Decodes the bytes of this Signed Extension into its associated `Decoded` type. + /// Returns `Ok(None)` if the data we have doesn't match the Signed Extension we're asking to + /// decode with. 
+ pub fn as_signed_extension>( + &self, + ) -> Result, ExtrinsicError> { + if !S::matches(self.identifier, self.ty_id, self.metadata.types()) { + return Ok(None); + } + self.as_type::().map(Some) + } + + fn as_type(&self) -> Result { + let value = E::decode_as_type(&mut &self.bytes[..], self.ty_id, self.metadata.types()) + .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { + name: self.identifier.to_owned(), + error: e, + })?; + Ok(value) + } +} diff --git a/vendor/pezkuwi-subxt/core/src/blocks/extrinsics.rs b/vendor/pezkuwi-subxt/core/src/blocks/extrinsics.rs new file mode 100644 index 00000000..cbbd88f2 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/blocks/extrinsics.rs @@ -0,0 +1,596 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + Metadata, + blocks::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions, + config::{Config, HashFor, Hasher}, + error::{ExtrinsicDecodeErrorAt, ExtrinsicDecodeErrorAtReason, ExtrinsicError}, +}; +use alloc::{sync::Arc, vec::Vec}; +use frame_decode::extrinsics::Extrinsic; +use scale_decode::{DecodeAsFields, DecodeAsType}; + +pub use crate::blocks::StaticExtrinsic; + +/// The body of a block. +pub struct Extrinsics { + extrinsics: Vec, Vec)>>, + metadata: Metadata, + hasher: T::Hasher, + _marker: core::marker::PhantomData, +} + +impl Extrinsics { + /// Instantiate a new [`Extrinsics`] object, given a vector containing + /// each extrinsic hash (in the form of bytes) and some metadata that + /// we'll use to decode them. + pub fn decode_from( + extrinsics: Vec>, + metadata: Metadata, + ) -> Result { + let hasher = T::Hasher::new(&metadata); + let extrinsics = extrinsics + .into_iter() + .enumerate() + .map(|(extrinsic_index, bytes)| { + let cursor = &mut &*bytes; + + // Try to decode the extrinsic. 
+ let decoded_info = + frame_decode::extrinsics::decode_extrinsic(cursor, &metadata, metadata.types()) + .map_err(|error| ExtrinsicDecodeErrorAt { + extrinsic_index, + error: ExtrinsicDecodeErrorAtReason::DecodeError(error), + })? + .into_owned(); + + // We didn't consume all bytes, so decoding probably failed. + if !cursor.is_empty() { + return Err(ExtrinsicDecodeErrorAt { + extrinsic_index, + error: ExtrinsicDecodeErrorAtReason::LeftoverBytes(cursor.to_vec()), + }); + } + + Ok(Arc::new((decoded_info, bytes))) + }) + .collect::>()?; + + Ok(Self { extrinsics, hasher, metadata, _marker: core::marker::PhantomData }) + } + + /// The number of extrinsics. + pub fn len(&self) -> usize { + self.extrinsics.len() + } + + /// Are there no extrinsics in this block? + // Note: mainly here to satisfy clippy. + pub fn is_empty(&self) -> bool { + self.extrinsics.is_empty() + } + + /// Returns an iterator over the extrinsics in the block body. + // Dev note: The returned iterator is 'static + Send so that we can box it up and make + // use of it with our `FilterExtrinsic` stuff. + pub fn iter(&self) -> impl Iterator> + Send + Sync + 'static { + let extrinsics = self.extrinsics.clone(); + let num_extrinsics = self.extrinsics.len(); + let hasher = self.hasher; + let metadata = self.metadata.clone(); + + (0..num_extrinsics).map(move |index| { + ExtrinsicDetails::new(index as u32, extrinsics[index].clone(), hasher, metadata.clone()) + }) + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return only those which should decode to the provided `E` type. + /// If an error occurs, all subsequent iterations return `None`. 
+ pub fn find( + &self, + ) -> impl Iterator, ExtrinsicError>> { + self.iter().filter_map(|details| { + match details.as_extrinsic::() { + // Failed to decode extrinsic: + Err(err) => Some(Err(err)), + // Extrinsic for a different pallet / different call (skip): + Ok(None) => None, + Ok(Some(value)) => Some(Ok(FoundExtrinsic { details, value })), + } + }) + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return the first extrinsic found which decodes to the provided `E` type. + pub fn find_first( + &self, + ) -> Result>, ExtrinsicError> { + self.find::().next().transpose() + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return the last extrinsic found which decodes to the provided `Ev` type. + pub fn find_last( + &self, + ) -> Result>, ExtrinsicError> { + self.find::().last().transpose() + } + + /// Find an extrinsics that decodes to the type provided. Returns true if it was found. + pub fn has(&self) -> Result { + Ok(self.find::().next().transpose()?.is_some()) + } +} + +/// A single extrinsic in a block. +pub struct ExtrinsicDetails { + /// The index of the extrinsic in the block. + index: u32, + /// Extrinsic bytes and decode info. + ext: Arc<(Extrinsic<'static, u32>, Vec)>, + /// Hash the extrinsic if we want. + hasher: T::Hasher, + /// Subxt metadata to fetch the extrinsic metadata. + metadata: Metadata, + _marker: core::marker::PhantomData, +} + +impl ExtrinsicDetails +where + T: Config, +{ + // Attempt to dynamically decode a single extrinsic from the given input. + #[doc(hidden)] + pub fn new( + index: u32, + ext: Arc<(Extrinsic<'static, u32>, Vec)>, + hasher: T::Hasher, + metadata: Metadata, + ) -> ExtrinsicDetails { + ExtrinsicDetails { index, ext, hasher, metadata, _marker: core::marker::PhantomData } + } + + /// Calculate and return the hash of the extrinsic, based on the configured hasher. 
+ pub fn hash(&self) -> HashFor { + // Use hash(), not hash_of(), because we don't want to double encode the bytes. + self.hasher.hash(self.bytes()) + } + + /// Is the extrinsic signed? + pub fn is_signed(&self) -> bool { + self.decoded_info().is_signed() + } + + /// The index of the extrinsic in the block. + pub fn index(&self) -> u32 { + self.index + } + + /// Return _all_ of the bytes representing this extrinsic, which include, in order: + /// - First byte: abbbbbbb (a = 0 for unsigned, 1 for signed, b = version) + /// - SignatureType (if the payload is signed) + /// - Address + /// - Signature + /// - Extra fields + /// - Extrinsic call bytes + pub fn bytes(&self) -> &[u8] { + &self.ext.1 + } + + /// Return only the bytes representing this extrinsic call: + /// - First byte is the pallet index + /// - Second byte is the variant (call) index + /// - Followed by field bytes. + /// + /// # Note + /// + /// Please use [`Self::bytes`] if you want to get all extrinsic bytes. + pub fn call_bytes(&self) -> &[u8] { + &self.bytes()[self.decoded_info().call_data_range()] + } + + /// Return the bytes representing the fields stored in this extrinsic. + /// + /// # Note + /// + /// This is a subset of [`Self::call_bytes`] that does not include the + /// first two bytes that denote the pallet index and the variant index. + pub fn field_bytes(&self) -> &[u8] { + // Note: this cannot panic because we checked the extrinsic bytes + // to contain at least two bytes. + &self.bytes()[self.decoded_info().call_data_args_range()] + } + + /// Return only the bytes of the address that signed this extrinsic. + /// + /// # Note + /// + /// Returns `None` if the extrinsic is not signed. + pub fn address_bytes(&self) -> Option<&[u8]> { + self.decoded_info() + .signature_payload() + .map(|s| &self.bytes()[s.address_range()]) + } + + /// Returns Some(signature_bytes) if the extrinsic was signed otherwise None is returned. 
+ pub fn signature_bytes(&self) -> Option<&[u8]> { + self.decoded_info() + .signature_payload() + .map(|s| &self.bytes()[s.signature_range()]) + } + + /// Returns the signed extension `extra` bytes of the extrinsic. + /// Each signed extension has an `extra` type (May be zero-sized). + /// These bytes are the scale encoded `extra` fields of each signed extension in order of the + /// signed extensions. They do *not* include the `additional` signed bytes that are used as + /// part of the payload that is signed. + /// + /// Note: Returns `None` if the extrinsic is not signed. + pub fn transaction_extensions_bytes(&self) -> Option<&[u8]> { + self.decoded_info() + .transaction_extension_payload() + .map(|t| &self.bytes()[t.range()]) + } + + /// Returns `None` if the extrinsic is not signed. + pub fn transaction_extensions(&self) -> Option> { + self.decoded_info() + .transaction_extension_payload() + .map(|t| ExtrinsicTransactionExtensions::new(self.bytes(), &self.metadata, t)) + } + + /// The index of the pallet that the extrinsic originated from. + pub fn pallet_index(&self) -> u8 { + self.decoded_info().pallet_index() + } + + /// The index of the extrinsic variant that the extrinsic originated from. + pub fn call_index(&self) -> u8 { + self.decoded_info().call_index() + } + + /// The name of the pallet from whence the extrinsic originated. + pub fn pallet_name(&self) -> &str { + self.decoded_info().pallet_name() + } + + /// The name of the call (ie the name of the variant that it corresponds to). + pub fn call_name(&self) -> &str { + self.decoded_info().call_name() + } + + /// Decode and provide the extrinsic fields back in the form of a [`scale_value::Composite`] + /// type which represents the named or unnamed fields that were present in the extrinsic. 
+ pub fn decode_as_fields(&self) -> Result { + let bytes = &mut self.field_bytes(); + let mut fields = self.decoded_info().call_data().map(|d| { + let name = if d.name().is_empty() { None } else { Some(d.name()) }; + scale_decode::Field::new(*d.ty(), name) + }); + let decoded = + E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { + ExtrinsicError::CannotDecodeFields { + extrinsic_index: self.index as usize, + error: e, + } + })?; + + Ok(decoded) + } + + /// Attempt to decode these [`ExtrinsicDetails`] into a type representing the extrinsic fields. + /// Such types are exposed in the codegen as `pallet_name::calls::types::CallName` types. + pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { + if self.decoded_info().pallet_name() == E::PALLET && + self.decoded_info().call_name() == E::CALL + { + let mut fields = self.decoded_info().call_data().map(|d| { + let name = if d.name().is_empty() { None } else { Some(d.name()) }; + scale_decode::Field::new(*d.ty(), name) + }); + let decoded = + E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) + .map_err(|e| ExtrinsicError::CannotDecodeFields { + extrinsic_index: self.index as usize, + error: e, + })?; + Ok(Some(decoded)) + } else { + Ok(None) + } + } + + /// Attempt to decode these [`ExtrinsicDetails`] into an outer call enum type (which includes + /// the pallet and extrinsic enum variants as well as the extrinsic fields). A compatible + /// type for this is exposed via static codegen as a root level `Call` type. 
// NOTE(review): the generic parameter was lost in transit; reconstructed from the
// `E::decode_as_type(...)` call in the body — confirm against the vendored source.
pub fn as_root_extrinsic<E: scale_decode::DecodeAsType>(&self) -> Result<E, ExtrinsicError> {
    let decoded = E::decode_as_type(
        &mut &self.call_bytes()[..],
        self.metadata.outer_enums().call_enum_ty(),
        self.metadata.types(),
    )
    .map_err(|e| ExtrinsicError::CannotDecodeIntoRootExtrinsic {
        extrinsic_index: self.index as usize,
        error: e,
    })?;

    Ok(decoded)
}

// The cached, pre-decoded extrinsic info that all of the accessors above read from.
fn decoded_info(&self) -> &Extrinsic<'static, u32> {
    &self.ext.0
}
}

/// A Static Extrinsic found in a block coupled with its details.
// NOTE(review): generic parameters lost in transit; `<T: Config, E>` matches the field
// types below and upstream subxt — confirm against the vendored source.
pub struct FoundExtrinsic<T: Config, E> {
    /// Details for the extrinsic.
    pub details: ExtrinsicDetails<T>,
    /// The decoded extrinsic value.
    pub value: E,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::BizinikiwConfig;
    use assert_matches::assert_matches;
    use codec::{Decode, Encode};
    use frame_metadata::{
        RuntimeMetadataPrefixed,
        v15::{
            CustomMetadata, ExtrinsicMetadata, OuterEnums, PalletCallMetadata, PalletMetadata,
            RuntimeMetadataV15,
        },
    };
    use scale_info::{TypeInfo, meta_type};
    use scale_value::Value;

    // Extrinsic needs to contain at least the generic type parameter "Call"
    // for the metadata to be valid.
    // The "Call" type from the metadata is used to decode extrinsics.
    #[allow(unused)]
    #[derive(TypeInfo)]
    struct ExtrinsicType<Address, Call, Signature, Extra> {
        pub signature: Option<(Address, Signature, Extra)>,
        pub function: Call,
    }

    // Because this type is used to decode extrinsics, we expect this to be a TypeDefVariant.
    // Each pallet must contain one single variant.
    #[allow(unused)]
    #[derive(
        Encode,
        Decode,
        TypeInfo,
        Clone,
        Debug,
        PartialEq,
        Eq,
        scale_encode::EncodeAsType,
        scale_decode::DecodeAsType,
    )]
    enum RuntimeCall {
        Test(Pallet),
    }

    // The calls of the pallet.
+ #[allow(unused)] + #[derive( + Encode, + Decode, + TypeInfo, + Clone, + Debug, + PartialEq, + Eq, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + )] + enum Pallet { + #[allow(unused)] + #[codec(index = 2)] + TestCall { value: u128, signed: bool, name: String }, + } + + #[allow(unused)] + #[derive( + Encode, + Decode, + TypeInfo, + Clone, + Debug, + PartialEq, + Eq, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + )] + struct TestCallExtrinsic { + value: u128, + signed: bool, + name: String, + } + + impl StaticExtrinsic for TestCallExtrinsic { + const PALLET: &'static str = "Test"; + const CALL: &'static str = "TestCall"; + } + + /// Build fake metadata consisting the types needed to represent an extrinsic. + fn metadata() -> Metadata { + let pallets = vec![PalletMetadata { + name: "Test", + storage: None, + calls: Some(PalletCallMetadata { ty: meta_type::() }), + event: None, + constants: vec![], + error: None, + index: 0, + docs: vec![], + }]; + + let extrinsic = ExtrinsicMetadata { + version: 4, + signed_extensions: vec![], + address_ty: meta_type::<()>(), + call_ty: meta_type::(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), + }; + + let meta = RuntimeMetadataV15::new( + pallets, + extrinsic, + meta_type::<()>(), + vec![], + OuterEnums { + call_enum_ty: meta_type::(), + event_enum_ty: meta_type::<()>(), + error_enum_ty: meta_type::<()>(), + }, + CustomMetadata { map: Default::default() }, + ); + let runtime_metadata: RuntimeMetadataPrefixed = meta.into(); + let metadata: pezkuwi_subxt_metadata::Metadata = runtime_metadata.try_into().unwrap(); + + metadata + } + + #[test] + fn extrinsic_metadata_consistency() { + let metadata = metadata(); + + // Except our metadata to contain the registered types. 
+ let pallet = metadata.pallet_by_call_index(0).expect("pallet exists"); + let extrinsic = pallet + .call_variant_by_index(2) + .expect("metadata contains the RuntimeCall enum with this pallet"); + + assert_eq!(pallet.name(), "Test"); + assert_eq!(&extrinsic.name, "TestCall"); + } + + #[test] + fn insufficient_extrinsic_bytes() { + let metadata = metadata(); + + // Decode with empty bytes. + let result = Extrinsics::::decode_from(vec![vec![]], metadata); + assert_matches!( + result.err(), + Some(crate::error::ExtrinsicDecodeErrorAt { extrinsic_index: 0, error: _ }) + ); + } + + #[test] + fn unsupported_version_extrinsic() { + use frame_decode::extrinsics::ExtrinsicDecodeError; + + let metadata = metadata(); + + // Decode with invalid version. + let result = Extrinsics::::decode_from(vec![vec![3u8].encode()], metadata); + + assert_matches!( + result.err(), + Some(crate::error::ExtrinsicDecodeErrorAt { + extrinsic_index: 0, + error: ExtrinsicDecodeErrorAtReason::DecodeError( + ExtrinsicDecodeError::VersionNotSupported(3) + ), + }) + ); + } + + #[test] + fn tx_hashes_line_up() { + let metadata = metadata(); + let hasher = ::Hasher::new(&metadata); + + let tx = crate::dynamic::tx( + "Test", + "TestCall", + vec![Value::u128(10), Value::bool(true), Value::string("SomeValue")], + ); + + // Encoded TX ready to submit. + let tx_encoded = crate::tx::create_v4_unsigned::(&tx, &metadata) + .expect("Valid dynamic parameters are provided"); + + // Extrinsic details ready to decode. + let extrinsics = Extrinsics::::decode_from( + vec![tx_encoded.encoded().to_owned()], + metadata, + ) + .expect("Valid extrinsic"); + + let extrinsic = extrinsics.iter().next().unwrap(); + + // Both of these types should produce the same bytes. + assert_eq!(tx_encoded.encoded(), extrinsic.bytes(), "bytes should eq"); + // Both of these types should produce the same hash. 
+ assert_eq!(tx_encoded.hash_with(hasher), extrinsic.hash(), "hashes should eq"); + } + + #[test] + fn statically_decode_extrinsic() { + let metadata = metadata(); + + let tx = crate::dynamic::tx( + "Test", + "TestCall", + vec![Value::u128(10), Value::bool(true), Value::string("SomeValue")], + ); + let tx_encoded = crate::tx::create_v4_unsigned::(&tx, &metadata) + .expect("Valid dynamic parameters are provided"); + + // Note: `create_unsigned` produces the extrinsic bytes by prefixing the extrinsic length. + // The length is handled deserializing `ChainBlockExtrinsic`, therefore the first byte is + // not needed. + let extrinsics = Extrinsics::::decode_from( + vec![tx_encoded.encoded().to_owned()], + metadata, + ) + .expect("Valid extrinsic"); + + let extrinsic = extrinsics.iter().next().unwrap(); + + assert!(!extrinsic.is_signed()); + + assert_eq!(extrinsic.index(), 0); + + assert_eq!(extrinsic.pallet_index(), 0); + assert_eq!(extrinsic.pallet_name(), "Test"); + + assert_eq!(extrinsic.call_index(), 2); + assert_eq!(extrinsic.call_name(), "TestCall"); + + // Decode the extrinsic to the root enum. + let decoded_extrinsic = extrinsic + .as_root_extrinsic::() + .expect("can decode extrinsic to root enum"); + + assert_eq!( + decoded_extrinsic, + RuntimeCall::Test(Pallet::TestCall { + value: 10, + signed: true, + name: "SomeValue".into(), + }) + ); + + // Decode the extrinsic to the extrinsic variant. + let decoded_extrinsic = extrinsic + .as_extrinsic::() + .expect("can decode extrinsic to extrinsic variant") + .expect("value cannot be None"); + + assert_eq!( + decoded_extrinsic, + TestCallExtrinsic { value: 10, signed: true, name: "SomeValue".into() } + ); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/blocks/mod.rs b/vendor/pezkuwi-subxt/core/src/blocks/mod.rs new file mode 100644 index 00000000..886098f0 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/blocks/mod.rs @@ -0,0 +1,89 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Decode and iterate over the extrinsics in block bodies. +//! +//! Use the [`decode_from`] function as an entry point to decoding extrinsics, and then +//! have a look at [`Extrinsics`] and [`ExtrinsicDetails`] to see which methods are available +//! to work with the extrinsics. +//! +//! # Example +//! +//! ```rust +//! extern crate alloc; +//! +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::blocks; +//! use pezkuwi_subxt_core::Metadata; +//! use pezkuwi_subxt_core::config::PezkuwiConfig; +//! use alloc::vec; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'd like to use to help us decode extrinsics: +//! let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // Some extrinsics we'd like to decode: +//! let ext_bytes = vec![ +//! hex::decode("1004020000").unwrap(), +//! hex::decode("c10184001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c01a27c400241aeafdea1871b32f1f01e92acd272ddfe6b2f8b73b64c606572a530c470a94ef654f7baa5828474754a1fe31b59f91f6bb5c2cd5a07c22d4b8b8387350100000000001448656c6c6f").unwrap(), +//! hex::decode("550284001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0144bb92734447c893ab16d520fae0d455257550efa28ee66bf6dc942cb8b00d5d2799b98bc2865d21812278a9a266acd7352f40742ff11a6ce1f400013961598485010000000400008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a481700505a4f7e9f4eb106").unwrap() +//! ]; +//! +//! // Given some chain config and metadata, we know how to decode the bytes. +//! 
let exts = blocks::decode_from::(ext_bytes, metadata).unwrap(); +//! +//! // We'll see 3 extrinsics: +//! assert_eq!(exts.len(), 3); +//! +//! // We can iterate over them and decode various details out of them. +//! for ext in exts.iter() { +//! println!("Pallet: {}", ext.pallet_name()); +//! println!("Call: {}", ext.call_name()); +//! } +//! +//! # let ext_details: Vec<_> = exts.iter() +//! # .map(|ext| { +//! # let pallet = ext.pallet_name().to_string(); +//! # let call = ext.call_name().to_string(); +//! # (pallet, call) +//! # }) +//! # .collect(); +//! # +//! # assert_eq!(ext_details, vec![ +//! # ("Timestamp".to_owned(), "set".to_owned()), +//! # ("System".to_owned(), "remark".to_owned()), +//! # ("Balances".to_owned(), "transfer_allow_death".to_owned()), +//! # ]); +//! ``` + +mod extrinsic_transaction_extensions; +mod extrinsics; +mod static_extrinsic; + +pub use crate::error::ExtrinsicError; +use crate::{Metadata, config::Config, error::ExtrinsicDecodeErrorAt}; +use alloc::vec::Vec; +pub use extrinsic_transaction_extensions::{ + ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, +}; +pub use extrinsics::{ExtrinsicDetails, Extrinsics, FoundExtrinsic}; +pub use static_extrinsic::StaticExtrinsic; + +/// Instantiate a new [`Extrinsics`] object, given a vector containing each extrinsic hash (in the +/// form of bytes) and some metadata that we'll use to decode them. +/// +/// This is a shortcut for [`Extrinsics::decode_from`]. +pub fn decode_from( + extrinsics: Vec>, + metadata: Metadata, +) -> Result, ExtrinsicDecodeErrorAt> { + Extrinsics::decode_from(extrinsics, metadata) +} diff --git a/vendor/pezkuwi-subxt/core/src/blocks/static_extrinsic.rs b/vendor/pezkuwi-subxt/core/src/blocks/static_extrinsic.rs new file mode 100644 index 00000000..f462f750 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/blocks/static_extrinsic.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use scale_decode::DecodeAsFields; + +/// Trait to uniquely identify the extrinsic's identity from the runtime metadata. +/// +/// Generated API structures that represent an extrinsic implement this trait. +/// +/// The trait is utilized to decode emitted extrinsics from a block, via obtaining the +/// form of the `Extrinsic` from the metadata. +pub trait StaticExtrinsic: DecodeAsFields { + /// Pallet name. + const PALLET: &'static str; + /// Call name. + const CALL: &'static str; + + /// Returns true if the given pallet and call names match this extrinsic. + fn is_extrinsic(pallet: &str, call: &str) -> bool { + Self::PALLET == pallet && Self::CALL == call + } +} diff --git a/vendor/pezkuwi-subxt/core/src/client.rs b/vendor/pezkuwi-subxt/core/src/client.rs new file mode 100644 index 00000000..968b8711 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/client.rs @@ -0,0 +1,42 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A couple of client types that we use elsewhere. + +use crate::{ + Metadata, + config::{Config, HashFor}, +}; +use derive_where::derive_where; + +/// This provides access to some relevant client state in transaction extensions, +/// and is just a combination of some of the available properties. +#[derive_where(Clone, Debug)] +pub struct ClientState { + /// Genesis hash. + pub genesis_hash: HashFor, + /// Runtime version. + pub runtime_version: RuntimeVersion, + /// Metadata. + pub metadata: Metadata, +} + +/// Runtime version information needed to submit transactions. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct RuntimeVersion { + /// Version of the runtime specification. 
A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. + /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, +} diff --git a/vendor/pezkuwi-subxt/core/src/config/bizinikiwi.rs b/vendor/pezkuwi-subxt/core/src/config/bizinikiwi.rs new file mode 100644 index 00000000..efcf47d5 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/bizinikiwi.rs @@ -0,0 +1,392 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Bizinikiwi specific configuration + +use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, Hasher, Header}; +pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; +use alloc::{format, vec::Vec}; +use codec::{Decode, Encode}; +use pezkuwi_subxt_metadata::Metadata; +pub use primitive_types::{H256, U256}; +use serde::{Deserialize, Serialize}; + +/// Default set of commonly used types by Bizinikiwi runtimes. +// Note: We only use this at the type level, so it should be impossible to +// create an instance of it. +// The trait implementations exist just to make life easier, +// but shouldn't strictly be necessary since users can't instantiate this type. 
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub enum BizinikiwConfig {} + +impl Config for BizinikiwConfig { + type AccountId = AccountId32; + type Address = MultiAddress; + type Signature = MultiSignature; + type Hasher = DynamicHasher256; + type Header = BizinikiwiHeader; + type ExtrinsicParams = BizinikiwiExtrinsicParams; + type AssetId = u32; +} + +/// A struct representing the signed extra and additional parameters required +/// to construct a transaction for the default bizinikiwi node. +pub type BizinikiwiExtrinsicParams = DefaultExtrinsicParams; + +/// A builder which leads to [`BizinikiwiExtrinsicParams`] being constructed. +/// This is what you provide to methods like `sign_and_submit()`. +pub type BizinikiwiExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; + +/// A hasher (ie implements [`Hasher`]) which hashes values using the blaks2_256 algorithm. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct BlakeTwo256; + +impl Hasher for BlakeTwo256 { + type Output = H256; + + fn new(_metadata: &Metadata) -> Self { + Self + } + + fn hash(&self, s: &[u8]) -> Self::Output { + pezsp_crypto_hashing::blake2_256(s).into() + } +} + +/// A hasher (ie implements [`Hasher`]) which inspects the runtime metadata to decide how to +/// hash types, falling back to blake2_256 if the hasher information is not available. +/// +/// Currently this hasher supports only `BlakeTwo256` and `Keccak256` hashing methods. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct DynamicHasher256(HashType); + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum HashType { + // Most chains use this: + BlakeTwo256, + // Chains like Hyperbridge use this (tends to be eth compatible chains) + Keccak256, + // If we don't have V16 metadata, we'll emit this and default to BlakeTwo256. 
+ Unknown, +} + +impl Hasher for DynamicHasher256 { + type Output = H256; + + fn new(metadata: &Metadata) -> Self { + // Determine the Hash associated type used for the current chain, if possible. + let Some(system_pallet) = metadata.pallet_by_name("System") else { + return Self(HashType::Unknown); + }; + let Some(hash_ty_id) = system_pallet.associated_type_id("Hashing") else { + return Self(HashType::Unknown); + }; + + let ty = metadata + .types() + .resolve(hash_ty_id) + .expect("Type information for 'Hashing' associated type should be in metadata"); + + let hash_type = match ty.path.ident().as_deref().unwrap_or("") { + "BlakeTwo256" => HashType::BlakeTwo256, + "Keccak256" => HashType::Keccak256, + _ => HashType::Unknown, + }; + + Self(hash_type) + } + + fn hash(&self, s: &[u8]) -> Self::Output { + match self.0 { + HashType::BlakeTwo256 | HashType::Unknown => pezsp_crypto_hashing::blake2_256(s).into(), + HashType::Keccak256 => pezsp_crypto_hashing::keccak_256(s).into(), + } + } +} + +/// A generic Bizinikiwi header type, adapted from `sp_runtime::generic::Header`. +/// The block number and hasher can be configured to adapt this for other nodes. +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BizinikiwiHeader + TryFrom, H: Hasher> { + /// The parent hash. + pub parent_hash: H::Output, + /// The block number. + #[serde(serialize_with = "serialize_number", deserialize_with = "deserialize_number")] + #[codec(compact)] + pub number: N, + /// The state trie merkle root + pub state_root: H::Output, + /// The merkle root of the extrinsics. + pub extrinsics_root: H::Output, + /// A chain-specific digest of data useful for light clients or referencing auxiliary data. 
+ pub digest: Digest, +} + +impl Header for BizinikiwiHeader +where + N: Copy + Into + Into + TryFrom + Encode, + H: Hasher, + BizinikiwiHeader: Encode + Decode, +{ + type Number = N; + type Hasher = H; + + fn number(&self) -> Self::Number { + self.number + } +} + +/// Generic header digest. From `sp_runtime::generic::digest`. +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +pub struct Digest { + /// A list of digest items. + pub logs: Vec, +} + +/// Digest item that is able to encode/decode 'system' digest items and +/// provide opaque access to other items. From `sp_runtime::generic::digest`. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum DigestItem { + /// A pre-runtime digest. + /// + /// These are messages from the consensus engine to the runtime, although + /// the consensus engine can (and should) read them itself to avoid + /// code and state duplication. It is erroneous for a runtime to produce + /// these, but this is not (yet) checked. + /// + /// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` + /// call if an expected `PreRuntime` digest is not present. It is the + /// responsibility of a external block verifier to check this. Runtime API calls + /// will initialize the block without pre-runtime digests, so initialization + /// cannot fail when they are missing. + PreRuntime(ConsensusEngineId, Vec), + + /// A message from the runtime to the consensus engine. This should *never* + /// be generated by the native code of any consensus engine, but this is not + /// checked (yet). + Consensus(ConsensusEngineId, Vec), + + /// Put a Seal on it. This is only used by native code, and is never seen + /// by runtimes. + Seal(ConsensusEngineId, Vec), + + /// Some other thing. Unsupported and experimental. + Other(Vec), + + /// An indication for the light clients that the runtime execution + /// environment is updated. + /// + /// Currently this is triggered when: + /// 1. 
Runtime code blob is changed or + /// 2. `heap_pages` value is changed. + RuntimeEnvironmentUpdated, +} + +// From sp_runtime::generic, DigestItem enum indexes are encoded using this: +#[repr(u32)] +#[derive(Encode, Decode)] +enum DigestItemType { + Other = 0u32, + Consensus = 4u32, + Seal = 5u32, + PreRuntime = 6u32, + RuntimeEnvironmentUpdated = 8u32, +} +impl Encode for DigestItem { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + match self { + Self::Consensus(val, data) => { + DigestItemType::Consensus.encode_to(&mut v); + (val, data).encode_to(&mut v); + }, + Self::Seal(val, sig) => { + DigestItemType::Seal.encode_to(&mut v); + (val, sig).encode_to(&mut v); + }, + Self::PreRuntime(val, data) => { + DigestItemType::PreRuntime.encode_to(&mut v); + (val, data).encode_to(&mut v); + }, + Self::Other(val) => { + DigestItemType::Other.encode_to(&mut v); + val.encode_to(&mut v); + }, + Self::RuntimeEnvironmentUpdated => { + DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); + }, + } + + v + } +} +impl Decode for DigestItem { + fn decode(input: &mut I) -> Result { + let item_type: DigestItemType = Decode::decode(input)?; + match item_type { + DigestItemType::PreRuntime => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::PreRuntime(vals.0, vals.1)) + }, + DigestItemType::Consensus => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::Consensus(vals.0, vals.1)) + }, + DigestItemType::Seal => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::Seal(vals.0, vals.1)) + }, + DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), + DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), + } + } +} + +/// Consensus engine unique ID. From `sp_runtime::ConsensusEngineId`. 
pub type ConsensusEngineId = [u8; 4];

impl serde::Serialize for DigestItem {
    // Serialize as the SCALE-encoded bytes (hex via impl_serde), matching sp_runtime.
    fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.using_encoded(|bytes| impl_serde::serialize::serialize(bytes, seq))
    }
}

impl<'a> serde::Deserialize<'a> for DigestItem {
    fn deserialize<D>(de: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        let r = impl_serde::serialize::deserialize(de)?;
        Decode::decode(&mut &r[..])
            .map_err(|e| serde::de::Error::custom(format!("Decode error: {e}")))
    }
}

// Serialize a block number by widening it to U256 first, so all `N` types share one format.
// NOTE(review): generic parameters lost in transit; reconstructed from the `(*val).into()`
// conversion below — confirm against the vendored source.
fn serialize_number<S, T: Copy + Into<U256>>(val: &T, s: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    let u256: U256 = (*val).into();
    serde::Serialize::serialize(&u256, s)
}

fn deserialize_number<'a, D, T: TryFrom<U256>>(d: D) -> Result<T, D::Error>
where
    D: serde::Deserializer<'a>,
{
    // At the time of writing, Smoldot gives back block numbers in numeric rather
    // than hex format. So let's support deserializing from both here:
    let number_or_hex = NumberOrHex::deserialize(d)?;
    let u256 = number_or_hex.into_u256();
    TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed"))
}

/// A number type that can be serialized both as a number or a string that encodes a number in a
/// string.
///
/// We allow two representations of the block number as input. Either we deserialize to the type
/// that is specified in the block type or we attempt to parse given hex value.
///
/// The primary motivation for having this type is to avoid overflows when using big integers in
/// JavaScript (which we consider as an important RPC API consumer).
#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(untagged)]
pub enum NumberOrHex {
    /// The number represented directly.
    Number(u64),
    /// Hex representation of the number.
    Hex(U256),
}

impl NumberOrHex {
    /// Converts this number into an U256.
+ pub fn into_u256(self) -> U256 { + match self { + NumberOrHex::Number(n) => n.into(), + NumberOrHex::Hex(h) => h, + } + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() + } +} + +macro_rules! into_number_or_hex { + ($($t: ty)+) => { + $( + impl From<$t> for NumberOrHex { + fn from(x: $t) -> Self { + NumberOrHex::Number(x.into()) + } + } + )+ + } +} +into_number_or_hex!(u8 u16 u32 u64); + +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + +impl From for NumberOrHex { + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } +} + +#[cfg(test)] +mod test { + use super::*; + + // Smoldot returns numeric block numbers in the header at the time of writing; + // ensure we can deserialize them properly. + #[test] + fn can_deserialize_numeric_block_number() { + let numeric_block_number_json = r#" + { + "digest": { + "logs": [] + }, + "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "number": 4, + "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + "#; + + let header: BizinikiwiHeader = + serde_json::from_str(numeric_block_number_json).expect("valid block header"); + assert_eq!(header.number(), 4); + } + + // Bizinikiwi returns hex block numbers; ensure we can also deserialize those OK. 
+ #[test] + fn can_deserialize_hex_block_number() { + let numeric_block_number_json = r#" + { + "digest": { + "logs": [] + }, + "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "number": "0x04", + "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + "#; + + let header: BizinikiwiHeader = + serde_json::from_str(numeric_block_number_json).expect("valid block header"); + assert_eq!(header.number(), 4); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/config/default_extrinsic_params.rs b/vendor/pezkuwi-subxt/core/src/config/default_extrinsic_params.rs new file mode 100644 index 00000000..d1aa9ebd --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/default_extrinsic_params.rs @@ -0,0 +1,167 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::config::transaction_extensions::CheckMortalityParams; + +use super::{Config, ExtrinsicParams, HashFor, transaction_extensions}; + +/// The default [`super::ExtrinsicParams`] implementation understands common signed extensions +/// and how to apply them to a given chain. +pub type DefaultExtrinsicParams = transaction_extensions::AnyOf< + T, + ( + transaction_extensions::VerifySignature, + transaction_extensions::CheckSpecVersion, + transaction_extensions::CheckTxVersion, + transaction_extensions::CheckNonce, + transaction_extensions::CheckGenesis, + transaction_extensions::CheckMortality, + transaction_extensions::ChargeAssetTxPayment, + transaction_extensions::ChargeTransactionPayment, + transaction_extensions::CheckMetadataHash, + ), +>; + +/// A builder that outputs the set of [`super::ExtrinsicParams::Params`] required for +/// [`DefaultExtrinsicParams`]. 
This may expose methods that aren't applicable to the current +/// chain; such values will simply be ignored if so. +pub struct DefaultExtrinsicParamsBuilder { + /// `None` means the tx will be immortal, else it's mortality is described. + mortality: transaction_extensions::CheckMortalityParams, + /// `None` means the nonce will be automatically set. + nonce: Option, + /// `None` means we'll use the native token. + tip_of_asset_id: Option, + tip: u128, + tip_of: u128, +} + +impl Default for DefaultExtrinsicParamsBuilder { + fn default() -> Self { + Self { + mortality: CheckMortalityParams::default(), + tip: 0, + tip_of: 0, + tip_of_asset_id: None, + nonce: None, + } + } +} + +impl DefaultExtrinsicParamsBuilder { + /// Configure new extrinsic params. We default to providing no tip + /// and using an immortal transaction unless otherwise configured + pub fn new() -> Self { + Default::default() + } + + /// Make the transaction immortal, meaning it will never expire. This means that it could, in + /// theory, be pending for a long time and only be included many blocks into the future. + pub fn immortal(mut self) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::immortal(); + self + } + + /// Make the transaction mortal, given a number of blocks it will be mortal for from + /// the current block at the time of submission. + /// + /// # Warning + /// + /// This will ultimately return an error if used for creating extrinsic offline, because we need + /// additional information in order to set the mortality properly. + /// + /// When creating offline transactions, you must use [`Self::mortal_from_unchecked`] instead to + /// set the mortality. This provides all of the necessary information which we must otherwise + /// be online in order to obtain. 
+ pub fn mortal(mut self, for_n_blocks: u64) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::mortal(for_n_blocks); + self + } + + /// Configure a transaction that will be mortal for the number of blocks given, and from the + /// block details provided. Prefer to use [`Self::mortal()`] where possible, which prevents + /// the block number and hash from being misaligned. + pub fn mortal_from_unchecked( + mut self, + for_n_blocks: u64, + from_block_n: u64, + from_block_hash: HashFor, + ) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::mortal_from_unchecked( + for_n_blocks, + from_block_n, + from_block_hash, + ); + self + } + + /// Provide a specific nonce for the submitter of the extrinsic + pub fn nonce(mut self, nonce: u64) -> Self { + self.nonce = Some(nonce); + self + } + + /// Provide a tip to the block author in the chain's native token. + pub fn tip(mut self, tip: u128) -> Self { + self.tip = tip; + self.tip_of = tip; + self.tip_of_asset_id = None; + self + } + + /// Provide a tip to the block author using the token denominated by the `asset_id` provided. + /// This is not applicable on chains which don't use the `ChargeAssetTxPayment` signed + /// extension; in this case, no tip will be given. + pub fn tip_of(mut self, tip: u128, asset_id: T::AssetId) -> Self { + self.tip = 0; + self.tip_of = tip; + self.tip_of_asset_id = Some(asset_id); + self + } + + /// Build the extrinsic parameters. 
+ pub fn build(self) -> as ExtrinsicParams>::Params { + let check_mortality_params = self.mortality; + + let charge_asset_tx_params = if let Some(asset_id) = self.tip_of_asset_id { + transaction_extensions::ChargeAssetTxPaymentParams::tip_of(self.tip, asset_id) + } else { + transaction_extensions::ChargeAssetTxPaymentParams::tip(self.tip) + }; + + let charge_transaction_params = + transaction_extensions::ChargeTransactionPaymentParams::tip(self.tip); + + let check_nonce_params = if let Some(nonce) = self.nonce { + transaction_extensions::CheckNonceParams::with_nonce(nonce) + } else { + transaction_extensions::CheckNonceParams::from_chain() + }; + + ( + (), + (), + (), + check_nonce_params, + (), + check_mortality_params, + charge_asset_tx_params, + charge_transaction_params, + (), + ) + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn assert_default(_t: T) {} + + #[test] + fn params_are_default() { + let params = DefaultExtrinsicParamsBuilder::::new().build(); + assert_default(params) + } +} diff --git a/vendor/pezkuwi-subxt/core/src/config/extrinsic_params.rs b/vendor/pezkuwi-subxt/core/src/config/extrinsic_params.rs new file mode 100644 index 00000000..55702a3b --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/extrinsic_params.rs @@ -0,0 +1,128 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains a trait which controls the parameters that must +//! be provided in order to successfully construct an extrinsic. +//! [`crate::config::DefaultExtrinsicParams`] provides a general-purpose +//! implementation of this that will work in many cases. 
+ +use crate::{ + client::ClientState, + config::{Config, HashFor}, + error::ExtrinsicParamsError, +}; +use alloc::vec::Vec; +use core::any::Any; + +/// This trait allows you to configure the "signed extra" and +/// "additional" parameters that are a part of the transaction payload +/// or the signer payload respectively. +pub trait ExtrinsicParams: ExtrinsicParamsEncoder + Sized + Send + 'static { + /// These parameters can be provided to the constructor along with + /// some default parameters that `subxt` understands, in order to + /// help construct your [`ExtrinsicParams`] object. + type Params: Params; + + /// Construct a new instance of our [`ExtrinsicParams`]. + fn new(client: &ClientState, params: Self::Params) -> Result; +} + +/// This trait is expected to be implemented for any [`ExtrinsicParams`], and +/// defines how to encode the "additional" and "extra" params. Both functions +/// are optional and will encode nothing by default. +pub trait ExtrinsicParamsEncoder: 'static { + /// This is expected to SCALE encode the transaction extension data to some + /// buffer that has been provided. This data is attached to the transaction + /// and also (by default) attached to the signer payload which is signed to + /// provide a signature for the transaction. + /// + /// If [`ExtrinsicParamsEncoder::encode_signer_payload_value_to`] is implemented, + /// then that will be used instead when generating a signer payload. Useful for + /// eg the `VerifySignature` extension, which is send with the transaction but + /// is not a part of the signer payload. + fn encode_value_to(&self, _v: &mut Vec) {} + + /// See [`ExtrinsicParamsEncoder::encode_value_to`]. This defaults to calling that + /// method, but if implemented will dictate what is encoded to the signer payload. 
+ fn encode_signer_payload_value_to(&self, v: &mut Vec) { + self.encode_value_to(v); + } + + /// This is expected to SCALE encode the "implicit" (formally "additional") + /// parameters to some buffer that has been provided. These parameters are + /// _not_ sent along with the transaction, but are taken into account when + /// signing it, meaning the client and node must agree on their values. + fn encode_implicit_to(&self, _v: &mut Vec) {} + + /// Set the signature. This happens after we have constructed the extrinsic params, + /// and so is defined here rather than on the params, below. We need to use `&dyn Any` + /// to keep this trait object safe, but can downcast in the impls. + /// + /// # Panics + /// + /// Implementations of this will likely try to downcast the provided `account_id` + /// and `signature` into `T::AccountId` and `T::Signature` (where `T: Config`), and are + /// free to panic if this downcasting does not succeed. + /// + /// In typical usage, this is not a problem, since this method is only called internally + /// and provided values which line up with the relevant `Config`. In theory though, this + /// method can be called manually with any types, hence this warning. + fn inject_signature(&mut self, _account_id: &dyn Any, _signature: &dyn Any) {} +} + +/// The parameters (ie [`ExtrinsicParams::Params`]) can also have data injected into them, +/// allowing Subxt to retrieve data from the chain and amend the parameters with it when +/// online. +pub trait Params { + /// Set the account nonce. + fn inject_account_nonce(&mut self, _nonce: u64) {} + /// Set the current block. + fn inject_block(&mut self, _number: u64, _hash: HashFor) {} +} + +impl Params for () {} + +macro_rules! 
impl_tuples { + ($($ident:ident $index:tt),+) => { + impl ),+> Params for ($($ident,)+){ + fn inject_account_nonce(&mut self, nonce: u64) { + $(self.$index.inject_account_nonce(nonce);)+ + } + + fn inject_block(&mut self, number: u64, hash: HashFor) { + $(self.$index.inject_block(number, hash);)+ + } + } + } +} + +#[rustfmt::skip] +const _: () = { + impl_tuples!(A 0); + impl_tuples!(A 0, B 1); + impl_tuples!(A 0, B 1, C 2); + impl_tuples!(A 0, B 1, C 2, D 3); + impl_tuples!(A 0, B 1, C 2, D 3, E 4); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 
14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24, Z 25); +}; diff --git a/vendor/pezkuwi-subxt/core/src/config/mod.rs b/vendor/pezkuwi-subxt/core/src/config/mod.rs new file mode 100644 index 00000000..523f5608 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/mod.rs @@ -0,0 +1,130 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module provides a [`Config`] type, which is used to define various +//! types that are important in order to speak to a particular chain. +//! [`BizinikiwConfig`] provides a default set of these types suitable for the +//! default Bizinikiwi node implementation, and [`PezkuwiConfig`] for a +//! Pezkuwi node. 
+ +mod default_extrinsic_params; +mod extrinsic_params; + +pub mod pezkuwi; +pub mod bizinikiwi; +pub mod transaction_extensions; + +use codec::{Decode, Encode}; +use core::fmt::Debug; +use pezkuwi_subxt_metadata::Metadata; +use scale_decode::DecodeAsType; +use scale_encode::EncodeAsType; +use serde::{Serialize, de::DeserializeOwned}; + +pub use default_extrinsic_params::{DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; +pub use extrinsic_params::{ExtrinsicParams, ExtrinsicParamsEncoder}; +pub use pezkuwi::{PezkuwiConfig, PezkuwiExtrinsicParams, PezkuwiExtrinsicParamsBuilder}; +pub use bizinikiwi::{BizinikiwConfig, BizinikiwiExtrinsicParams, BizinikiwiExtrinsicParamsBuilder}; +pub use transaction_extensions::TransactionExtension; + +/// Runtime types. +// Note: the `Send + Sync + 'static` bound isn't strictly required, but currently deriving +// TypeInfo automatically applies a 'static bound to all generic types (including this one), +// And we want the compiler to infer `Send` and `Sync` OK for things which have `T: Config` +// rather than having to `unsafe impl` them ourselves. +pub trait Config: Sized + Send + Sync + 'static { + /// The account ID type. + type AccountId: Debug + Clone + Encode + Decode + Serialize + Send; + + /// The address type. + type Address: Debug + Encode + From; + + /// The signature type. + type Signature: Debug + Clone + Encode + Decode + Send; + + /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). + type Hasher: Debug + Clone + Copy + Hasher + Send + Sync; + + /// The block header. + type Header: Debug + Header + Sync + Send + DeserializeOwned + Clone; + + /// This type defines the extrinsic extra and additional parameters. + type ExtrinsicParams: ExtrinsicParams; + + /// This is used to identify an asset in the `ChargeAssetTxPayment` signed extension. + type AssetId: Debug + Clone + Encode + DecodeAsType + EncodeAsType + Send; +} + +/// Given some [`Config`], this returns the type of hash used. 
+pub type HashFor = <::Hasher as Hasher>::Output; + +/// given some [`Config`], this return the other params needed for its `ExtrinsicParams`. +pub type ParamsFor = <::ExtrinsicParams as ExtrinsicParams>::Params; + +/// Block hashes must conform to a bunch of things to be used in Subxt. +pub trait Hash: + Debug + + Copy + + Send + + Sync + + Decode + + AsRef<[u8]> + + Serialize + + DeserializeOwned + + Encode + + PartialEq + + Eq + + core::hash::Hash +{ +} +impl Hash for T where + T: Debug + + Copy + + Send + + Sync + + Decode + + AsRef<[u8]> + + Serialize + + DeserializeOwned + + Encode + + PartialEq + + Eq + + core::hash::Hash +{ +} + +/// This represents the hasher used by a node to hash things like block headers +/// and extrinsics. +pub trait Hasher { + /// The type given back from the hash operation + type Output: Hash; + + /// Construct a new hasher. + fn new(metadata: &Metadata) -> Self; + + /// Hash some bytes to the given output type. + fn hash(&self, s: &[u8]) -> Self::Output; + + /// Hash some SCALE encodable type to the given output type. + fn hash_of(&self, s: &S) -> Self::Output { + let out = s.encode(); + self.hash(&out) + } +} + +/// This represents the block header type used by a node. +pub trait Header: Sized + Encode + Decode { + /// The block number type for this header. + type Number: Into; + /// The hasher used to hash this header. + type Hasher: Hasher; + + /// Return the block number of this header. + fn number(&self) -> Self::Number; + + /// Hash this header. + fn hash_with(&self, hasher: Self::Hasher) -> ::Output { + hasher.hash_of(self) + } +} diff --git a/vendor/pezkuwi-subxt/core/src/config/pezkuwi.rs b/vendor/pezkuwi-subxt/core/src/config/pezkuwi.rs new file mode 100644 index 00000000..845c9f01 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/pezkuwi.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! 
Pezkuwi specific configuration + +use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; + +use crate::config::BizinikiwConfig; +pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; +pub use primitive_types::{H256, U256}; + +/// Default set of commonly used types by Pezkuwi nodes. +// Note: The trait implementations exist just to make life easier, +// but shouldn't strictly be necessary since users can't instantiate this type. +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +pub enum PezkuwiConfig {} + +impl Config for PezkuwiConfig { + type AccountId = ::AccountId; + type Signature = ::Signature; + type Hasher = ::Hasher; + type Header = ::Header; + type AssetId = ::AssetId; + + // Address on Pezkuwi has no account index, whereas it's u32 on + // the default bizinikiwi dev node. + type Address = MultiAddress; + + // These are the same as the default bizinikiwi node, but redefined + // because we need to pass the PezkuwiConfig trait as a param. + type ExtrinsicParams = PezkuwiExtrinsicParams; +} + +/// A struct representing the signed extra and additional parameters required +/// to construct a transaction for a pezkuwi node. +pub type PezkuwiExtrinsicParams = DefaultExtrinsicParams; + +/// A builder which leads to [`PezkuwiExtrinsicParams`] being constructed. +/// This is what you provide to methods like `sign_and_submit()`. +pub type PezkuwiExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; diff --git a/vendor/pezkuwi-subxt/core/src/config/transaction_extensions.rs b/vendor/pezkuwi-subxt/core/src/config/transaction_extensions.rs new file mode 100644 index 00000000..58e334e1 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/config/transaction_extensions.rs @@ -0,0 +1,679 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! 
This module contains implementations for common transaction extensions, each +//! of which implements [`TransactionExtension`], and can be used in conjunction with +//! [`AnyOf`] to configure the set of transaction extensions which are known about +//! when interacting with a chain. + +use super::extrinsic_params::ExtrinsicParams; +use crate::{ + client::ClientState, + config::{Config, ExtrinsicParamsEncoder, HashFor}, + error::ExtrinsicParamsError, + utils::{Era, Static}, +}; +use alloc::{borrow::ToOwned, boxed::Box, vec::Vec}; +use codec::{Compact, Encode}; +use core::{any::Any, fmt::Debug}; +use derive_where::derive_where; +use hashbrown::HashMap; +use scale_decode::DecodeAsType; +use scale_info::PortableRegistry; + +// Re-export this here; it's a bit generically named to be re-exported from ::config. +pub use super::extrinsic_params::Params; + +/// A single [`TransactionExtension`] has a unique name, but is otherwise the +/// same as [`ExtrinsicParams`] in describing how to encode the extra and +/// additional data. +pub trait TransactionExtension: ExtrinsicParams { + /// The type representing the `extra` / value bytes of a transaction extension. + /// Decoding from this type should be symmetrical to the respective + /// `ExtrinsicParamsEncoder::encode_value_to()` implementation of this transaction extension. + type Decoded: DecodeAsType; + + /// This should return true if the transaction extension matches the details given. + /// Often, this will involve just checking that the identifier given matches that of the + /// extension in question. + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool; +} + +/// The [`VerifySignature`] extension. For V5 General transactions, this is how a signature +/// is provided. The signature is constructed by signing a payload which contains the +/// transaction call data as well as the encoded "additional" bytes for any extensions _after_ +/// this one in the list. 
+pub struct VerifySignature(VerifySignatureDetails); + +impl ExtrinsicParams for VerifySignature { + type Params = (); + + fn new(_client: &ClientState, _params: Self::Params) -> Result { + Ok(VerifySignature(VerifySignatureDetails::Disabled)) + } +} + +impl ExtrinsicParamsEncoder for VerifySignature { + fn encode_value_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } + fn encode_signer_payload_value_to(&self, v: &mut Vec) { + // This extension is never encoded to the signer payload, and extensions + // prior to this are ignored when creating said payload, so clear anything + // we've seen so far. + v.clear(); + } + fn encode_implicit_to(&self, v: &mut Vec) { + // We only use the "implicit" data for extensions _after_ this one + // in the pipeline to form the signer payload. Thus, clear anything + // we've seen so far. + v.clear(); + } + + fn inject_signature(&mut self, account: &dyn Any, signature: &dyn Any) { + // Downcast refs back to concrete types (we use `&dyn Any`` so that the trait remains object + // safe) + let account = account + .downcast_ref::() + .expect("A T::AccountId should have been provided") + .clone(); + let signature = signature + .downcast_ref::() + .expect("A T::Signature should have been provided") + .clone(); + + // The signature is not set through params, only here, once given by a user: + self.0 = VerifySignatureDetails::Signed { signature, account } + } +} + +impl TransactionExtension for VerifySignature { + type Decoded = Static>; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "VerifySignature" + } +} + +/// This allows a signature to be provided to the [`VerifySignature`] transaction extension. 
+// Dev note: this must encode identically to https://github.com/pezkuwichain/pezkuwi-sdk/blob/fd72d58313c297a10600037ce1bb88ec958d722e/bizinikiwi/frame/verify-signature/src/extension.rs#L43 +#[derive(codec::Encode, codec::Decode)] +pub enum VerifySignatureDetails { + /// A signature has been provided. + Signed { + /// The signature. + signature: T::Signature, + /// The account that generated the signature. + account: T::AccountId, + }, + /// No signature was provided. + Disabled, +} + +/// The [`CheckMetadataHash`] transaction extension. +pub struct CheckMetadataHash { + // Eventually we might provide or calculate the metadata hash here, + // but for now we never provide a hash and so this is empty. +} + +impl ExtrinsicParams for CheckMetadataHash { + type Params = (); + + fn new(_client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckMetadataHash {}) + } +} + +impl ExtrinsicParamsEncoder for CheckMetadataHash { + fn encode_value_to(&self, v: &mut Vec) { + // A single 0 byte in the TX payload indicates that the chain should + // _not_ expect any metadata hash to exist in the signer payload. + 0u8.encode_to(v); + } + fn encode_implicit_to(&self, v: &mut Vec) { + // We provide no metadata hash in the signer payload to align with the above. + None::<()>.encode_to(v); + } +} + +impl TransactionExtension for CheckMetadataHash { + type Decoded = CheckMetadataHashMode; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckMetadataHash" + } +} + +/// Is metadata checking enabled or disabled? +// Dev note: The "Disabled" and "Enabled" variant names match those that the +// transaction extension will be encoded with, in order that DecodeAsType will work +// properly. +#[derive(Copy, Clone, Debug, DecodeAsType)] +pub enum CheckMetadataHashMode { + /// No hash was provided in the signer payload. + Disabled, + /// A hash was provided in the signer payload. 
+ Enabled, +} + +impl CheckMetadataHashMode { + /// Is metadata checking enabled or disabled for this transaction? + pub fn is_enabled(&self) -> bool { + match self { + CheckMetadataHashMode::Disabled => false, + CheckMetadataHashMode::Enabled => true, + } + } +} + +/// The [`CheckSpecVersion`] transaction extension. +pub struct CheckSpecVersion(u32); + +impl ExtrinsicParams for CheckSpecVersion { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckSpecVersion(client.runtime_version.spec_version)) + } +} + +impl ExtrinsicParamsEncoder for CheckSpecVersion { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckSpecVersion { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckSpecVersion" + } +} + +/// The [`CheckNonce`] transaction extension. +pub struct CheckNonce(u64); + +impl ExtrinsicParams for CheckNonce { + type Params = CheckNonceParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(CheckNonce(params.0.unwrap_or(0))) + } +} + +impl ExtrinsicParamsEncoder for CheckNonce { + fn encode_value_to(&self, v: &mut Vec) { + Compact(self.0).encode_to(v); + } +} + +impl TransactionExtension for CheckNonce { + type Decoded = u64; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckNonce" + } +} + +/// Configure the nonce used. +#[derive(Debug, Clone, Default)] +pub struct CheckNonceParams(Option); + +impl CheckNonceParams { + /// Retrieve the nonce from the chain and use that. + pub fn from_chain() -> Self { + Self(None) + } + /// Manually set an account nonce to use. 
+ pub fn with_nonce(nonce: u64) -> Self { + Self(Some(nonce)) + } +} + +impl Params for CheckNonceParams { + fn inject_account_nonce(&mut self, nonce: u64) { + if self.0.is_none() { + self.0 = Some(nonce) + } + } +} + +/// The [`CheckTxVersion`] transaction extension. +pub struct CheckTxVersion(u32); + +impl ExtrinsicParams for CheckTxVersion { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckTxVersion(client.runtime_version.transaction_version)) + } +} + +impl ExtrinsicParamsEncoder for CheckTxVersion { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckTxVersion { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckTxVersion" + } +} + +/// The [`CheckGenesis`] transaction extension. +pub struct CheckGenesis(HashFor); + +impl ExtrinsicParams for CheckGenesis { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckGenesis(client.genesis_hash)) + } +} + +impl ExtrinsicParamsEncoder for CheckGenesis { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckGenesis { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckGenesis" + } +} + +/// The [`CheckMortality`] transaction extension. +pub struct CheckMortality { + params: CheckMortalityParamsInner, + genesis_hash: HashFor, +} + +impl ExtrinsicParams for CheckMortality { + type Params = CheckMortalityParams; + + fn new(client: &ClientState, params: Self::Params) -> Result { + // If a user has explicitly configured the transaction to be mortal for n blocks, but we get + // to this stage and no injected information was able to turn this into MortalFromBlock{..}, + // then we hit an error as we are unable to construct a mortal transaction here. 
+ if matches!(¶ms.0, CheckMortalityParamsInner::MortalForBlocks(_)) { + return Err(ExtrinsicParamsError::custom( + "CheckMortality: We cannot construct an offline extrinsic with only the number of blocks it is mortal for. Use mortal_from_unchecked instead.", + )); + } + + Ok(CheckMortality { + // if nothing has been explicitly configured, we will have a mortal transaction + // valid for 32 blocks if block info is available. + params: params.0, + genesis_hash: client.genesis_hash, + }) + } +} + +impl ExtrinsicParamsEncoder for CheckMortality { + fn encode_value_to(&self, v: &mut Vec) { + match &self.params { + CheckMortalityParamsInner::MortalFromBlock { for_n_blocks, from_block_n, .. } => { + Era::mortal(*for_n_blocks, *from_block_n).encode_to(v); + }, + _ => { + // Note: if we see `CheckMortalityInner::MortalForBlocks`, then it means the user + // has configured a block to be mortal for N blocks, but the current block was + // never injected, so we don't know where to start from and default back to + // building an immortal tx. + Era::Immortal.encode_to(v); + }, + } + } + fn encode_implicit_to(&self, v: &mut Vec) { + match &self.params { + CheckMortalityParamsInner::MortalFromBlock { from_block_hash, .. } => { + from_block_hash.encode_to(v); + }, + _ => { + self.genesis_hash.encode_to(v); + }, + } + } +} + +impl TransactionExtension for CheckMortality { + type Decoded = Era; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckMortality" + } +} + +/// Parameters to configure the [`CheckMortality`] transaction extension. +pub struct CheckMortalityParams(CheckMortalityParamsInner); + +enum CheckMortalityParamsInner { + /// The transaction will be immortal. + Immortal, + /// The transaction is mortal for N blocks. This must be "upgraded" into + /// [`CheckMortalityParamsInner::MortalFromBlock`] to ultimately work. 
+ MortalForBlocks(u64), + /// The transaction is mortal for N blocks, but if it cannot be "upgraded", + /// then it will be set to immortal instead. This is the default if unset. + MortalForBlocksOrImmortalIfNotPossible(u64), + /// The transaction is mortal and all of the relevant information is provided. + MortalFromBlock { for_n_blocks: u64, from_block_n: u64, from_block_hash: HashFor }, +} + +impl Default for CheckMortalityParams { + fn default() -> Self { + // default to being mortal for 32 blocks if possible, else immortal: + CheckMortalityParams(CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(32)) + } +} + +impl CheckMortalityParams { + /// Configure a transaction that will be mortal for the number of blocks given. + pub fn mortal(for_n_blocks: u64) -> Self { + Self(CheckMortalityParamsInner::MortalForBlocks(for_n_blocks)) + } + + /// Configure a transaction that will be mortal for the number of blocks given, + /// and from the block details provided. Prefer to use [`CheckMortalityParams::mortal()`] + /// where possible, which prevents the block number and hash from being misaligned. + pub fn mortal_from_unchecked( + for_n_blocks: u64, + from_block_n: u64, + from_block_hash: HashFor, + ) -> Self { + Self(CheckMortalityParamsInner::MortalFromBlock { + for_n_blocks, + from_block_n, + from_block_hash, + }) + } + /// An immortal transaction. + pub fn immortal() -> Self { + Self(CheckMortalityParamsInner::Immortal) + } +} + +impl Params for CheckMortalityParams { + fn inject_block(&mut self, from_block_n: u64, from_block_hash: HashFor) { + match &self.0 { + CheckMortalityParamsInner::MortalForBlocks(n) | + CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(n) => + self.0 = CheckMortalityParamsInner::MortalFromBlock { + for_n_blocks: *n, + from_block_n, + from_block_hash, + }, + _ => { + // Don't change anything if explicit Immortal or explicit block set. + }, + } + } +} + +/// The [`ChargeAssetTxPayment`] transaction extension. 
+#[derive(DecodeAsType)] +#[derive_where(Clone, Debug; T::AssetId)] +#[decode_as_type(trait_bounds = "T::AssetId: DecodeAsType")] +pub struct ChargeAssetTxPayment { + tip: Compact, + asset_id: Option, +} + +impl ChargeAssetTxPayment { + /// Tip to the extrinsic author in the native chain token. + pub fn tip(&self) -> u128 { + self.tip.0 + } + + /// Tip to the extrinsic author using the asset ID given. + pub fn asset_id(&self) -> Option<&T::AssetId> { + self.asset_id.as_ref() + } +} + +impl ExtrinsicParams for ChargeAssetTxPayment { + type Params = ChargeAssetTxPaymentParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(ChargeAssetTxPayment { tip: Compact(params.tip), asset_id: params.asset_id }) + } +} + +impl ExtrinsicParamsEncoder for ChargeAssetTxPayment { + fn encode_value_to(&self, v: &mut Vec) { + (self.tip, &self.asset_id).encode_to(v); + } +} + +impl TransactionExtension for ChargeAssetTxPayment { + type Decoded = Self; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "ChargeAssetTxPayment" + } +} + +/// Parameters to configure the [`ChargeAssetTxPayment`] transaction extension. +pub struct ChargeAssetTxPaymentParams { + tip: u128, + asset_id: Option, +} + +impl Default for ChargeAssetTxPaymentParams { + fn default() -> Self { + ChargeAssetTxPaymentParams { tip: Default::default(), asset_id: Default::default() } + } +} + +impl ChargeAssetTxPaymentParams { + /// Don't provide a tip to the extrinsic author. + pub fn no_tip() -> Self { + ChargeAssetTxPaymentParams { tip: 0, asset_id: None } + } + /// Tip the extrinsic author in the native chain token. + pub fn tip(tip: u128) -> Self { + ChargeAssetTxPaymentParams { tip, asset_id: None } + } + /// Tip the extrinsic author using the asset ID given. 
+ pub fn tip_of(tip: u128, asset_id: T::AssetId) -> Self { + ChargeAssetTxPaymentParams { tip, asset_id: Some(asset_id) } + } +} + +impl Params for ChargeAssetTxPaymentParams {} + +/// The [`ChargeTransactionPayment`] transaction extension. +#[derive(Clone, Debug, DecodeAsType)] +pub struct ChargeTransactionPayment { + tip: Compact, +} + +impl ChargeTransactionPayment { + /// Tip to the extrinsic author in the native chain token. + pub fn tip(&self) -> u128 { + self.tip.0 + } +} + +impl ExtrinsicParams for ChargeTransactionPayment { + type Params = ChargeTransactionPaymentParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(ChargeTransactionPayment { tip: Compact(params.tip) }) + } +} + +impl ExtrinsicParamsEncoder for ChargeTransactionPayment { + fn encode_value_to(&self, v: &mut Vec) { + self.tip.encode_to(v); + } +} + +impl TransactionExtension for ChargeTransactionPayment { + type Decoded = Self; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "ChargeTransactionPayment" + } +} + +/// Parameters to configure the [`ChargeTransactionPayment`] transaction extension. +#[derive(Default)] +pub struct ChargeTransactionPaymentParams { + tip: u128, +} + +impl ChargeTransactionPaymentParams { + /// Don't provide a tip to the extrinsic author. + pub fn no_tip() -> Self { + ChargeTransactionPaymentParams { tip: 0 } + } + /// Tip the extrinsic author in the native chain token. + pub fn tip(tip: u128) -> Self { + ChargeTransactionPaymentParams { tip } + } +} + +impl Params for ChargeTransactionPaymentParams {} + +/// This accepts a tuple of [`TransactionExtension`]s, and will dynamically make use of whichever +/// ones are actually required for the chain in the correct order, ignoring the rest. This +/// is a sensible default, and allows for a single configuration to work across multiple chains. 
+pub struct AnyOf { + params: Vec>, + _marker: core::marker::PhantomData<(T, Params)>, +} + +macro_rules! impl_tuples { + ($($ident:ident $index:tt),+) => { + // We do some magic when the tuple is wrapped in AnyOf. We + // look at the metadata, and use this to select and make use of only the extensions + // that we actually need for the chain we're dealing with. + impl ExtrinsicParams for AnyOf + where + T: Config, + $($ident: TransactionExtension,)+ + { + type Params = ($($ident::Params,)+); + + fn new( + client: &ClientState, + params: Self::Params, + ) -> Result { + let metadata = &client.metadata; + let types = metadata.types(); + + // For each transaction extension in the tuple, find the matching index in the metadata, if + // there is one, and add it to a map with that index as the key. + let mut exts_by_index = HashMap::new(); + $({ + for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { + // Skip over any exts that have a match already: + if exts_by_index.contains_key(&idx) { + continue + } + // Break and record as soon as we find a match: + if $ident::matches(e.identifier(), e.extra_ty(), types) { + let ext = $ident::new(client, params.$index)?; + let boxed_ext: Box = Box::new(ext); + exts_by_index.insert(idx, boxed_ext); + break + } + } + })+ + + // Next, turn these into an ordered vec, erroring if we haven't matched on any exts yet. 
+ let mut params = Vec::new(); + for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { + let Some(ext) = exts_by_index.remove(&idx) else { + if is_type_empty(e.extra_ty(), types) { + continue + } else { + return Err(ExtrinsicParamsError::UnknownTransactionExtension(e.identifier().to_owned())); + } + }; + params.push(ext); + } + + Ok(AnyOf { + params, + _marker: core::marker::PhantomData + }) + } + } + + impl ExtrinsicParamsEncoder for AnyOf + where + T: Config, + $($ident: TransactionExtension,)+ + { + fn encode_value_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_value_to(v); + } + } + fn encode_signer_payload_value_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_signer_payload_value_to(v); + } + } + fn encode_implicit_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_implicit_to(v); + } + } + fn inject_signature(&mut self, account_id: &dyn Any, signature: &dyn Any) { + for ext in &mut self.params { + ext.inject_signature(account_id, signature); + } + } + } + } +} + +#[rustfmt::skip] +const _: () = { + impl_tuples!(A 0); + impl_tuples!(A 0, B 1); + impl_tuples!(A 0, B 1, C 2); + impl_tuples!(A 0, B 1, C 2, D 3); + impl_tuples!(A 0, B 1, C 2, D 3, E 4); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); + impl_tuples!(A 0, B 1, C 2, D 
3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19, V 20); +}; + +/// Checks to see whether the type being given is empty, ie would require +/// 0 bytes to encode. +fn is_type_empty(type_id: u32, types: &scale_info::PortableRegistry) -> bool { + let Some(ty) = types.resolve(type_id) else { + // Can't resolve; type may not be empty. Not expected to hit this. + return false; + }; + + use scale_info::TypeDef; + match &ty.type_def { + TypeDef::Composite(c) => c.fields.iter().all(|f| is_type_empty(f.ty.id, types)), + TypeDef::Array(a) => a.len == 0 || is_type_empty(a.type_param.id, types), + TypeDef::Tuple(t) => t.fields.iter().all(|f| is_type_empty(f.id, types)), + // Explicitly list these in case any additions are made in the future. + TypeDef::BitSequence(_) | + TypeDef::Variant(_) | + TypeDef::Sequence(_) | + TypeDef::Compact(_) | + TypeDef::Primitive(_) => false, + } +} diff --git a/vendor/pezkuwi-subxt/core/src/constants/address.rs b/vendor/pezkuwi-subxt/core/src/constants/address.rs new file mode 100644 index 00000000..57da48c4 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/constants/address.rs @@ -0,0 +1,137 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Construct addresses to access constants with. 
+ +use alloc::{borrow::Cow, string::String}; +use derive_where::derive_where; +use scale_decode::DecodeAsType; + +/// This represents a constant address. Anything implementing this trait +/// can be used to fetch constants. +pub trait Address { + /// The target type of the value that lives at this address. + type Target: DecodeAsType; + + /// The name of the pallet that the constant lives under. + fn pallet_name(&self) -> &str; + + /// The name of the constant in a given pallet. + fn constant_name(&self) -> &str; + + /// An optional hash which, if present, will be checked against + /// the node metadata to confirm that the return type matches what + /// we are expecting. + fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +// Any reference to an address is a valid address. +impl Address for &'_ A { + type Target = A::Target; + + fn pallet_name(&self) -> &str { + A::pallet_name(*self) + } + + fn constant_name(&self) -> &str { + A::constant_name(*self) + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + A::validation_hash(*self) + } +} + +// (str, str) and similar are valid addresses. +impl, B: AsRef> Address for (A, B) { + type Target = scale_value::Value; + + fn pallet_name(&self) -> &str { + self.0.as_ref() + } + + fn constant_name(&self) -> &str { + self.1.as_ref() + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +/// This represents the address of a constant. +#[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct StaticAddress { + pallet_name: Cow<'static, str>, + constant_name: Cow<'static, str>, + constant_hash: Option<[u8; 32]>, + _marker: core::marker::PhantomData, +} + +/// A dynamic lookup address to access a constant. +pub type DynamicAddress = StaticAddress; + +impl StaticAddress { + /// Create a new [`StaticAddress`] to use to look up a constant. 
+ pub fn new(pallet_name: impl Into, constant_name: impl Into) -> Self { + Self { + pallet_name: Cow::Owned(pallet_name.into()), + constant_name: Cow::Owned(constant_name.into()), + constant_hash: None, + _marker: core::marker::PhantomData, + } + } + + /// Create a new [`StaticAddress`] that will be validated + /// against node metadata using the hash given. + #[doc(hidden)] + pub fn new_static( + pallet_name: &'static str, + constant_name: &'static str, + hash: [u8; 32], + ) -> Self { + Self { + pallet_name: Cow::Borrowed(pallet_name), + constant_name: Cow::Borrowed(constant_name), + constant_hash: Some(hash), + _marker: core::marker::PhantomData, + } + } + + /// Do not validate this constant prior to accessing it. + pub fn unvalidated(self) -> Self { + Self { + pallet_name: self.pallet_name, + constant_name: self.constant_name, + constant_hash: None, + _marker: self._marker, + } + } +} + +impl Address for StaticAddress { + type Target = ReturnTy; + + fn pallet_name(&self) -> &str { + &self.pallet_name + } + + fn constant_name(&self) -> &str { + &self.constant_name + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + self.constant_hash + } +} + +/// Construct a new dynamic constant lookup. +pub fn dynamic( + pallet_name: impl Into, + constant_name: impl Into, +) -> DynamicAddress { + DynamicAddress::new(pallet_name, constant_name) +} diff --git a/vendor/pezkuwi-subxt/core/src/constants/mod.rs b/vendor/pezkuwi-subxt/core/src/constants/mod.rs new file mode 100644 index 00000000..1b97b059 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/constants/mod.rs @@ -0,0 +1,106 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Access constants from metadata. +//! +//! Use [`get`] to retrieve a constant from some metadata, or [`validate`] to check that a static +//! constant address lines up with the value seen in the metadata. +//! +//! # Example +//! +//! 
```rust +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::constants; +//! use pezkuwi_subxt_core::Metadata; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'd like to access constants in: +//! let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // We can use a static address to obtain some constant: +//! let address = pezkuwi::constants().balances().existential_deposit(); +//! +//! // This validates that the address given is in line with the metadata +//! // we're trying to access the constant in: +//! constants::validate(&address, &metadata).expect("is valid"); +//! +//! // This acquires the constant (and internally also validates it): +//! let ed = constants::get(&address, &metadata).expect("can decode constant"); +//! +//! assert_eq!(ed, 33_333_333); +//! ``` + +pub mod address; + +use crate::{Metadata, error::ConstantError}; +use address::Address; +use alloc::{borrow::ToOwned, string::ToString, vec::Vec}; +use frame_decode::constants::ConstantTypeInfo; +use scale_decode::IntoVisitor; + +/// When the provided `address` is statically generated via the `#[subxt]` macro, this validates +/// that the shape of the constant value is the same as the shape expected by the static address. 
+/// +/// When the provided `address` is dynamic (and thus does not come with any expectation of the +/// shape of the constant value), this just returns `Ok(())` +pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), ConstantError> { + if let Some(actual_hash) = address.validation_hash() { + let expected_hash = metadata + .pallet_by_name(address.pallet_name()) + .ok_or_else(|| ConstantError::PalletNameNotFound(address.pallet_name().to_string()))? + .constant_hash(address.constant_name()) + .ok_or_else(|| ConstantError::ConstantNameNotFound { + pallet_name: address.pallet_name().to_string(), + constant_name: address.constant_name().to_owned(), + })?; + if actual_hash != expected_hash { + return Err(ConstantError::IncompatibleCodegen); + } + } + Ok(()) +} + +/// Fetch a constant out of the metadata given a constant address. If the `address` has been +/// statically generated, this will validate that the constant shape is as expected, too. +pub fn get( + address: Addr, + metadata: &Metadata, +) -> Result { + // 1. Validate constant shape if hash given: + validate(&address, metadata)?; + + // 2. Attempt to decode the constant into the type given: + let constant = frame_decode::constants::decode_constant( + address.pallet_name(), + address.constant_name(), + metadata, + metadata.types(), + Addr::Target::into_visitor(), + ) + .map_err(ConstantError::CouldNotDecodeConstant)?; + + Ok(constant) +} + +/// Access the bytes of a constant by the address it is registered under. +pub fn get_bytes( + address: Addr, + metadata: &Metadata, +) -> Result, ConstantError> { + // 1. Validate custom value shape if hash given: + validate(&address, metadata)?; + + // 2. 
Return the underlying bytes: + let constant = metadata + .constant_info(address.pallet_name(), address.constant_name()) + .map_err(|e| ConstantError::ConstantInfoError(e.into_owned()))?; + Ok(constant.bytes.to_vec()) +} diff --git a/vendor/pezkuwi-subxt/core/src/custom_values/address.rs b/vendor/pezkuwi-subxt/core/src/custom_values/address.rs new file mode 100644 index 00000000..7ae889d0 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/custom_values/address.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Construct addresses to access custom values with. + +use alloc::{borrow::Cow, string::String}; +use derive_where::derive_where; +use scale_decode::DecodeAsType; + +/// Use this with [`Address::IsDecodable`]. +pub use crate::utils::{Maybe, No, NoMaybe}; + +/// This represents the address of a custom value in the metadata. +/// Anything that implements it can be used to fetch custom values from the metadata. +/// The trait is implemented by [`str`] for dynamic lookup and [`StaticAddress`] for static queries. +pub trait Address { + /// The type of the custom value. + type Target: DecodeAsType; + /// Should be set to `Yes` for Dynamic values and static values that have a valid type. + /// Should be `No` for custom values, that have an invalid type id. + type IsDecodable: NoMaybe; + + /// the name (key) by which the custom value can be accessed in the metadata. + fn name(&self) -> &str; + + /// An optional hash which, if present, can be checked against node metadata. 
+ fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +// Any reference to an address is a valid address +impl Address for &'_ A { + type Target = A::Target; + type IsDecodable = A::IsDecodable; + + fn name(&self) -> &str { + A::name(*self) + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + A::validation_hash(*self) + } +} + +// Support plain strings for looking up custom values. +impl Address for str { + type Target = scale_value::Value; + type IsDecodable = Maybe; + + fn name(&self) -> &str { + self + } +} + +/// A static address to a custom value. +#[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct StaticAddress { + name: Cow<'static, str>, + hash: Option<[u8; 32]>, + marker: core::marker::PhantomData<(ReturnTy, IsDecodable)>, +} + +/// A dynamic address to a custom value. +pub type DynamicAddress = StaticAddress; + +impl StaticAddress { + #[doc(hidden)] + /// Creates a new StaticAddress. + pub fn new_static(name: &'static str, hash: [u8; 32]) -> Self { + Self { name: Cow::Borrowed(name), hash: Some(hash), marker: core::marker::PhantomData } + } + + /// Create a new [`StaticAddress`] + pub fn new(name: impl Into) -> Self { + Self { name: name.into().into(), hash: None, marker: core::marker::PhantomData } + } + + /// Do not validate this custom value prior to accessing it. + pub fn unvalidated(self) -> Self { + Self { name: self.name, hash: None, marker: self.marker } + } +} + +impl Address for StaticAddress { + type Target = Target; + type IsDecodable = IsDecodable; + + fn name(&self) -> &str { + &self.name + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + self.hash + } +} + +/// Construct a new dynamic custom value lookup. 
+pub fn dynamic( + custom_value_name: impl Into, +) -> DynamicAddress { + DynamicAddress::new(custom_value_name) +} diff --git a/vendor/pezkuwi-subxt/core/src/custom_values/mod.rs b/vendor/pezkuwi-subxt/core/src/custom_values/mod.rs new file mode 100644 index 00000000..5d378423 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/custom_values/mod.rs @@ -0,0 +1,164 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Access custom values from metadata. +//! +//! Use [`get`] to retrieve a custom value from some metadata, or [`validate`] to check that a +//! static custom value address lines up with the value seen in the metadata. +//! +//! # Example +//! +//! ```rust +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::custom_values; +//! use pezkuwi_subxt_core::Metadata; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'd like to access custom values in: +//! let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // At the moment, we don't expect to see any custom values in the metadata +//! // for Pezkuwi, so this will return an error: +//! let err = custom_values::get("Foo", &metadata); +//! ``` + +pub mod address; + +use crate::{Metadata, error::CustomValueError, utils::Maybe}; +use address::Address; +use alloc::vec::Vec; +use frame_decode::custom_values::CustomValueTypeInfo; +use scale_decode::IntoVisitor; + +/// Run the validation logic against some custom value address you'd like to access. 
Returns +/// `Ok(())` if the address is valid (or if it's not possible to check since the address has no +/// validation hash). Returns an error if the address was not valid (wrong name, type or raw bytes) +pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), CustomValueError> { + if let Some(actual_hash) = address.validation_hash() { + let custom = metadata.custom(); + let custom_value = custom + .get(address.name()) + .ok_or_else(|| CustomValueError::NotFound(address.name().into()))?; + let expected_hash = custom_value.hash(); + if actual_hash != expected_hash { + return Err(CustomValueError::IncompatibleCodegen); + } + } + Ok(()) +} + +/// Access a custom value by the address it is registered under. This can be just a [str] to get +/// back a dynamic value, or a static address from the generated static interface to get a value of +/// a static type returned. +pub fn get>( + address: Addr, + metadata: &Metadata, +) -> Result { + // 1. Validate custom value shape if hash given: + validate(&address, metadata)?; + + // 2. Attempt to decode custom value: + let value = frame_decode::custom_values::decode_custom_value( + address.name(), + metadata, + metadata.types(), + Addr::Target::into_visitor(), + ) + .map_err(CustomValueError::CouldNotDecodeCustomValue)?; + + Ok(value) +} + +/// Access the bytes of a custom value by the address it is registered under. +pub fn get_bytes( + address: Addr, + metadata: &Metadata, +) -> Result, CustomValueError> { + // 1. Validate custom value shape if hash given: + validate(&address, metadata)?; + + // 2. 
Return the underlying bytes: + let custom_value = metadata + .custom_value_info(address.name()) + .map_err(|e| CustomValueError::NotFound(e.not_found))?; + Ok(custom_value.bytes.to_vec()) +} + +#[cfg(test)] +mod tests { + use super::*; + + use alloc::collections::BTreeMap; + use codec::Encode; + use scale_decode::DecodeAsType; + use scale_info::{TypeInfo, form::PortableForm}; + + use alloc::{borrow::ToOwned, string::String, vec}; + + use crate::custom_values; + + #[derive(Debug, Clone, PartialEq, Eq, Encode, TypeInfo, DecodeAsType)] + pub struct Person { + age: u16, + name: String, + } + + fn mock_metadata() -> Metadata { + let person_ty = scale_info::MetaType::new::(); + let unit = scale_info::MetaType::new::<()>(); + let mut types = scale_info::Registry::new(); + let person_ty_id = types.register_type(&person_ty); + let unit_id = types.register_type(&unit); + let types: scale_info::PortableRegistry = types.into(); + + let person = Person { age: 42, name: "Neo".into() }; + + let person_value_metadata: frame_metadata::v15::CustomValueMetadata = + frame_metadata::v15::CustomValueMetadata { ty: person_ty_id, value: person.encode() }; + + let frame_metadata = frame_metadata::v15::RuntimeMetadataV15 { + types, + pallets: vec![], + extrinsic: frame_metadata::v15::ExtrinsicMetadata { + version: 0, + address_ty: unit_id, + call_ty: unit_id, + signature_ty: unit_id, + extra_ty: unit_id, + signed_extensions: vec![], + }, + ty: unit_id, + apis: vec![], + outer_enums: frame_metadata::v15::OuterEnums { + call_enum_ty: unit_id, + event_enum_ty: unit_id, + error_enum_ty: unit_id, + }, + custom: frame_metadata::v15::CustomMetadata { + map: BTreeMap::from_iter([("Mr. 
Robot".to_owned(), person_value_metadata)]), + }, + }; + + let metadata: pezkuwi_subxt_metadata::Metadata = frame_metadata.try_into().unwrap(); + metadata + } + + #[test] + fn test_decoding() { + let metadata = mock_metadata(); + + assert!(custom_values::get("Invalid Address", &metadata).is_err()); + + let person_addr = custom_values::address::dynamic::("Mr. Robot"); + let person = custom_values::get(&person_addr, &metadata).unwrap(); + assert_eq!(person, Person { age: 42, name: "Neo".into() }) + } +} diff --git a/vendor/pezkuwi-subxt/core/src/dynamic.rs b/vendor/pezkuwi-subxt/core/src/dynamic.rs new file mode 100644 index 00000000..27819620 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/dynamic.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module provides the entry points to create dynamic +//! transactions, storage and constant lookups. + +pub use scale_value::{At, Value}; + +// Submit dynamic transactions. +pub use crate::tx::payload::dynamic as tx; + +// Lookup constants dynamically. +pub use crate::constants::address::dynamic as constant; + +// Lookup storage values dynamically. +pub use crate::storage::address::dynamic as storage; + +// Execute runtime API function call dynamically. +pub use crate::runtime_api::payload::dynamic as runtime_api_call; + +// Execute View Function API function call dynamically. +pub use crate::view_functions::payload::dynamic as view_function_call; + +/// Obtain a custom value from the metadata. +pub use crate::custom_values::address::dynamic as custom_value; diff --git a/vendor/pezkuwi-subxt/core/src/error.rs b/vendor/pezkuwi-subxt/core/src/error.rs new file mode 100644 index 00000000..2ae61a1a --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/error.rs @@ -0,0 +1,286 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. 
+// see LICENSE for license details. + +//! The errors that can be emitted in this crate. + +use alloc::{boxed::Box, string::String, vec::Vec}; +use thiserror::Error as DeriveError; + +/// The error emitted when something goes wrong. +#[derive(Debug, DeriveError)] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + StorageError(#[from] StorageError), + #[error(transparent)] + Extrinsic(#[from] ExtrinsicError), + #[error(transparent)] + Constant(#[from] ConstantError), + #[error(transparent)] + CustomValue(#[from] CustomValueError), + #[error(transparent)] + RuntimeApi(#[from] RuntimeApiError), + #[error(transparent)] + ViewFunction(#[from] ViewFunctionError), + #[error(transparent)] + Events(#[from] EventsError), +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum EventsError { + #[error("Can't decode event: can't decode phase: {0}")] + CannotDecodePhase(codec::Error), + #[error("Can't decode event: can't decode pallet index: {0}")] + CannotDecodePalletIndex(codec::Error), + #[error("Can't decode event: can't decode variant index: {0}")] + CannotDecodeVariantIndex(codec::Error), + #[error("Can't decode event: can't find pallet with index {0}")] + CannotFindPalletWithIndex(u8), + #[error( + "Can't decode event: can't find variant with index {variant_index} in pallet {pallet_name}" + )] + CannotFindVariantWithIndex { pallet_name: String, variant_index: u8 }, + #[error("Can't decode field {field_name:?} in event {pallet_name}.{event_name}: {reason}")] + CannotDecodeFieldInEvent { + pallet_name: String, + event_name: String, + field_name: String, + reason: scale_decode::visitor::DecodeError, + }, + #[error("Can't decode event topics: {0}")] + CannotDecodeEventTopics(codec::Error), + #[error("Can't decode the fields of event {pallet_name}.{event_name}: {reason}")] + CannotDecodeEventFields { pallet_name: String, event_name: String, reason: scale_decode::Error }, + #[error("Can't decode event {pallet_name}.{event_name} 
to Event enum: {reason}")] + CannotDecodeEventEnum { pallet_name: String, event_name: String, reason: scale_decode::Error }, +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ViewFunctionError { + #[error("The static View Function address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find View Function: pallet {0} not found")] + PalletNotFound(String), + #[error("Can't find View Function {function_name} in pallet {pallet_name}")] + ViewFunctionNotFound { pallet_name: String, function_name: String }, + #[error("Failed to encode View Function inputs: {0}")] + CouldNotEncodeInputs(frame_decode::view_functions::ViewFunctionInputsEncodeError), + #[error("Failed to decode View Function: {0}")] + CouldNotDecodeResponse(frame_decode::view_functions::ViewFunctionDecodeError), +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum RuntimeApiError { + #[error("The static Runtime API address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Runtime API trait not found: {0}")] + TraitNotFound(String), + #[error("Runtime API method {method_name} not found in trait {trait_name}")] + MethodNotFound { trait_name: String, method_name: String }, + #[error("Failed to encode Runtime API inputs: {0}")] + CouldNotEncodeInputs(frame_decode::runtime_apis::RuntimeApiInputsEncodeError), + #[error("Failed to decode Runtime API: {0}")] + CouldNotDecodeResponse(frame_decode::runtime_apis::RuntimeApiDecodeError), +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum CustomValueError { + #[error("The static custom value address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("The custom value '{0}' was not found")] + NotFound(String), + #[error("Failed to decode custom value: {0}")] + CouldNotDecodeCustomValue(frame_decode::custom_values::CustomValueDecodeError), +} + +/// Something went 
wrong working with a constant. +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ConstantError { + #[error("The static constant address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find constant: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Constant '{constant_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + ConstantNameNotFound { pallet_name: String, constant_name: String }, + #[error("Failed to decode constant: {0}")] + CouldNotDecodeConstant(frame_decode::constants::ConstantDecodeError), + #[error("Cannot obtain constant information from metadata: {0}")] + ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), +} + +/// Something went wrong trying to encode or decode a storage address. +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageError { + #[error("The static storage address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find storage value: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Storage entry '{entry_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + StorageEntryNotFound { pallet_name: String, entry_name: String }, + #[error("Cannot obtain storage information from metadata: {0}")] + StorageInfoError(frame_decode::storage::StorageInfoError<'static>), + #[error("Cannot encode storage key: {0}")] + StorageKeyEncodeError(frame_decode::storage::StorageKeyEncodeError), + #[error("Cannot create a key to iterate over a plain entry")] + CannotIterPlainEntry { pallet_name: String, entry_name: String }, + #[error( + "Wrong number of key parts provided to iterate a storage address. 
We expected at most {max_expected} key parts but got {got} key parts" + )] + WrongNumberOfKeyPartsProvidedForIterating { max_expected: usize, got: usize }, + #[error( + "Wrong number of key parts provided to fetch a storage address. We expected {expected} key parts but got {got} key parts" + )] + WrongNumberOfKeyPartsProvidedForFetching { expected: usize, got: usize }, +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageKeyError { + #[error("Can't decode the storage key: {error}")] + StorageKeyDecodeError { + bytes: Vec, + error: frame_decode::storage::StorageKeyDecodeError, + }, + #[error("Can't decode the values from the storage key: {0}")] + CannotDecodeValuesInKey(frame_decode::storage::StorageKeyValueDecodeError), + #[error( + "Cannot decode storage key: there were leftover bytes, indicating that the decoding failed" + )] + LeftoverBytes { bytes: Vec }, + #[error("Can't decode a single value from the storage key part at index {index}: {error}")] + CannotDecodeValueInKey { index: usize, error: scale_decode::Error }, +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageValueError { + #[error("Cannot decode storage value: {0}")] + CannotDecode(frame_decode::storage::StorageValueDecodeError), + #[error( + "Cannot decode storage value: there were leftover bytes, indicating that the decoding failed" + )] + LeftoverBytes { bytes: Vec }, +} + +/// An error that can be encountered when constructing a transaction. 
+#[derive(Debug, DeriveError)] +#[allow(missing_docs)] +pub enum ExtrinsicError { + #[error("The extrinsic payload is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find extrinsic: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error("Can't find extrinsic: call name {call_name} doesn't exist in pallet {pallet_name}")] + CallNameNotFound { pallet_name: String, call_name: String }, + #[error("Can't encode the extrinsic call data: {0}")] + CannotEncodeCallData(scale_encode::Error), + #[error("Subxt does not support the extrinsic versions expected by the chain")] + UnsupportedVersion, + #[error("Cannot construct the required transaction extensions: {0}")] + Params(#[from] ExtrinsicParamsError), + #[error("Cannot decode transaction extension '{name}': {error}")] + CouldNotDecodeTransactionExtension { + /// The extension name. + name: String, + /// The decode error. + error: scale_decode::Error, + }, + #[error( + "After decoding the extrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" + )] + LeftoverBytes { + /// Index of the extrinsic that failed to decode. + extrinsic_index: usize, + /// Number of bytes leftover after decoding the extrinsic. + num_leftover_bytes: usize, + }, + #[error("{0}")] + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error("Failed to decode the fields of an extrinsic at index {extrinsic_index}: {error}")] + CannotDecodeFields { + /// Index of the extrinsic whose fields we could not decode + extrinsic_index: usize, + /// The decode error. + error: scale_decode::Error, + }, + #[error("Failed to decode the extrinsic at index {extrinsic_index} to a root enum: {error}")] + CannotDecodeIntoRootExtrinsic { + /// Index of the extrinsic that we failed to decode + extrinsic_index: usize, + /// The decode error. 
+ error: scale_decode::Error, + }, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +#[error("Cannot decode extrinsic at index {extrinsic_index}: {error}")] +pub struct ExtrinsicDecodeErrorAt { + pub extrinsic_index: usize, + pub error: ExtrinsicDecodeErrorAtReason, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ExtrinsicDecodeErrorAtReason { + #[error("{0}")] + DecodeError(frame_decode::extrinsics::ExtrinsicDecodeError), + #[error("Leftover bytes")] + LeftoverBytes(Vec), +} + +/// An error that can be emitted when trying to construct an instance of +/// [`crate::config::ExtrinsicParams`], encode data from the instance, or match on signed +/// extensions. +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ExtrinsicParamsError { + #[error("Cannot find type id '{type_id} in the metadata (context: {context})")] + MissingTypeId { + /// Type ID. + type_id: u32, + /// Some arbitrary context to help narrow the source of the error. + context: &'static str, + }, + #[error("The chain expects a signed extension with the name {0}, but we did not provide one")] + UnknownTransactionExtension(String), + #[error("Error constructing extrinsic parameters: {0}")] + Custom(Box), +} + +impl ExtrinsicParamsError { + /// Create a custom [`ExtrinsicParamsError`] from a string. + pub fn custom>(error: S) -> Self { + let error: String = error.into(); + let error: Box = Box::from(error); + ExtrinsicParamsError::Custom(error) + } +} + +impl From for ExtrinsicParamsError { + fn from(value: core::convert::Infallible) -> Self { + match value {} + } +} diff --git a/vendor/pezkuwi-subxt/core/src/events.rs b/vendor/pezkuwi-subxt/core/src/events.rs new file mode 100644 index 00000000..573ccdfe --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/events.rs @@ -0,0 +1,996 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. 
+// see LICENSE for license details. + +//! Decode and work with events. +//! +//! # Example +//! +//! ```rust +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::config::PezkuwiConfig; +//! use pezkuwi_subxt_core::events; +//! use pezkuwi_subxt_core::Metadata; +//! use pezkuwi_subxt_core::dynamic::Value; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'll use to work with storage entries: +//! let metadata_bytes = include_bytes!("../../artifacts/pezkuwi_metadata_full.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // Some bytes representing events (located in System.Events storage): +//! let event_bytes = hex::decode("1c00000000000000a2e9b53d5517020000000100000000000310c96d901d0102000000020000000408d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27dbeea5a030000000000000000000000000000020000000402d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48102700000000000000000000000000000000020000000407be5ddb1579b72e84524fc29e78609e3caf42e85aa118ebfe0b0ad404b5bdd25fbeea5a030000000000000000000000000000020000002100d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27dbeea5a03000000000000000000000000000000000000000000000000000000000000020000000000426df03e00000000").unwrap(); +//! +//! // We can decode these bytes like so: +//! let evs = events::decode_from::(event_bytes, metadata); +//! +//! // And then do things like iterate over them and inspect details: +//! for ev in evs.iter() { +//! let ev = ev.unwrap(); +//! println!("Index: {}", ev.index()); +//! println!("Name: {}.{}", ev.pallet_name(), ev.variant_name()); +//! println!("Fields: {:?}", ev.decode_as_fields::().unwrap()); +//! } +//! 
``` + +use alloc::{string::ToString, sync::Arc, vec::Vec}; +use codec::{Compact, Decode, Encode}; +use derive_where::derive_where; +use pezkuwi_subxt_metadata::PalletMetadata; +use scale_decode::{DecodeAsFields, DecodeAsType}; + +use crate::{ + Metadata, + config::{Config, HashFor}, + error::EventsError, +}; + +/// Create a new [`Events`] instance from the given bytes. +/// +/// This is a shortcut for [`Events::decode_from`]. +pub fn decode_from(event_bytes: Vec, metadata: Metadata) -> Events { + Events::decode_from(event_bytes, metadata) +} + +/// Trait to uniquely identify the events's identity from the runtime metadata. +/// +/// Generated API structures that represent an event implement this trait. +/// +/// The trait is utilized to decode emitted events from a block, via obtaining the +/// form of the `Event` from the metadata. +pub trait StaticEvent: DecodeAsFields { + /// Pallet name. + const PALLET: &'static str; + /// Event name. + const EVENT: &'static str; + + /// Returns true if the given pallet and event names match this event. + fn is_event(pallet: &str, event: &str) -> bool { + Self::PALLET == pallet && Self::EVENT == event + } +} + +/// A collection of events obtained from a block, bundled with the necessary +/// information needed to decode and iterate over them. +#[derive_where(Clone)] +pub struct Events { + metadata: Metadata, + // Note; raw event bytes are prefixed with a Compact containing + // the number of events to be decoded. The start_idx reflects that, so + // that we can skip over those bytes when decoding them + event_bytes: Arc<[u8]>, + start_idx: usize, + num_events: u32, + marker: core::marker::PhantomData, +} + +// Ignore the Metadata when debug-logging events; it's big and distracting. 
+impl core::fmt::Debug for Events { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Events") + .field("event_bytes", &self.event_bytes) + .field("start_idx", &self.start_idx) + .field("num_events", &self.num_events) + .finish() + } +} + +impl Events { + /// Create a new [`Events`] instance from the given bytes. + pub fn decode_from(event_bytes: Vec, metadata: Metadata) -> Self { + // event_bytes is a SCALE encoded vector of events. So, pluck the + // compact encoded length from the front, leaving the remaining bytes + // for our iterating to decode. + // + // Note: if we get no bytes back, avoid an error reading vec length + // and default to 0 events. + let cursor = &mut &*event_bytes; + let num_events = >::decode(cursor).unwrap_or(Compact(0)).0; + + // Start decoding after the compact encoded bytes. + let start_idx = event_bytes.len() - cursor.len(); + + Self { + metadata, + event_bytes: event_bytes.into(), + start_idx, + num_events, + marker: core::marker::PhantomData, + } + } + + /// The number of events. + pub fn len(&self) -> u32 { + self.num_events + } + + /// Are there no events in this block? + // Note: mainly here to satisfy clippy. + pub fn is_empty(&self) -> bool { + self.num_events == 0 + } + + /// Return the bytes representing all of the events. + pub fn bytes(&self) -> &[u8] { + &self.event_bytes + } + + /// Iterate over all of the events, using metadata to dynamically + /// decode them as we go, and returning the raw bytes and other associated + /// details. If an error occurs, all subsequent iterations return `None`. + // Dev note: The returned iterator is 'static + Send so that we can box it up and make + // use of it with our `FilterEvents` stuff. 
+ pub fn iter( + &self, + ) -> impl Iterator, EventsError>> + Send + Sync + 'static { + // The event bytes ignoring the compact encoded length on the front: + let event_bytes = self.event_bytes.clone(); + let metadata = self.metadata.clone(); + let num_events = self.num_events; + + let mut pos = self.start_idx; + let mut index = 0; + core::iter::from_fn(move || { + if event_bytes.len() <= pos || num_events == index { + None + } else { + match EventDetails::decode_from(metadata.clone(), event_bytes.clone(), pos, index) { + Ok(event_details) => { + // Skip over decoded bytes in next iteration: + pos += event_details.bytes().len(); + // Increment the index: + index += 1; + // Return the event details: + Some(Ok(event_details)) + }, + Err(e) => { + // By setting the position to the "end" of the event bytes, + // the cursor len will become 0 and the iterator will return `None` + // from now on: + pos = event_bytes.len(); + Some(Err(e)) + }, + } + } + }) + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return only those which should decode to the provided `Ev` type. + /// If an error occurs, all subsequent iterations return `None`. + pub fn find(&self) -> impl Iterator> { + self.iter().filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return the first event found which decodes to the provided `Ev` type. + pub fn find_first(&self) -> Result, EventsError> { + self.find::().next().transpose() + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return the last event found which decodes to the provided `Ev` type. + pub fn find_last(&self) -> Result, EventsError> { + self.find::().last().transpose() + } + + /// Find an event that decodes to the type provided. Returns true if it was found. 
+ pub fn has(&self) -> Result { + Ok(self.find::().next().transpose()?.is_some()) + } +} + +/// A phase of a block's execution. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Decode, Encode)] +pub enum Phase { + /// Applying an extrinsic. + ApplyExtrinsic(u32), + /// Finalizing the block. + Finalization, + /// Initializing the block. + Initialization, +} + +/// The event details. +#[derive(Debug, Clone)] +pub struct EventDetails { + phase: Phase, + /// The index of the event in the list of events in a given block. + index: u32, + all_bytes: Arc<[u8]>, + // start of the bytes (phase, pallet/variant index and then fields and then topic to follow). + start_idx: usize, + // start of the event (ie pallet/variant index and then the fields and topic after). + event_start_idx: usize, + // start of the fields (ie after phase and pallet/variant index). + event_fields_start_idx: usize, + // end of the fields. + event_fields_end_idx: usize, + // end of everything (fields + topics) + end_idx: usize, + metadata: Metadata, + topics: Vec>, +} + +impl EventDetails { + /// Attempt to dynamically decode a single event from our events input. 
+ fn decode_from( + metadata: Metadata, + all_bytes: Arc<[u8]>, + start_idx: usize, + index: u32, + ) -> Result, EventsError> { + let input = &mut &all_bytes[start_idx..]; + + let phase = Phase::decode(input).map_err(EventsError::CannotDecodePhase)?; + + let event_start_idx = all_bytes.len() - input.len(); + + let pallet_index = u8::decode(input).map_err(EventsError::CannotDecodePalletIndex)?; + let variant_index = u8::decode(input).map_err(EventsError::CannotDecodeVariantIndex)?; + + let event_fields_start_idx = all_bytes.len() - input.len(); + + // Get metadata for the event: + let event_pallet = metadata + .pallet_by_event_index(pallet_index) + .ok_or_else(|| EventsError::CannotFindPalletWithIndex(pallet_index))?; + let event_variant = + event_pallet.event_variant_by_index(variant_index).ok_or_else(|| { + EventsError::CannotFindVariantWithIndex { + pallet_name: event_pallet.name().to_string(), + variant_index, + } + })?; + + tracing::debug!("Decoding Event '{}::{}'", event_pallet.name(), &event_variant.name); + + // Skip over the bytes belonging to this event. + for field_metadata in &event_variant.fields { + // Skip over the bytes for this field: + scale_decode::visitor::decode_with_visitor( + input, + field_metadata.ty.id, + metadata.types(), + scale_decode::visitor::IgnoreVisitor::new(), + ) + .map_err(|e| EventsError::CannotDecodeFieldInEvent { + pallet_name: event_pallet.name().to_string(), + event_name: event_variant.name.clone(), + field_name: field_metadata.name.clone().unwrap_or("".to_string()), + reason: e, + })?; + } + + // the end of the field bytes. + let event_fields_end_idx = all_bytes.len() - input.len(); + + // topics come after the event data in EventRecord. + let topics = + Vec::>::decode(input).map_err(EventsError::CannotDecodeEventTopics)?; + + // what bytes did we skip over in total, including topics. 
+ let end_idx = all_bytes.len() - input.len(); + + Ok(EventDetails { + phase, + index, + start_idx, + event_start_idx, + event_fields_start_idx, + event_fields_end_idx, + end_idx, + all_bytes, + metadata, + topics, + }) + } + + /// When was the event produced? + pub fn phase(&self) -> Phase { + self.phase + } + + /// What index is this event in the stored events for this block. + pub fn index(&self) -> u32 { + self.index + } + + /// The index of the pallet that the event originated from. + pub fn pallet_index(&self) -> u8 { + // Note: never panics; we expect these bytes to exist + // in order that the EventDetails could be created. + self.all_bytes[self.event_fields_start_idx - 2] + } + + /// The index of the event variant that the event originated from. + pub fn variant_index(&self) -> u8 { + // Note: never panics; we expect these bytes to exist + // in order that the EventDetails could be created. + self.all_bytes[self.event_fields_start_idx - 1] + } + + /// The name of the pallet from whence the Event originated. + pub fn pallet_name(&self) -> &str { + self.event_metadata().pallet.name() + } + + /// Alias for pallet_name() - rebranded terminology (pezpallet) + pub fn pezpallet_name(&self) -> &str { + self.pallet_name() + } + + /// The name of the event (ie the name of the variant that it corresponds to). + pub fn variant_name(&self) -> &str { + &self.event_metadata().variant.name + } + + /// Fetch details from the metadata for this event. + pub fn event_metadata(&self) -> EventMetadataDetails<'_> { + let pallet = self + .metadata + .pallet_by_event_index(self.pallet_index()) + .expect("event pallet to be found; we did this already during decoding"); + let variant = pallet + .event_variant_by_index(self.variant_index()) + .expect("event variant to be found; we did this already during decoding"); + + EventMetadataDetails { pallet, variant } + } + + /// Return _all_ of the bytes representing this event, which include, in order: + /// - The phase. 
+ /// - Pallet and event index. + /// - Event fields. + /// - Event Topics. + pub fn bytes(&self) -> &[u8] { + &self.all_bytes[self.start_idx..self.end_idx] + } + + /// Return the bytes representing the fields stored in this event. + pub fn field_bytes(&self) -> &[u8] { + &self.all_bytes[self.event_fields_start_idx..self.event_fields_end_idx] + } + + /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] + /// type which represents the named or unnamed fields that were present in the event. + pub fn decode_as_fields(&self) -> Result { + let bytes = &mut self.field_bytes(); + let event_metadata = self.event_metadata(); + + let mut fields = event_metadata + .variant + .fields + .iter() + .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); + + let decoded = + E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { + EventsError::CannotDecodeEventFields { + pallet_name: event_metadata.pallet.name().to_string(), + event_name: event_metadata.variant.name.clone(), + reason: e, + } + })?; + + Ok(decoded) + } + + /// Attempt to decode these [`EventDetails`] into a type representing the event fields. + /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. 
+ pub fn as_event(&self) -> Result, EventsError> { + let ev_metadata = self.event_metadata(); + if ev_metadata.pallet.name() == E::PALLET && ev_metadata.variant.name == E::EVENT { + let mut fields = ev_metadata + .variant + .fields + .iter() + .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); + let decoded = + E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) + .map_err(|e| EventsError::CannotDecodeEventFields { + pallet_name: E::PALLET.to_string(), + event_name: E::EVENT.to_string(), + reason: e, + })?; + Ok(Some(decoded)) + } else { + Ok(None) + } + } + + /// Attempt to decode these [`EventDetails`] into a root event type (which includes + /// the pallet and event enum variants as well as the event fields). A compatible + /// type for this is exposed via static codegen as a root level `Event` type. + pub fn as_root_event(&self) -> Result { + let bytes = &self.all_bytes[self.event_start_idx..self.event_fields_end_idx]; + + let decoded = E::decode_as_type( + &mut &bytes[..], + self.metadata.outer_enums().event_enum_ty(), + self.metadata.types(), + ) + .map_err(|e| { + let md = self.event_metadata(); + EventsError::CannotDecodeEventEnum { + pallet_name: md.pallet.name().to_string(), + event_name: md.variant.name.clone(), + reason: e, + } + })?; + + Ok(decoded) + } + + /// Return the topics associated with this event. + pub fn topics(&self) -> &[HashFor] { + &self.topics + } +} + +/// Details for the given event plucked from the metadata. +pub struct EventMetadataDetails<'a> { + /// Metadata for the pallet that the event belongs to. + pub pallet: PalletMetadata<'a>, + /// Metadata for the variant which describes the pallet events. + pub variant: &'a scale_info::Variant, +} + +/// Event related test utilities used outside this module. 
+#[cfg(test)] +pub(crate) mod test_utils { + use super::*; + use crate::config::{HashFor, BizinikiwConfig}; + use codec::Encode; + use frame_metadata::{ + RuntimeMetadataPrefixed, + v15::{ + CustomMetadata, ExtrinsicMetadata, OuterEnums, PalletEventMetadata, PalletMetadata, + RuntimeMetadataV15, + }, + }; + use scale_info::{TypeInfo, meta_type}; + + /// An "outer" events enum containing exactly one event. + #[derive( + Encode, + Decode, + TypeInfo, + Clone, + Debug, + PartialEq, + Eq, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + )] + pub enum AllEvents { + Test(Ev), + } + + /// This encodes to the same format an event is expected to encode to + /// in node System.Events storage. + #[derive(Encode)] + pub struct EventRecord { + phase: Phase, + event: AllEvents, + topics: Vec>, + } + + impl EventRecord { + /// Create a new event record with the given phase, event, and topics. + pub fn new(phase: Phase, event: E, topics: Vec>) -> Self { + Self { phase, event: AllEvents::Test(event), topics } + } + } + + /// Build an EventRecord, which encoded events in the format expected + /// to be handed back from storage queries to System.Events. + pub fn event_record(phase: Phase, event: E) -> EventRecord { + EventRecord::new(phase, event, vec![]) + } + + /// Build fake metadata consisting of a single pallet that knows + /// about the event type provided. + pub fn metadata() -> Metadata { + // Extrinsic needs to contain at least the generic type parameter "Call" + // for the metadata to be valid. + // The "Call" type from the metadata is used to decode extrinsics. + // In reality, the extrinsic type has "Call", "Address", "Extra", "Signature" generic types. + #[allow(unused)] + #[derive(TypeInfo)] + struct ExtrinsicType { + call: Call, + } + // Because this type is used to decode extrinsics, we expect this to be a TypeDefVariant. + // Each pallet must contain one single variant. 
+ #[allow(unused)] + #[derive(TypeInfo)] + enum RuntimeCall { + PalletName(Pallet), + } + // The calls of the pallet. + #[allow(unused)] + #[derive(TypeInfo)] + enum Pallet { + #[allow(unused)] + SomeCall, + } + + let pallets = vec![PalletMetadata { + name: "Test", + storage: None, + calls: None, + event: Some(PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + index: 0, + docs: vec![], + }]; + + let extrinsic = ExtrinsicMetadata { + version: 0, + signed_extensions: vec![], + address_ty: meta_type::<()>(), + call_ty: meta_type::(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), + }; + + let meta = RuntimeMetadataV15::new( + pallets, + extrinsic, + meta_type::<()>(), + vec![], + OuterEnums { + call_enum_ty: meta_type::<()>(), + event_enum_ty: meta_type::>(), + error_enum_ty: meta_type::<()>(), + }, + CustomMetadata { map: Default::default() }, + ); + let runtime_metadata: RuntimeMetadataPrefixed = meta.into(); + let metadata: pezkuwi_subxt_metadata::Metadata = runtime_metadata.try_into().unwrap(); + + metadata + } + + /// Build an `Events` object for test purposes, based on the details provided, + /// and with a default block hash. + pub fn events( + metadata: Metadata, + event_records: Vec>, + ) -> Events { + let num_events = event_records.len() as u32; + let mut event_bytes = Vec::new(); + for ev in event_records { + ev.encode_to(&mut event_bytes); + } + events_raw(metadata, event_bytes, num_events) + } + + /// Much like [`events`], but takes pre-encoded events and event count, so that we can + /// mess with the bytes in tests if we need to. 
+ pub fn events_raw( + metadata: Metadata, + event_bytes: Vec, + num_events: u32, + ) -> Events { + // Prepend compact encoded length to event bytes: + let mut all_event_bytes = Compact(num_events).encode(); + all_event_bytes.extend(event_bytes); + Events::decode_from(all_event_bytes, metadata) + } +} + +#[cfg(test)] +mod tests { + use super::{ + test_utils::{AllEvents, EventRecord, event_record, events, events_raw}, + *, + }; + use crate::{config::BizinikiwConfig, events::Phase}; + use codec::Encode; + use primitive_types::H256; + use scale_info::TypeInfo; + use scale_value::Value; + + /// Build a fake wrapped metadata. + fn metadata() -> Metadata { + test_utils::metadata::() + } + + /// [`RawEventDetails`] can be annoying to test, because it contains + /// type info in the decoded field Values. Strip that here so that + /// we can compare fields more easily. + #[derive(Debug, PartialEq, Eq, Clone)] + pub struct TestRawEventDetails { + pub phase: Phase, + pub index: u32, + pub pallet: String, + pub pallet_index: u8, + pub variant: String, + pub variant_index: u8, + pub fields: Vec, + } + + /// Compare some actual [`RawEventDetails`] with a hand-constructed + /// (probably) [`TestRawEventDetails`]. 
+ pub fn assert_raw_events_match( + actual: EventDetails, + expected: TestRawEventDetails, + ) { + let actual_fields_no_context: Vec<_> = actual + .decode_as_fields::>() + .expect("can decode field values (2)") + .into_values() + .map(|value| value.remove_context()) + .collect(); + + // Check each of the other fields: + assert_eq!(actual.phase(), expected.phase); + assert_eq!(actual.index(), expected.index); + assert_eq!(actual.pallet_name(), expected.pallet); + assert_eq!(actual.pallet_index(), expected.pallet_index); + assert_eq!(actual.variant_name(), expected.variant); + assert_eq!(actual.variant_index(), expected.variant_index); + assert_eq!(actual_fields_no_context, expected.fields); + } + + #[test] + fn statically_decode_single_root_event() { + #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo, scale_decode::DecodeAsType)] + enum Event { + A(u8, bool, Vec), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let event = Event::A(1, true, vec!["Hi".into()]); + let events = events::( + metadata, + vec![event_record(Phase::ApplyExtrinsic(123), event.clone())], + ); + + let ev = events + .iter() + .next() + .expect("one event expected") + .expect("event should be extracted OK"); + + // This is the line we're testing: + let decoded_event = ev + .as_root_event::>() + .expect("can decode event into root enum again"); + + // It should equal the event we put in: + assert_eq!(decoded_event, AllEvents::Test(event)); + } + + #[test] + fn dynamically_decode_single_event() { + #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] + enum Event { + A(u8, bool, Vec), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: 
+ let event = Event::A(1, true, vec!["Hi".into()]); + let events = + events::(metadata, vec![event_record(Phase::ApplyExtrinsic(123), event)]); + + let mut event_details = events.iter(); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + phase: Phase::ApplyExtrinsic(123), + index: 0, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![ + Value::u128(1), + Value::bool(true), + Value::unnamed_composite(vec![Value::string("Hi")]), + ], + }, + ); + assert!(event_details.next().is_none()); + } + + #[test] + fn dynamically_decode_multiple_events() { + #[derive(Clone, Copy, Debug, PartialEq, Decode, Encode, TypeInfo)] + enum Event { + A(u8), + B(bool), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let event1 = Event::A(1); + let event2 = Event::B(true); + let event3 = Event::A(234); + + let events = events::( + metadata, + vec![ + event_record(Phase::Initialization, event1), + event_record(Phase::ApplyExtrinsic(123), event2), + event_record(Phase::Finalization, event3), + ], + ); + + let mut event_details = events.iter(); + + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 0, + phase: Phase::Initialization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::u128(1)], + }, + ); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 1, + phase: Phase::ApplyExtrinsic(123), + pallet: "Test".to_string(), + pallet_index: 0, + variant: "B".to_string(), + variant_index: 1, + fields: vec![Value::bool(true)], + }, + ); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 2, + phase: 
Phase::Finalization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::u128(234)], + }, + ); + assert!(event_details.next().is_none()); + } + + #[test] + fn dynamically_decode_multiple_events_until_error() { + #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] + enum Event { + A(u8), + B(bool), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode 2 events: + let mut event_bytes = vec![]; + event_record(Phase::Initialization, Event::A(1)).encode_to(&mut event_bytes); + event_record(Phase::ApplyExtrinsic(123), Event::B(true)).encode_to(&mut event_bytes); + + // Push a few naff bytes to the end (a broken third event): + event_bytes.extend_from_slice(&[3, 127, 45, 0, 2]); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let events = events_raw( + metadata, + event_bytes, + 3, // 2 "good" events, and then it'll hit the naff bytes. + ); + + let mut events_iter = events.iter(); + assert_raw_events_match( + events_iter.next().unwrap().unwrap(), + TestRawEventDetails { + index: 0, + phase: Phase::Initialization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::u128(1)], + }, + ); + assert_raw_events_match( + events_iter.next().unwrap().unwrap(), + TestRawEventDetails { + index: 1, + phase: Phase::ApplyExtrinsic(123), + pallet: "Test".to_string(), + pallet_index: 0, + variant: "B".to_string(), + variant_index: 1, + fields: vec![Value::bool(true)], + }, + ); + + // We'll hit an error trying to decode the third event: + assert!(events_iter.next().unwrap().is_err()); + // ... and then "None" from then on. 
+ assert!(events_iter.next().is_none()); + assert!(events_iter.next().is_none()); + } + + #[test] + fn compact_event_field() { + #[derive(Clone, Debug, PartialEq, Encode, Decode, TypeInfo)] + enum Event { + A(#[codec(compact)] u32), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let events = + events::(metadata, vec![event_record(Phase::Finalization, Event::A(1))]); + + // Dynamically decode: + let mut event_details = events.iter(); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 0, + phase: Phase::Finalization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::u128(1)], + }, + ); + assert!(event_details.next().is_none()); + } + + #[test] + fn compact_wrapper_struct_field() { + #[derive(Clone, Decode, Debug, PartialEq, Encode, TypeInfo)] + enum Event { + A(#[codec(compact)] CompactWrapper), + } + + #[derive(Clone, Decode, Debug, PartialEq, codec::CompactAs, Encode, TypeInfo)] + struct CompactWrapper(u64); + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let events = events::( + metadata, + vec![event_record(Phase::Finalization, Event::A(CompactWrapper(1)))], + ); + + // Dynamically decode: + let mut event_details = events.iter(); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 0, + phase: Phase::Finalization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::unnamed_composite(vec![Value::u128(1)])], + }, + ); + assert!(event_details.next().is_none()); + } + + #[test] + fn 
event_containing_explicit_index() { + #[derive(Clone, Debug, PartialEq, Eq, Decode, Encode, TypeInfo)] + #[repr(u8)] + #[allow(trivial_numeric_casts, clippy::unnecessary_cast)] // required because the Encode derive produces a warning otherwise + pub enum MyType { + B = 10u8, + } + + #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] + enum Event { + A(MyType), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let events = + events::(metadata, vec![event_record(Phase::Finalization, Event::A(MyType::B))]); + + // Dynamically decode: + let mut event_details = events.iter(); + assert_raw_events_match( + event_details.next().unwrap().unwrap(), + TestRawEventDetails { + index: 0, + phase: Phase::Finalization, + pallet: "Test".to_string(), + pallet_index: 0, + variant: "A".to_string(), + variant_index: 0, + fields: vec![Value::unnamed_variant("B", vec![])], + }, + ); + assert!(event_details.next().is_none()); + } + + #[test] + fn topics() { + #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo, scale_decode::DecodeAsType)] + enum Event { + A(u8, bool, Vec), + } + + // Create fake metadata that knows about our single event, above: + let metadata = metadata::(); + + // Encode our events in the format we expect back from a node, and + // construct an Events object to iterate them: + let event = Event::A(1, true, vec!["Hi".into()]); + let topics = vec![H256::from_low_u64_le(123), H256::from_low_u64_le(456)]; + let events = events::( + metadata, + vec![EventRecord::new(Phase::ApplyExtrinsic(123), event, topics.clone())], + ); + + let ev = events + .iter() + .next() + .expect("one event expected") + .expect("event should be extracted OK"); + + assert_eq!(topics, ev.topics()); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/lib.rs b/vendor/pezkuwi-subxt/core/src/lib.rs new file mode 
100644 index 00000000..68555ce7 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # subxt-core +//! +//! A `#[no_std]` compatible subset of the functionality provided in the `subxt` crate. This +//! contains the core logic for encoding and decoding things, but nothing related to networking. +//! +//! Here's an overview of the main things exposed here: +//! +//! - [`blocks`]: decode and explore block bodies. +//! - [`constants`]: access and validate the constant addresses in some metadata. +//! - [`custom_values`]: access and validate the custom value addresses in some metadata. +//! - [`storage`]: construct storage request payloads and decode the results you'd get back. +//! - [`tx`]: construct and sign transactions (extrinsics). +//! - [`runtime_api`]: construct runtime API request payloads and decode the results you'd get back. +//! - [`events`]: decode and explore events. + +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] +pub extern crate alloc; + +pub mod blocks; +pub mod client; +pub mod config; +pub mod constants; +pub mod custom_values; +pub mod dynamic; +pub mod error; +pub mod events; +pub mod runtime_api; +pub mod storage; +pub mod tx; +pub mod utils; +pub mod view_functions; + +pub use config::Config; +pub use error::Error; +pub use pezkuwi_subxt_metadata::Metadata; + +/// Re-exports of some of the key external crates. +pub mod ext { + pub use codec; + pub use scale_decode; + pub use scale_encode; + pub use scale_value; +} diff --git a/vendor/pezkuwi-subxt/core/src/runtime_api/mod.rs b/vendor/pezkuwi-subxt/core/src/runtime_api/mod.rs new file mode 100644 index 00000000..8605e86e --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/runtime_api/mod.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Encode runtime API payloads, decode the associated values returned from them, and validate +//! static runtime API payloads. +//! +//! # Example +//! +//! ```rust +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::runtime_api; +//! use pezkuwi_subxt_core::Metadata; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'll use to work with storage entries: +//! let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // Build a storage query to access account information. +//! let payload = pezkuwi::apis().metadata().metadata_versions(); +//! +//! // We can validate that the payload is compatible with the given metadata. +//! runtime_api::validate(&payload, &metadata).unwrap(); +//! +//! // Encode the payload name and arguments to hand to a node: +//! let _call_name = runtime_api::call_name(&payload); +//! let _call_args = runtime_api::call_args(&payload, &metadata).unwrap(); +//! +//! // If we were to obtain a value back from the node, we could +//! // then decode it using the same payload and metadata like so: +//! let value_bytes = hex::decode("080e0000000f000000").unwrap(); +//! let value = runtime_api::decode_value(&mut &*value_bytes, &payload, &metadata).unwrap(); +//! +//! println!("Available metadata versions: {value:?}"); +//! ``` + +pub mod payload; + +use crate::{Metadata, error::RuntimeApiError}; +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; +use payload::Payload; +use scale_decode::IntoVisitor; + +/// Run the validation logic against some runtime API payload you'd like to use. 
Returns `Ok(())` +/// if the payload is valid (or if it's not possible to check since the payload has no validation +/// hash). Return an error if the payload was not valid or something went wrong trying to validate +/// it (ie the runtime API in question do not exist at all) +pub fn validate(payload: P, metadata: &Metadata) -> Result<(), RuntimeApiError> { + let Some(hash) = payload.validation_hash() else { + return Ok(()); + }; + + let trait_name = payload.trait_name(); + let method_name = payload.method_name(); + + let api_trait = metadata + .runtime_api_trait_by_name(trait_name) + .ok_or_else(|| RuntimeApiError::TraitNotFound(trait_name.to_string()))?; + let api_method = + api_trait + .method_by_name(method_name) + .ok_or_else(|| RuntimeApiError::MethodNotFound { + trait_name: trait_name.to_string(), + method_name: method_name.to_string(), + })?; + + if hash != api_method.hash() { Err(RuntimeApiError::IncompatibleCodegen) } else { Ok(()) } +} + +/// Return the name of the runtime API call from the payload. +pub fn call_name(payload: P) -> String { + format!("{}_{}", payload.trait_name(), payload.method_name()) +} + +/// Return the encoded call args given a runtime API payload. +pub fn call_args(payload: P, metadata: &Metadata) -> Result, RuntimeApiError> { + let value = frame_decode::runtime_apis::encode_runtime_api_inputs( + payload.trait_name(), + payload.method_name(), + payload.args(), + metadata, + metadata.types(), + ) + .map_err(RuntimeApiError::CouldNotEncodeInputs)?; + + Ok(value) +} + +/// Decode the value bytes at the location given by the provided runtime API payload. 
+pub fn decode_value( + bytes: &mut &[u8], + payload: P, + metadata: &Metadata, +) -> Result { + let value = frame_decode::runtime_apis::decode_runtime_api_response( + payload.trait_name(), + payload.method_name(), + bytes, + metadata, + metadata.types(), + P::ReturnType::into_visitor(), + ) + .map_err(RuntimeApiError::CouldNotDecodeResponse)?; + + Ok(value) +} diff --git a/vendor/pezkuwi-subxt/core/src/runtime_api/payload.rs b/vendor/pezkuwi-subxt/core/src/runtime_api/payload.rs new file mode 100644 index 00000000..f3d914de --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/runtime_api/payload.rs @@ -0,0 +1,158 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains the trait and types used to represent +//! runtime API calls that can be made. + +use alloc::{borrow::Cow, string::String}; +use core::marker::PhantomData; +use derive_where::derive_where; +use frame_decode::runtime_apis::IntoEncodableValues; +use scale_decode::DecodeAsType; + +/// This represents a runtime API payload that can be used to call a Runtime API on +/// a chain and decode the response. +pub trait Payload { + /// Type of the arguments. + type ArgsType: IntoEncodableValues; + /// The return type of the function call. + type ReturnType: DecodeAsType; + + /// The runtime API trait name. + fn trait_name(&self) -> &str; + + /// The runtime API method name. + fn method_name(&self) -> &str; + + /// The input arguments. + fn args(&self) -> &Self::ArgsType; + + /// Returns the statically generated validation hash. + fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +// Any reference to a payload is a valid payload. 
+impl Payload for &'_ P { + type ArgsType = P::ArgsType; + type ReturnType = P::ReturnType; + + fn trait_name(&self) -> &str { + P::trait_name(*self) + } + + fn method_name(&self) -> &str { + P::method_name(*self) + } + + fn args(&self) -> &Self::ArgsType { + P::args(*self) + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + P::validation_hash(*self) + } +} + +/// A runtime API payload containing the generic argument data +/// and interpreting the result of the call as `ReturnTy`. +/// +/// This can be created from static values (ie those generated +/// via the `subxt` macro) or dynamic values via [`dynamic`]. +#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] +pub struct StaticPayload { + trait_name: Cow<'static, str>, + method_name: Cow<'static, str>, + args: ArgsType, + validation_hash: Option<[u8; 32]>, + _marker: PhantomData, +} + +/// A dynamic runtime API payload. +pub type DynamicPayload = StaticPayload; + +impl Payload + for StaticPayload +{ + type ArgsType = ArgsType; + type ReturnType = ReturnType; + + fn trait_name(&self) -> &str { + &self.trait_name + } + + fn method_name(&self) -> &str { + &self.method_name + } + + fn args(&self) -> &Self::ArgsType { + &self.args + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + self.validation_hash + } +} + +impl StaticPayload { + /// Create a new [`StaticPayload`]. + pub fn new( + trait_name: impl Into, + method_name: impl Into, + args: ArgsType, + ) -> Self { + StaticPayload { + trait_name: trait_name.into().into(), + method_name: method_name.into().into(), + args, + validation_hash: None, + _marker: PhantomData, + } + } + + /// Create a new static [`StaticPayload`] using static function name + /// and scale-encoded argument data. + /// + /// This is only expected to be used from codegen. 
+ #[doc(hidden)] + pub fn new_static( + trait_name: &'static str, + method_name: &'static str, + args: ArgsType, + hash: [u8; 32], + ) -> StaticPayload { + StaticPayload { + trait_name: Cow::Borrowed(trait_name), + method_name: Cow::Borrowed(method_name), + args, + validation_hash: Some(hash), + _marker: core::marker::PhantomData, + } + } + + /// Do not validate this call prior to submitting it. + pub fn unvalidated(self) -> Self { + Self { validation_hash: None, ..self } + } + + /// Returns the trait name. + pub fn trait_name(&self) -> &str { + &self.trait_name + } + + /// Returns the method name. + pub fn method_name(&self) -> &str { + &self.method_name + } +} + +/// Create a new [`DynamicPayload`]. +pub fn dynamic( + trait_name: impl Into, + method_name: impl Into, + args_data: ArgsType, +) -> DynamicPayload { + DynamicPayload::new(trait_name, method_name, args_data) +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/address.rs b/vendor/pezkuwi-subxt/core/src/storage/address.rs new file mode 100644 index 00000000..cab64445 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/address.rs @@ -0,0 +1,169 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Construct addresses to access storage entries with. + +use crate::utils::{Maybe, YesMaybe}; +use alloc::{borrow::Cow, string::String, vec::Vec}; +use frame_decode::storage::{IntoDecodableValues, IntoEncodableValues}; +use scale_decode::DecodeAsType; + +/// A storage address. This allows access to a given storage entry, which can then +/// be iterated over or fetched from by providing the relevant set of keys, or +/// otherwise inspected. +pub trait Address { + /// All of the keys required to get to an individual value at this address. + /// Keys must always impl [`IntoEncodableValues`], and for iteration must + /// also impl [`frame_decode::storage::IntoDecodableValues`]. 
+ type KeyParts: IntoEncodableValues + IntoDecodableValues; + /// Type of the storage value at this location. + type Value: DecodeAsType; + /// Does the address point to a plain value (as opposed to a map)? + /// Set to [`crate::utils::Yes`] to enable APIs which require a map, + /// or [`crate::utils::Maybe`] to enable APIs which allow a map. + type IsPlain: YesMaybe; + + /// The pallet containing this storage entry. + fn pallet_name(&self) -> &str; + + /// The name of the storage entry. + fn entry_name(&self) -> &str; + + /// Return a unique hash for this address which can be used to validate it against metadata. + fn validation_hash(&self) -> Option<[u8; 32]>; +} + +// Any reference to an address is a valid address. +impl Address for &'_ A { + type KeyParts = A::KeyParts; + type Value = A::Value; + type IsPlain = A::IsPlain; + + fn pallet_name(&self) -> &str { + A::pallet_name(*self) + } + + fn entry_name(&self) -> &str { + A::entry_name(*self) + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + A::validation_hash(*self) + } +} + +/// An address which is generated by the static APIs. +pub struct StaticAddress { + pallet_name: Cow<'static, str>, + entry_name: Cow<'static, str>, + validation_hash: Option<[u8; 32]>, + marker: core::marker::PhantomData<(KeyParts, Value, IsPlain)>, +} + +impl Clone for StaticAddress { + fn clone(&self) -> Self { + Self { + pallet_name: self.pallet_name.clone(), + entry_name: self.entry_name.clone(), + validation_hash: self.validation_hash, + marker: self.marker, + } + } +} + +impl core::fmt::Debug for StaticAddress { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StaticAddress") + .field("pallet_name", &self.pallet_name) + .field("entry_name", &self.entry_name) + .field("validation_hash", &self.validation_hash) + .finish() + } +} + +impl StaticAddress { + /// Create a new [`StaticAddress`] using static strings for the pallet and call name. 
+ /// This is only expected to be used from codegen. + #[doc(hidden)] + pub fn new_static(pallet_name: &'static str, entry_name: &'static str, hash: [u8; 32]) -> Self { + Self { + pallet_name: Cow::Borrowed(pallet_name), + entry_name: Cow::Borrowed(entry_name), + validation_hash: Some(hash), + marker: core::marker::PhantomData, + } + } + + /// Create a new address. + pub fn new(pallet_name: impl Into, entry_name: impl Into) -> Self { + Self { + pallet_name: pallet_name.into().into(), + entry_name: entry_name.into().into(), + validation_hash: None, + marker: core::marker::PhantomData, + } + } + + /// Do not validate this storage entry prior to accessing it. + pub fn unvalidated(mut self) -> Self { + self.validation_hash = None; + self + } +} + +impl Address for StaticAddress +where + KeyParts: IntoEncodableValues + IntoDecodableValues, + Value: DecodeAsType, + IsPlain: YesMaybe, +{ + type KeyParts = KeyParts; + type Value = Value; + type IsPlain = IsPlain; + + fn pallet_name(&self) -> &str { + &self.pallet_name + } + + fn entry_name(&self) -> &str { + &self.entry_name + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + self.validation_hash + } +} + +impl, B: AsRef> Address for (A, B) { + type KeyParts = Vec; + type Value = scale_value::Value; + type IsPlain = Maybe; + + fn pallet_name(&self) -> &str { + self.0.as_ref() + } + + fn entry_name(&self) -> &str { + self.1.as_ref() + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +/// A dynamic address is simply a [`StaticAddress`] which asserts that the +/// entry *might* be a map and *might* have a default value. +pub type DynamicAddress, Value = scale_value::Value> = + StaticAddress; + +/// Construct a new dynamic storage address. You can define the type of the +/// storage keys and value yourself here, but have no guarantee that they will +/// be correct. 
+pub fn dynamic( + pallet_name: impl Into, + entry_name: impl Into, +) -> DynamicAddress { + DynamicAddress::::new(pallet_name.into(), entry_name.into()) +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/mod.rs b/vendor/pezkuwi-subxt/core/src/storage/mod.rs new file mode 100644 index 00000000..3271f21d --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/mod.rs @@ -0,0 +1,90 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Encode storage keys, decode storage values, and validate static storage addresses. +//! +//! # Example +//! +//! ```rust +//! use pezkuwi_subxt_signer::sr25519::dev; +//! use pezkuwi_subxt_macro::subxt; +//! use pezkuwi_subxt_core::storage; +//! use pezkuwi_subxt_core::Metadata; +//! +//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`: +//! #[subxt( +//! crate = "::pezkuwi_subxt_core", +//! runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale", +//! )] +//! pub mod pezkuwi {} +//! +//! // Some metadata we'll use to work with storage entries: +//! let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); +//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); +//! +//! // Build a storage query to access account information. +//! let address = pezkuwi::storage().system().account(); +//! +//! // We can validate that the address is compatible with the given metadata. +//! storage::validate(&address, &metadata).unwrap(); +//! +//! // We can fetch details about the storage entry associated with this address: +//! let entry = storage::entry(address, &metadata).unwrap(); +//! +//! // .. including generating a key to fetch the entry with: +//! let fetch_key = entry.fetch_key((dev::alice().public_key().into(),)).unwrap(); +//! +//! // .. or generating a key to iterate over entries with at a given depth: +//! 
let iter_key = entry.iter_key(()).unwrap(); +//! +//! // Given a value, we can decode it: +//! let value_bytes = hex::decode("00000000000000000100000000000000000064a7b3b6e00d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080").unwrap(); +//! let value = entry.value(value_bytes).decode().unwrap(); +//! +//! println!("Alice's account info: {value:?}"); +//! ``` + +mod prefix_of; +mod storage_entry; +mod storage_key; +mod storage_key_value; +mod storage_value; + +pub mod address; + +use crate::{Metadata, error::StorageError}; +use address::Address; +use alloc::string::ToString; + +pub use prefix_of::{EqualOrPrefixOf, PrefixOf}; +pub use storage_entry::{StorageEntry, entry}; +pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart}; +pub use storage_key_value::StorageKeyValue; +pub use storage_value::StorageValue; + +/// When the provided `address` is statically generated via the `#[subxt]` macro, this validates +/// that the shape of the storage value is the same as the shape expected by the static address. 
+/// +/// When the provided `address` is dynamic (and thus does not come with any expectation of the +/// shape of the constant value), this just returns `Ok(())` +pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), StorageError> { + let Some(hash) = address.validation_hash() else { + return Ok(()); + }; + + let pallet_name = address.pallet_name(); + let entry_name = address.entry_name(); + + let pallet_metadata = metadata + .pallet_by_name(pallet_name) + .ok_or_else(|| StorageError::PalletNameNotFound(pallet_name.to_string()))?; + let storage_hash = pallet_metadata.storage_hash(entry_name).ok_or_else(|| { + StorageError::StorageEntryNotFound { + pallet_name: pallet_name.to_string(), + entry_name: entry_name.to_string(), + } + })?; + + if storage_hash != hash { Err(StorageError::IncompatibleCodegen) } else { Ok(()) } +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/prefix_of.rs b/vendor/pezkuwi-subxt/core/src/storage/prefix_of.rs new file mode 100644 index 00000000..4e04e220 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/prefix_of.rs @@ -0,0 +1,195 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use alloc::vec::Vec; +use frame_decode::helpers::IntoEncodableValues; +use scale_encode::EncodeAsType; + +/// For a given set of values that can be used as keys for a storage entry, +/// this is implemented for any prefixes of that set. ie if the keys `(A,B,C)` +/// would access a storage value, then `PrefixOf<(A,B,C)>` is implemented for +/// `(A,B)`, `(A,)` and `()`. +pub trait PrefixOf: IntoEncodableValues {} + +// If T impls PrefixOf, &T impls PrefixOf. +impl> PrefixOf for &T {} + +// Impls for tuples up to length 6 (storage maps rarely require more than 2 entries +// so it's very unlikely we'll ever need to go this deep). 
+impl PrefixOf<(A,)> for () {} + +impl PrefixOf<(A, B)> for () {} +impl PrefixOf<(A, B)> for (A,) where (A,): IntoEncodableValues {} + +impl PrefixOf<(A, B, C)> for () {} +impl PrefixOf<(A, B, C)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C)> for (A, B) where (A, B): IntoEncodableValues {} + +impl PrefixOf<(A, B, C, D)> for () {} +impl PrefixOf<(A, B, C, D)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D)> for (A, B, C) where (A, B, C): IntoEncodableValues {} + +impl PrefixOf<(A, B, C, D, E)> for () {} +impl PrefixOf<(A, B, C, D, E)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B, C) where (A, B, C): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E)> for (A, B, C, D) where + (A, B, C, D): IntoEncodableValues +{ +} + +impl PrefixOf<(A, B, C, D, E, F)> for () {} +impl PrefixOf<(A, B, C, D, E, F)> for (A,) where (A,): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B) where (A, B): IntoEncodableValues {} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C) where + (A, B, C): IntoEncodableValues +{ +} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, D) where + (A, B, C, D): IntoEncodableValues +{ +} +impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, D, E) where + (A, B, C, D, E): IntoEncodableValues +{ +} + +// Vecs are prefixes of vecs. The length is not statically known and so +// these would be given dynamically only, leaving the correct length to the user. +impl PrefixOf> for Vec {} + +// We don't use arrays in Subxt for storage entry access, but `IntoEncodableValues` +// supports them so let's allow impls which do use them to benefit too. +macro_rules! 
array_impl { + ($n:literal: $($p:literal)+) => { + $( + impl PrefixOf<[T; $n]> for [T; $p] {} + )+ + } +} + +array_impl!(1: 0); +array_impl!(2: 1 0); +array_impl!(3: 2 1 0); +array_impl!(4: 3 2 1 0); +array_impl!(5: 4 3 2 1 0); +array_impl!(6: 5 4 3 2 1 0); + +/// This is much like [`PrefixOf`] except that it also includes `Self` as an allowed type, +/// where `Self` must impl [`IntoEncodableValues`] just as every [`PrefixOf`] does. +pub trait EqualOrPrefixOf: IntoEncodableValues {} + +// Tuples +macro_rules! tuple_impl_eq { + ($($t:ident)+) => { + // Any T that is a PrefixOf impls EqualOrPrefixOf too + impl <$($t,)+ T: PrefixOf<($($t,)+)>> EqualOrPrefixOf<($($t,)+)> for T {} + // Keys impls EqualOrPrefixOf + impl <$($t),+> EqualOrPrefixOf<($($t,)+)> for ($($t,)+) where ($($t,)+): IntoEncodableValues {} + // &'a Keys impls EqualOrPrefixOf + impl <'a, $($t),+> EqualOrPrefixOf<($($t,)+)> for &'a ($($t,)+) where ($($t,)+): IntoEncodableValues {} + } +} + +tuple_impl_eq!(A); +tuple_impl_eq!(A B); +tuple_impl_eq!(A B C); +tuple_impl_eq!(A B C D); +tuple_impl_eq!(A B C D E); +tuple_impl_eq!(A B C D E F); + +// Vec +impl EqualOrPrefixOf> for Vec {} +impl EqualOrPrefixOf> for &Vec {} + +// Arrays +macro_rules! 
array_impl_eq { + ($($n:literal)+) => { + $( + impl EqualOrPrefixOf<[A; $n]> for [A; $n] {} + impl <'a, A: EncodeAsType> EqualOrPrefixOf<[A; $n]> for &'a [A; $n] {} + )+ + } +} + +impl EqualOrPrefixOf<[A; N]> for T where T: PrefixOf<[A; N]> {} +array_impl_eq!(1 2 3 4 5 6); + +#[cfg(test)] +mod test { + use super::*; + + struct Test(core::marker::PhantomData); + + impl Test { + fn new() -> Self { + Test(core::marker::PhantomData) + } + fn accepts_prefix_of>(&self, keys: P) { + let _encoder = keys.into_encodable_values(); + } + fn accepts_eq_or_prefix_of>(&self, keys: P) { + let _encoder = keys.into_encodable_values(); + } + } + + #[test] + fn test_prefix_of() { + // In real life we'd have a struct a bit like this: + let t = Test::<(bool, String, u64)>::new(); + + // And we'd want to be able to call some method like this: + //// This shouldn't work: + // t.accepts_prefix_of((true, String::from("hi"), 0)); + t.accepts_prefix_of(&(true, String::from("hi"))); + t.accepts_prefix_of((true, String::from("hi"))); + t.accepts_prefix_of((true,)); + t.accepts_prefix_of(()); + + let t = Test::<[u64; 5]>::new(); + + //// This shouldn't work: + // t.accepts_prefix_of([0,1,2,3,4]); + t.accepts_prefix_of([0, 1, 2, 3]); + t.accepts_prefix_of([0, 1, 2, 3]); + t.accepts_prefix_of([0, 1, 2]); + t.accepts_prefix_of([0, 1]); + t.accepts_prefix_of([0]); + t.accepts_prefix_of([]); + } + + #[test] + fn test_eq_or_prefix_of() { + // In real life we'd have a struct a bit like this: + let t = Test::<(bool, String, u64)>::new(); + + // And we'd want to be able to call some method like this: + t.accepts_eq_or_prefix_of(&(true, String::from("hi"), 0)); + t.accepts_eq_or_prefix_of(&(true, String::from("hi"))); + t.accepts_eq_or_prefix_of((true,)); + t.accepts_eq_or_prefix_of(()); + + t.accepts_eq_or_prefix_of((true, String::from("hi"), 0)); + t.accepts_eq_or_prefix_of((true, String::from("hi"))); + t.accepts_eq_or_prefix_of((true,)); + t.accepts_eq_or_prefix_of(()); + + let t = Test::<[u64; 
5]>::new(); + + t.accepts_eq_or_prefix_of([0, 1, 2, 3, 4]); + t.accepts_eq_or_prefix_of([0, 1, 2, 3]); + t.accepts_eq_or_prefix_of([0, 1, 2]); + t.accepts_eq_or_prefix_of([0, 1]); + t.accepts_eq_or_prefix_of([0]); + t.accepts_eq_or_prefix_of([]); + + t.accepts_eq_or_prefix_of([0, 1, 2, 3, 4]); + t.accepts_eq_or_prefix_of([0, 1, 2, 3]); + t.accepts_eq_or_prefix_of([0, 1, 2]); + t.accepts_eq_or_prefix_of([0, 1]); + t.accepts_eq_or_prefix_of([0]); + t.accepts_eq_or_prefix_of([]); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/storage_entry.rs b/vendor/pezkuwi-subxt/core/src/storage/storage_entry.rs new file mode 100644 index 00000000..443499d9 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/storage_entry.rs @@ -0,0 +1,144 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{PrefixOf, StorageKeyValue, StorageValue, address::Address}; +use crate::{error::StorageError, utils::YesMaybe}; +use alloc::{sync::Arc, vec::Vec}; +use frame_decode::storage::{IntoEncodableValues, StorageInfo}; +use pezkuwi_subxt_metadata::Metadata; +use scale_info::PortableRegistry; + +/// Create a [`StorageEntry`] to work with a given storage entry. +pub fn entry<'info, Addr: Address>( + address: Addr, + metadata: &'info Metadata, +) -> Result, StorageError> { + super::validate(&address, metadata)?; + + use frame_decode::storage::StorageTypeInfo; + let types = metadata.types(); + let info = metadata + .storage_info(address.pallet_name(), address.entry_name()) + .map_err(|e| StorageError::StorageInfoError(e.into_owned()))?; + + Ok(StorageEntry(Arc::new(StorageEntryInner { address, info: Arc::new(info), types }))) +} + +/// This represents a single storage entry (be it a plain value or map). 
+pub struct StorageEntry<'info, Addr>(Arc>); + +impl<'info, Addr> Clone for StorageEntry<'info, Addr> { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +struct StorageEntryInner<'info, Addr> { + address: Addr, + info: Arc>, + types: &'info PortableRegistry, +} + +impl<'info, Addr: Address> StorageEntry<'info, Addr> { + /// Name of the pallet containing this storage entry. + pub fn pallet_name(&self) -> &str { + self.0.address.pallet_name() + } + + /// Name of the storage entry. + pub fn entry_name(&self) -> &str { + self.0.address.entry_name() + } + + /// Is the storage entry a plain value? + pub fn is_plain(&self) -> bool { + self.0.info.keys.is_empty() + } + + /// Is the storage entry a map? + pub fn is_map(&self) -> bool { + !self.is_plain() + } + + /// Instantiate a [`StorageKeyValue`] for this entry. + /// + /// It is expected that the bytes are obtained by iterating key/value pairs at this address. + pub fn key_value( + &self, + key_bytes: impl Into>, + value_bytes: Vec, + ) -> StorageKeyValue<'info, Addr> { + StorageKeyValue::new(self.0.info.clone(), self.0.types, key_bytes.into(), value_bytes) + } + + /// Instantiate a [`StorageValue`] for this entry. + /// + /// It is expected that the bytes are obtained by fetching a value at this address. + pub fn value(&self, bytes: Vec) -> StorageValue<'info, Addr::Value> { + StorageValue::new(self.0.info.clone(), self.0.types, bytes) + } + + /// Return the default [`StorageValue`] for this storage entry, if there is one. + pub fn default_value(&self) -> Option> { + self.0.info.default_value.as_deref().map(|default_bytes| { + StorageValue::new(self.0.info.clone(), self.0.types, default_bytes.to_vec()) + }) + } + + /// The keys for plain storage values are always 32 byte hashes. 
+ pub fn key_prefix(&self) -> [u8; 32] { + frame_decode::storage::encode_storage_key_prefix( + self.0.address.pallet_name(), + self.0.address.entry_name(), + ) + } + + // This has a less "strict" type signature and so is just used under the hood. + fn key(&self, key_parts: Keys) -> Result, StorageError> { + let key = frame_decode::storage::encode_storage_key_with_info( + self.0.address.pallet_name(), + self.0.address.entry_name(), + key_parts, + &self.0.info, + self.0.types, + ) + .map_err(StorageError::StorageKeyEncodeError)?; + + Ok(key) + } + + /// This constructs a key suitable for fetching a value at the given map storage address. This + /// will error if we can see that the wrong number of key parts are provided. + pub fn fetch_key(&self, key_parts: Addr::KeyParts) -> Result, StorageError> { + if key_parts.num_encodable_values() != self.0.info.keys.len() { + Err(StorageError::WrongNumberOfKeyPartsProvidedForFetching { + expected: self.0.info.keys.len(), + got: key_parts.num_encodable_values(), + }) + } else { + self.key(key_parts) + } + } + + /// This constructs a key suitable for iterating at the given storage address. This will error + /// if we can see that too many key parts are provided. 
+ pub fn iter_key>( + &self, + key_parts: Keys, + ) -> Result, StorageError> { + if Addr::IsPlain::is_yes() { + Err(StorageError::CannotIterPlainEntry { + pallet_name: self.0.address.pallet_name().into(), + entry_name: self.0.address.entry_name().into(), + }) + } else if key_parts.num_encodable_values() >= self.0.info.keys.len() { + Err(StorageError::WrongNumberOfKeyPartsProvidedForIterating { + max_expected: self.0.info.keys.len() - 1, + got: key_parts.num_encodable_values(), + }) + } else { + self.key(key_parts) + } + } +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/storage_key.rs b/vendor/pezkuwi-subxt/core/src/storage/storage_key.rs new file mode 100644 index 00000000..4de62917 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/storage_key.rs @@ -0,0 +1,123 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::error::StorageKeyError; +use alloc::sync::Arc; +use core::marker::PhantomData; +use frame_decode::storage::{IntoDecodableValues, StorageInfo, StorageKey as StorageKeyPartInfo}; +use scale_info::PortableRegistry; + +pub use frame_decode::storage::StorageHasher; + +/// This represents the different parts of a storage key. 
+pub struct StorageKey<'info, KeyParts> { + info: Arc>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, + marker: PhantomData, +} + +impl<'info, KeyParts: IntoDecodableValues> StorageKey<'info, KeyParts> { + pub(crate) fn new( + info: &StorageInfo<'info, u32>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, + ) -> Result { + let cursor = &mut &*bytes; + let storage_key_info = frame_decode::storage::decode_storage_key_with_info( + cursor, info, types, + ) + .map_err(|e| StorageKeyError::StorageKeyDecodeError { bytes: bytes.to_vec(), error: e })?; + + if !cursor.is_empty() { + return Err(StorageKeyError::LeftoverBytes { bytes: cursor.to_vec() }); + } + + Ok(StorageKey { info: Arc::new(storage_key_info), types, bytes, marker: PhantomData }) + } + + /// Attempt to decode the values contained within this storage key. The target type is + /// given by the storage address used to access this entry. To decode into a custom type, + /// use [`Self::parts()`] or [`Self::part()`] and decode each part. + pub fn decode(&self) -> Result { + let values = + frame_decode::storage::decode_storage_key_values(&self.bytes, &self.info, self.types) + .map_err(StorageKeyError::CannotDecodeValuesInKey)?; + + Ok(values) + } + + /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a + /// single value that has been hashed. + pub fn parts(&self) -> impl ExactSizeIterator> { + let parts_len = self.info.parts().len(); + (0..parts_len).map(move |index| StorageKeyPart { + index, + info: self.info.clone(), + types: self.types, + bytes: self.bytes.clone(), + }) + } + + /// Return the part of the storage key at the provided index, or `None` if the index is out of + /// bounds. + pub fn part(&self, index: usize) -> Option> { + if index < self.parts().len() { + Some(StorageKeyPart { + index, + info: self.info.clone(), + types: self.types, + bytes: self.bytes.clone(), + }) + } else { + None + } + } +} + +/// This represents a part of a storage key. 
+pub struct StorageKeyPart<'info> { + index: usize, + info: Arc>, + types: &'info PortableRegistry, + bytes: Arc<[u8]>, +} + +impl<'info> StorageKeyPart<'info> { + /// Get the raw bytes for this part of the storage key. + pub fn bytes(&self) -> &[u8] { + let part = &self.info[self.index]; + let hash_range = part.hash_range(); + let value_range = part + .value() + .map(|v| v.range()) + .unwrap_or(core::ops::Range { start: hash_range.end, end: hash_range.end }); + let combined_range = core::ops::Range { start: hash_range.start, end: value_range.end }; + &self.bytes[combined_range] + } + + /// Get the hasher that was used to construct this part of the storage key. + pub fn hasher(&self) -> StorageHasher { + self.info[self.index].hasher() + } + + /// For keys that were produced using "concat" or "identity" hashers, the value + /// is available as a part of the key hash, allowing us to decode it into anything + /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a + /// different hasher, this will return `None`. + pub fn decode_as(&self) -> Result, StorageKeyError> { + let part_info = &self.info[self.index]; + let Some(value_info) = part_info.value() else { + return Ok(None); + }; + + let value_bytes = &self.bytes[value_info.range()]; + let value_ty = *value_info.ty(); + + let decoded_key_part = T::decode_as_type(&mut &*value_bytes, value_ty, self.types) + .map_err(|e| StorageKeyError::CannotDecodeValueInKey { index: self.index, error: e })?; + + Ok(Some(decoded_key_part)) + } +} diff --git a/vendor/pezkuwi-subxt/core/src/storage/storage_key_value.rs b/vendor/pezkuwi-subxt/core/src/storage/storage_key_value.rs new file mode 100644 index 00000000..268b6beb --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/storage/storage_key_value.rs @@ -0,0 +1,45 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+
+use super::{Address, StorageKey, StorageValue};
+use crate::error::StorageKeyError;
+use alloc::{sync::Arc, vec::Vec};
+use frame_decode::storage::StorageInfo;
+use scale_info::PortableRegistry;
+
+/// This represents a storage key/value pair, which is typically returned from
+/// iterating over values in some storage map.
+#[derive(Debug)]
+pub struct StorageKeyValue<'info, Addr: Address> {
+    key: Arc<[u8]>,
+    // This contains the storage information already:
+    value: StorageValue<'info, Addr::Value>,
+}
+
+impl<'info, Addr: Address> StorageKeyValue<'info, Addr> {
+    pub(crate) fn new(
+        info: Arc<StorageInfo<'info, u32>>,
+        types: &'info PortableRegistry,
+        key_bytes: Arc<[u8]>,
+        value_bytes: Vec<u8>,
+    ) -> Self {
+        StorageKeyValue { key: key_bytes, value: StorageValue::new(info, types, value_bytes) }
+    }
+
+    /// Get the raw bytes for this storage entry's key.
+    pub fn key_bytes(&self) -> &[u8] {
+        &self.key
+    }
+
+    /// Decode the key for this storage entry. This gives back a type from which we can
+    /// decode specific parts of the key hash (where applicable).
+    pub fn key(&'_ self) -> Result<StorageKey<'_, Addr::Keys>, StorageKeyError> {
+        StorageKey::new(&self.value.info, self.value.types, self.key.clone())
+    }
+
+    /// Return the storage value.
+    pub fn value(&self) -> &StorageValue<'info, Addr::Value> {
+        &self.value
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/storage/storage_value.rs b/vendor/pezkuwi-subxt/core/src/storage/storage_value.rs
new file mode 100644
index 00000000..298fd88a
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/storage/storage_value.rs
@@ -0,0 +1,63 @@
+// Copyright 2019-2025 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+use crate::error::StorageValueError;
+use alloc::{sync::Arc, vec::Vec};
+use core::marker::PhantomData;
+use frame_decode::storage::StorageInfo;
+use scale_decode::DecodeAsType;
+use scale_info::PortableRegistry;
+
+/// This represents a storage value.
+#[derive(Debug)]
+pub struct StorageValue<'info, Value> {
+    pub(crate) info: Arc<StorageInfo<'info, u32>>,
+    pub(crate) types: &'info PortableRegistry,
+    bytes: Vec<u8>,
+    marker: PhantomData<Value>,
+}
+
+impl<'info, Value: DecodeAsType> StorageValue<'info, Value> {
+    pub(crate) fn new(
+        info: Arc<StorageInfo<'info, u32>>,
+        types: &'info PortableRegistry,
+        bytes: Vec<u8>,
+    ) -> StorageValue<'info, Value> {
+        StorageValue { info, types, bytes, marker: PhantomData }
+    }
+
+    /// Get the raw bytes for this storage value.
+    pub fn bytes(&self) -> &[u8] {
+        &self.bytes
+    }
+
+    /// Consume this storage value and return the raw bytes.
+    pub fn into_bytes(self) -> Vec<u8> {
+        self.bytes.to_vec()
+    }
+
+    /// Decode this storage value into the provided response type.
+    pub fn decode(&self) -> Result<Value, StorageValueError> {
+        self.decode_as::<Value>()
+    }
+
+    /// Decode this storage value into an arbitrary type.
+    pub fn decode_as<T: DecodeAsType>(&self) -> Result<T, StorageValueError> {
+        let cursor = &mut &*self.bytes;
+
+        let value = frame_decode::storage::decode_storage_value_with_info(
+            cursor,
+            &self.info,
+            self.types,
+            T::into_visitor(),
+        )
+        .map_err(StorageValueError::CannotDecode)?;
+
+        if !cursor.is_empty() {
+            return Err(StorageValueError::LeftoverBytes { bytes: cursor.to_vec() });
+        }
+
+        Ok(value)
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/tx/mod.rs b/vendor/pezkuwi-subxt/core/src/tx/mod.rs
new file mode 100644
index 00000000..31cc6bcc
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/tx/mod.rs
@@ -0,0 +1,437 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! Construct and sign transactions.
+//!
+//! # Example
+//!
+//! ```rust
+//! use pezkuwi_subxt_signer::sr25519::dev;
+//! use pezkuwi_subxt_macro::subxt;
+//! use pezkuwi_subxt_core::config::{PezkuwiConfig, HashFor};
+//! use pezkuwi_subxt_core::config::DefaultExtrinsicParamsBuilder as Params;
+//! use pezkuwi_subxt_core::tx;
+//! use pezkuwi_subxt_core::utils::H256;
+//! use pezkuwi_subxt_core::Metadata;
+//!
+//! // If we generate types without `subxt`, we need to point to `::pezkuwi_subxt_core`:
+//! #[subxt(
+//!     crate = "::pezkuwi_subxt_core",
+//!     runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale",
+//! )]
+//! pub mod pezkuwi {}
+//!
+//! // Gather some other information about the chain that we'll need to construct valid extrinsics:
+//! let state = tx::ClientState::<PezkuwiConfig> {
+//!     metadata: {
+//!         let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale");
+//!         Metadata::decode_from(&metadata_bytes[..]).unwrap()
+//!     },
+//!     genesis_hash: {
+//!         let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3";
+//!         let bytes = hex::decode(h).unwrap();
+//!         H256::from_slice(&bytes)
+//!     },
+//!     runtime_version: tx::RuntimeVersion {
+//!         spec_version: 9370,
+//!         transaction_version: 20,
+//!     }
+//! };
+//!
+//! // Now we can build a balance transfer extrinsic.
+//! let dest = dev::bob().public_key().into();
+//! let call = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000);
+//! let params = Params::new().tip(1_000).nonce(0).build();
+//!
+//! // We can validate that this lines up with the given metadata:
+//! tx::validate(&call, &state.metadata).unwrap();
+//!
+//! // We can build a signed transaction:
+//! let signed_call = tx::create_v4_signed(&call, &state, params)
+//!     .unwrap()
+//!     .sign(&dev::alice());
+//!
+//! // And log it:
+//! println!("Tx: 0x{}", hex::encode(signed_call.encoded()));
+//! ```
+
+pub mod payload;
+pub mod signer;
+
+use crate::{
+    Metadata,
+    config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, HashFor, Hasher},
+    error::ExtrinsicError,
+    utils::Encoded,
+};
+use alloc::{borrow::Cow, string::ToString, vec::Vec};
+use codec::{Compact, Encode};
+use payload::Payload;
+use pezsp_crypto_hashing::blake2_256;
+use signer::Signer as SignerT;
+
+// Expose these here since we expect them in some calls below.
+pub use crate::client::{ClientState, RuntimeVersion};
+
+/// Run the validation logic against some extrinsic you'd like to submit. Returns `Ok(())`
+/// if the call is valid (or if it's not possible to check since the call has no validation hash).
+/// Return an error if the call was not valid or something went wrong trying to validate it (ie
+/// the pallet or call in question do not exist at all).
+pub fn validate<Call: Payload>(call: &Call, metadata: &Metadata) -> Result<(), ExtrinsicError> {
+    let Some(details) = call.validation_details() else {
+        return Ok(());
+    };
+
+    let pallet_name = details.pallet_name;
+    let call_name = details.call_name;
+
+    let expected_hash = metadata
+        .pallet_by_name(pallet_name)
+        .ok_or_else(|| ExtrinsicError::PalletNameNotFound(pallet_name.to_string()))?
+        .call_hash(call_name)
+        .ok_or_else(|| ExtrinsicError::CallNameNotFound {
+            pallet_name: pallet_name.to_string(),
+            call_name: call_name.to_string(),
+        })?;
+
+    if details.hash != expected_hash { Err(ExtrinsicError::IncompatibleCodegen) } else { Ok(()) }
+}
+
+/// Returns the suggested transaction versions to build for a given chain, or an error
+/// if Subxt doesn't support any version expected by the chain.
+///
+/// If the result is [`TransactionVersion::V4`], use the `v4` methods in this module. If it's
+/// [`TransactionVersion::V5`], use the `v5` ones.
+pub fn suggested_version(metadata: &Metadata) -> Result<TransactionVersion, ExtrinsicError> {
+    let versions = metadata.extrinsic().supported_versions();
+
+    if versions.contains(&4) {
+        Ok(TransactionVersion::V4)
+    } else if versions.contains(&5) {
+        Ok(TransactionVersion::V5)
+    } else {
+        Err(ExtrinsicError::UnsupportedVersion)
+    }
+}
+
+/// The transaction versions supported by Subxt.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum TransactionVersion {
+    /// v4 transactions (signed and unsigned transactions)
+    V4,
+    /// v5 transactions (bare and general transactions)
+    V5,
+}
+
+/// Return the SCALE encoded bytes representing the call data of the transaction.
+pub fn call_data<Call: Payload>(
+    call: &Call,
+    metadata: &Metadata,
+) -> Result<Vec<u8>, ExtrinsicError> {
+    let mut bytes = Vec::new();
+    call.encode_call_data_to(metadata, &mut bytes)?;
+    Ok(bytes)
+}
+
+/// Creates a V4 "unsigned" transaction without submitting it.
+pub fn create_v4_unsigned<T: Config, Call: Payload>(
+    call: &Call,
+    metadata: &Metadata,
+) -> Result<Transaction<T>, ExtrinsicError> {
+    create_unsigned_at_version(call, 4, metadata)
+}
+
+/// Creates a V5 "bare" transaction without submitting it.
+pub fn create_v5_bare<T: Config, Call: Payload>(
+    call: &Call,
+    metadata: &Metadata,
+) -> Result<Transaction<T>, ExtrinsicError> {
+    create_unsigned_at_version(call, 5, metadata)
+}
+
+// Create a V4 "unsigned" transaction or V5 "bare" transaction.
+fn create_unsigned_at_version<T: Config, Call: Payload>(
+    call: &Call,
+    tx_version: u8,
+    metadata: &Metadata,
+) -> Result<Transaction<T>, ExtrinsicError> {
+    // 1. Validate this call against the current node metadata if the call comes
+    //    with a hash allowing us to do so.
+    validate(call, metadata)?;
+
+    // 2. Encode extrinsic
+    let extrinsic = {
+        let mut encoded_inner = Vec::new();
+        // encode the transaction version first.
+        tx_version.encode_to(&mut encoded_inner);
+        // encode call data after this byte.
+        call.encode_call_data_to(metadata, &mut encoded_inner)?;
+        // now, prefix byte length:
+        let len = Compact(
+            u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"),
+        );
+        let mut encoded = Vec::new();
+        len.encode_to(&mut encoded);
+        encoded.extend(encoded_inner);
+        encoded
+    };
+
+    // Wrap in Encoded to ensure that any more "encode" calls leave it in the right state.
+    Ok(Transaction::from_bytes(extrinsic))
+}
+
+/// Construct a v4 extrinsic, ready to be signed.
+pub fn create_v4_signed<T: Config, Call: Payload>(
+    call: &Call,
+    client_state: &ClientState<T>,
+    params: <T::ExtrinsicParams as ExtrinsicParams<T>>::Params,
+) -> Result<PartialTransactionV4<T>, ExtrinsicError> {
+    // 1. Validate this call against the current node metadata if the call comes
+    //    with a hash allowing us to do so.
+    validate(call, &client_state.metadata)?;
+
+    // 2. SCALE encode call data to bytes (pallet u8, call u8, call params).
+    let call_data = call_data(call, &client_state.metadata)?;
+
+    // 3. Construct our custom additional/extra params.
+    let additional_and_extra_params =
+        <T::ExtrinsicParams as ExtrinsicParams<T>>::new(client_state, params)?;
+
+    // Return these details, ready to construct a signed extrinsic from.
+    Ok(PartialTransactionV4 { call_data, additional_and_extra_params })
+}
+
+/// Construct a v5 "general" extrinsic, ready to be signed or emitted as is.
+pub fn create_v5_general<T: Config, Call: Payload>(
+    call: &Call,
+    client_state: &ClientState<T>,
+    params: <T::ExtrinsicParams as ExtrinsicParams<T>>::Params,
+) -> Result<PartialTransactionV5<T>, ExtrinsicError> {
+    // 1. Validate this call against the current node metadata if the call comes
+    //    with a hash allowing us to do so.
+    validate(call, &client_state.metadata)?;
+
+    // 2. Work out which TX extension version to target based on metadata.
+    let tx_extensions_version = client_state
+        .metadata
+        .extrinsic()
+        .transaction_extension_version_to_use_for_encoding();
+
+    // 3. SCALE encode call data to bytes (pallet u8, call u8, call params).
+    let call_data = call_data(call, &client_state.metadata)?;
+
+    // 4. Construct our custom additional/extra params.
+    let additional_and_extra_params =
+        <T::ExtrinsicParams as ExtrinsicParams<T>>::new(client_state, params)?;
+
+    // Return these details, ready to construct a signed extrinsic from.
+    Ok(PartialTransactionV5 { call_data, additional_and_extra_params, tx_extensions_version })
+}
+
+/// A partially constructed V4 extrinsic, ready to be signed.
+pub struct PartialTransactionV4<T: Config> {
+    call_data: Vec<u8>,
+    additional_and_extra_params: T::ExtrinsicParams,
+}
+
+impl<T: Config> PartialTransactionV4<T> {
+    /// Return the bytes representing the call data for this partially constructed
+    /// extrinsic.
+ pub fn call_data(&self) -> &[u8] { + &self.call_data + } + + // Obtain bytes representing the signer payload and run call some function + // with them. This can avoid an allocation in some cases. + fn with_signer_payload(&self, f: F) -> R + where + F: for<'a> FnOnce(Cow<'a, [u8]>) -> R, + { + let mut bytes = self.call_data.clone(); + self.additional_and_extra_params.encode_signer_payload_value_to(&mut bytes); + self.additional_and_extra_params.encode_implicit_to(&mut bytes); + + if bytes.len() > 256 { f(Cow::Borrowed(&blake2_256(&bytes))) } else { f(Cow::Owned(bytes)) } + } + + /// Return the V4 signer payload for this extrinsic. These are the bytes that must + /// be signed in order to produce a valid signature for the extrinsic. + pub fn signer_payload(&self) -> Vec { + self.with_signer_payload(|bytes| bytes.to_vec()) + } + + /// Convert this [`PartialTransactionV4`] into a V4 signed [`Transaction`], ready to submit. + /// The provided `signer` is responsible for providing the "from" address for the transaction, + /// as well as providing a signature to attach to it. + pub fn sign(&self, signer: &Signer) -> Transaction + where + Signer: SignerT, + { + // Given our signer, we can sign the payload representing this extrinsic. + let signature = self.with_signer_payload(|bytes| signer.sign(&bytes)); + // Now, use the signature and "from" address to build the extrinsic. + self.sign_with_account_and_signature(signer.account_id(), &signature) + } + + /// Convert this [`PartialTransactionV4`] into a V4 signed [`Transaction`], ready to submit. + /// The provided `address` and `signature` will be used. 
+ pub fn sign_with_account_and_signature( + &self, + account_id: T::AccountId, + signature: &T::Signature, + ) -> Transaction { + let extrinsic = { + let mut encoded_inner = Vec::new(); + // "is signed" + transaction protocol version (4) + (0b10000000 + 4u8).encode_to(&mut encoded_inner); + // from address for signature + let address: T::Address = account_id.into(); + address.encode_to(&mut encoded_inner); + // the signature + signature.encode_to(&mut encoded_inner); + // attach custom extra params + self.additional_and_extra_params.encode_value_to(&mut encoded_inner); + // and now, call data (remembering that it's been encoded already and just needs + // appending) + encoded_inner.extend(&self.call_data); + // now, prefix byte length: + let len = Compact( + u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), + ); + let mut encoded = Vec::new(); + len.encode_to(&mut encoded); + encoded.extend(encoded_inner); + encoded + }; + + // Return an extrinsic ready to be submitted. + Transaction::from_bytes(extrinsic) + } +} + +/// A partially constructed V5 general extrinsic, ready to be signed or emitted as-is. +pub struct PartialTransactionV5 { + call_data: Vec, + additional_and_extra_params: T::ExtrinsicParams, + tx_extensions_version: u8, +} + +impl PartialTransactionV5 { + /// Return the bytes representing the call data for this partially constructed + /// extrinsic. + pub fn call_data(&self) -> &[u8] { + &self.call_data + } + + /// Return the V5 signer payload for this extrinsic. These are the bytes that must + /// be signed in order to produce a valid signature for the extrinsic. + pub fn signer_payload(&self) -> [u8; 32] { + let mut bytes = self.call_data.clone(); + + self.additional_and_extra_params.encode_signer_payload_value_to(&mut bytes); + self.additional_and_extra_params.encode_implicit_to(&mut bytes); + + blake2_256(&bytes) + } + + /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`]. 
+ /// + /// This transaction has not been explicitly signed. Use [`Self::sign`] + /// or [`Self::sign_with_account_and_signature`] if you wish to provide a + /// signature (this is usually a necessary step). + pub fn to_transaction(&self) -> Transaction { + let extrinsic = { + let mut encoded_inner = Vec::new(); + // "is general" + transaction protocol version (5) + (0b01000000 + 5u8).encode_to(&mut encoded_inner); + // Encode versions for the transaction extensions + self.tx_extensions_version.encode_to(&mut encoded_inner); + // Encode the actual transaction extensions values + self.additional_and_extra_params.encode_value_to(&mut encoded_inner); + // and now, call data (remembering that it's been encoded already and just needs + // appending) + encoded_inner.extend(&self.call_data); + // now, prefix byte length: + let len = Compact( + u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), + ); + let mut encoded = Vec::new(); + len.encode_to(&mut encoded); + encoded.extend(encoded_inner); + encoded + }; + + // Return an extrinsic ready to be submitted. + Transaction::from_bytes(extrinsic) + } + + /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`] with a signature. + /// + /// Signing the transaction injects the signature into the transaction extension data, which is + /// why this method borrows self mutably. Signing repeatedly will override the previous + /// signature. + pub fn sign(&mut self, signer: &Signer) -> Transaction + where + Signer: SignerT, + { + // Given our signer, we can sign the payload representing this extrinsic. + let signature = signer.sign(&self.signer_payload()); + // Now, use the signature and "from" account to build the extrinsic. + self.sign_with_account_and_signature(&signer.account_id(), &signature) + } + + /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`] with a signature. + /// Prefer [`Self::sign`] if you have a [`SignerT`] instance to use. 
+ /// + /// Signing the transaction injects the signature into the transaction extension data, which is + /// why this method borrows self mutably. Signing repeatedly will override the previous + /// signature. + pub fn sign_with_account_and_signature( + &mut self, + account_id: &T::AccountId, + signature: &T::Signature, + ) -> Transaction { + // Inject the signature into the transaction extensions + // before constructing it. + self.additional_and_extra_params.inject_signature(account_id, signature); + + self.to_transaction() + } +} + +/// This represents a signed transaction that's ready to be submitted. +/// Use [`Transaction::encoded()`] or [`Transaction::into_encoded()`] to +/// get the bytes for it, or [`Transaction::hash_with()`] to hash the transaction +/// given an instance of [`Config::Hasher`]. +pub struct Transaction { + encoded: Encoded, + marker: core::marker::PhantomData, +} + +impl Transaction { + /// Create a [`Transaction`] from some already-signed and prepared + /// extrinsic bytes, + pub fn from_bytes(tx_bytes: Vec) -> Self { + Self { encoded: Encoded(tx_bytes), marker: core::marker::PhantomData } + } + + /// Calculate and return the hash of the extrinsic, based on the provided hasher. + /// If you don't have a hasher to hand, you can construct one using the metadata + /// with `T::Hasher::new(&metadata)`. This will create a hasher suitable for the + /// current chain where possible. + pub fn hash_with(&self, hasher: T::Hasher) -> HashFor { + hasher.hash_of(&self.encoded) + } + + /// Returns the SCALE encoded extrinsic bytes. + pub fn encoded(&self) -> &[u8] { + &self.encoded.0 + } + + /// Consumes this [`Transaction`] and returns the SCALE encoded + /// extrinsic bytes. 
+ pub fn into_encoded(self) -> Vec { + self.encoded.0 + } +} diff --git a/vendor/pezkuwi-subxt/core/src/tx/payload.rs b/vendor/pezkuwi-subxt/core/src/tx/payload.rs new file mode 100644 index 00000000..931f571f --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/tx/payload.rs @@ -0,0 +1,267 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains the trait and types used to represent +//! transactions that can be submitted. + +use crate::{Metadata, error::ExtrinsicError}; +use alloc::{ + borrow::Cow, + boxed::Box, + string::{String, ToString}, +}; + +use alloc::vec::Vec; +use codec::Encode; +use scale_encode::EncodeAsFields; +use scale_value::{Composite, Value, ValueDef, Variant}; + +/// This represents a transaction payload that can be submitted +/// to a node. +pub trait Payload { + /// Encode call data to the provided output. + fn encode_call_data_to( + &self, + metadata: &Metadata, + out: &mut Vec, + ) -> Result<(), ExtrinsicError>; + + /// Encode call data and return the output. This is a convenience + /// wrapper around [`Payload::encode_call_data_to`]. + fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { + let mut v = Vec::new(); + self.encode_call_data_to(metadata, &mut v)?; + Ok(v) + } + + /// Returns the details needed to validate the call, which + /// include a statically generated hash, the pallet name, + /// and the call name. + fn validation_details(&self) -> Option> { + None + } +} + +macro_rules! 
boxed_payload { + ($ty:path) => { + impl Payload for $ty { + fn encode_call_data_to( + &self, + metadata: &Metadata, + out: &mut Vec, + ) -> Result<(), ExtrinsicError> { + self.as_ref().encode_call_data_to(metadata, out) + } + fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { + self.as_ref().encode_call_data(metadata) + } + fn validation_details(&self) -> Option> { + self.as_ref().validation_details() + } + } + }; +} + +boxed_payload!(Box); +#[cfg(feature = "std")] +boxed_payload!(std::sync::Arc); +#[cfg(feature = "std")] +boxed_payload!(std::rc::Rc); + +/// Details required to validate the shape of a transaction payload against some metadata. +pub struct ValidationDetails<'a> { + /// The pallet name. + pub pallet_name: &'a str, + /// The call name. + pub call_name: &'a str, + /// A hash (this is generated at compile time in our codegen) + /// to compare against the runtime code. + pub hash: [u8; 32], +} + +/// A transaction payload containing some generic `CallData`. +#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct DefaultPayload { + pallet_name: Cow<'static, str>, + call_name: Cow<'static, str>, + call_data: CallData, + validation_hash: Option<[u8; 32]>, +} + +/// The payload type used by static codegen. +pub type StaticPayload = DefaultPayload; +/// The type of a payload typically used for dynamic transaction payloads. +pub type DynamicPayload = DefaultPayload>; + +impl DefaultPayload { + /// Create a new [`DefaultPayload`]. + pub fn new( + pallet_name: impl Into, + call_name: impl Into, + call_data: CallData, + ) -> Self { + DefaultPayload { + pallet_name: Cow::Owned(pallet_name.into()), + call_name: Cow::Owned(call_name.into()), + call_data, + validation_hash: None, + } + } + + /// Create a new [`DefaultPayload`] using static strings for the pallet and call name. + /// This is only expected to be used from codegen. 
+ #[doc(hidden)] + pub fn new_static( + pallet_name: &'static str, + call_name: &'static str, + call_data: CallData, + validation_hash: [u8; 32], + ) -> Self { + DefaultPayload { + pallet_name: Cow::Borrowed(pallet_name), + call_name: Cow::Borrowed(call_name), + call_data, + validation_hash: Some(validation_hash), + } + } + + /// Do not validate this call prior to submitting it. + pub fn unvalidated(self) -> Self { + Self { validation_hash: None, ..self } + } + + /// Returns the call data. + pub fn call_data(&self) -> &CallData { + &self.call_data + } + + /// Returns the pallet name. + pub fn pallet_name(&self) -> &str { + &self.pallet_name + } + + /// Returns the call name. + pub fn call_name(&self) -> &str { + &self.call_name + } +} + +impl DefaultPayload> { + /// Convert the dynamic `Composite` payload into a [`Value`]. + /// This is useful if you want to use this as an argument for a + /// larger dynamic call that wants to use this as a nested call. + pub fn into_value(self) -> Value<()> { + let call = Value { + context: (), + value: ValueDef::Variant(Variant { + name: self.call_name.into_owned(), + values: self.call_data, + }), + }; + + Value::unnamed_variant(self.pallet_name, [call]) + } +} + +impl Payload for DefaultPayload { + fn encode_call_data_to( + &self, + metadata: &Metadata, + out: &mut Vec, + ) -> Result<(), ExtrinsicError> { + let pallet = metadata + .pallet_by_name(&self.pallet_name) + .ok_or_else(|| ExtrinsicError::PalletNameNotFound(self.pallet_name.to_string()))?; + let call = pallet.call_variant_by_name(&self.call_name).ok_or_else(|| { + ExtrinsicError::CallNameNotFound { + pallet_name: pallet.name().to_string(), + call_name: self.call_name.to_string(), + } + })?; + + let pallet_index = pallet.call_index(); + let call_index = call.index; + + pallet_index.encode_to(out); + call_index.encode_to(out); + + let mut fields = + call.fields.iter().map(|f| scale_encode::Field::new(f.ty.id, f.name.as_deref())); + + self.call_data + 
.encode_as_fields_to(&mut fields, metadata.types(), out) + .map_err(ExtrinsicError::CannotEncodeCallData)?; + Ok(()) + } + + fn validation_details(&self) -> Option> { + self.validation_hash.map(|hash| ValidationDetails { + pallet_name: &self.pallet_name, + call_name: &self.call_name, + hash, + }) + } +} + +/// Construct a transaction at runtime; essentially an alias to [`DefaultPayload::new()`] +/// which provides a [`Composite`] value for the call data. +pub fn dynamic( + pallet_name: impl Into, + call_name: impl Into, + call_data: impl Into>, +) -> DynamicPayload { + DefaultPayload::new(pallet_name, call_name, call_data.into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Metadata; + use codec::Decode; + use scale_value::Composite; + + fn test_metadata() -> Metadata { + let metadata_bytes = include_bytes!("../../../artifacts/pezkuwi_metadata_small.scale"); + Metadata::decode(&mut &metadata_bytes[..]).expect("Valid metadata") + } + + #[test] + fn encode_call_with_incompatible_types_returns_error() { + let metadata = test_metadata(); + + let incompatible_data = Composite::named([ + ("dest", scale_value::Value::bool(true)), // Boolean instead of MultiAddress + ("value", scale_value::Value::string("not_a_number")), // String instead of u128 + ]); + + let payload = DefaultPayload::new("Balances", "transfer_allow_death", incompatible_data); + + let mut out = Vec::new(); + let result = payload.encode_call_data_to(&metadata, &mut out); + + assert!(result.is_err(), "Expected error when encoding with incompatible types"); + } + + #[test] + fn encode_call_with_valid_data_succeeds() { + let metadata = test_metadata(); + + // Create a valid payload to ensure our error handling doesn't break valid cases + // For MultiAddress, we'll use the Id variant with a 32-byte account + let valid_address = + scale_value::Value::unnamed_variant("Id", [scale_value::Value::from_bytes([0u8; 32])]); + + let valid_data = + Composite::named([("dest", valid_address), ("value", 
scale_value::Value::u128(1000))]); + + let payload = DefaultPayload::new("Balances", "transfer_allow_death", valid_data); + + // This should succeed + let mut out = Vec::new(); + let result = payload.encode_call_data_to(&metadata, &mut out); + + assert!(result.is_ok(), "Expected success when encoding with valid data"); + assert!(!out.is_empty(), "Expected encoded output to be non-empty"); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/tx/signer.rs b/vendor/pezkuwi-subxt/core/src/tx/signer.rs new file mode 100644 index 00000000..4bda9bf9 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/tx/signer.rs @@ -0,0 +1,22 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A library to **sub**mit e**xt**rinsics to a +//! [bizinikiwi](https://github.com/pezkuwichain/bizinikiwi) node via RPC. + +use crate::Config; + +/// Signing transactions requires a [`Signer`]. This is responsible for +/// providing the "from" account that the transaction is being signed by, +/// as well as actually signing a SCALE encoded payload. +pub trait Signer { + /// Return the "from" account ID. + fn account_id(&self) -> T::AccountId; + + /// Takes a signer payload for an extrinsic, and returns a signature based on it. + /// + /// Some signers may fail, for instance because the hardware on which the keys are located has + /// refused the operation. + fn sign(&self, signer_payload: &[u8]) -> T::Signature; +} diff --git a/vendor/pezkuwi-subxt/core/src/utils/account_id.rs b/vendor/pezkuwi-subxt/core/src/utils/account_id.rs new file mode 100644 index 00000000..91abd6aa --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/utils/account_id.rs @@ -0,0 +1,188 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! The "default" Bizinikiwi/Pezkuwi AccountId. 
This is used in codegen, as well as signing related +//! bits. This doesn't contain much functionality itself, but is easy to convert to/from an +//! `sp_core::AccountId32` for instance, to gain functionality without forcing a dependency on +//! Bizinikiwi crates here. + +use alloc::{format, string::String, vec, vec::Vec}; +use codec::{Decode, Encode}; +use serde::{Deserialize, Serialize}; +use thiserror::Error as DeriveError; + +/// A 32-byte cryptographic identifier. This is a simplified version of Bizinikiwi's +/// `sp_core::crypto::AccountId32`. To obtain more functionality, convert this into +/// that type. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + scale_info::TypeInfo, +)] +pub struct AccountId32(pub [u8; 32]); + +impl AsRef<[u8]> for AccountId32 { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsRef<[u8; 32]> for AccountId32 { + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } +} + +impl From<[u8; 32]> for AccountId32 { + fn from(x: [u8; 32]) -> Self { + AccountId32(x) + } +} + +impl AccountId32 { + // Return the ss58-check string for this key. Adapted from `sp_core::crypto`. We need this to + // serialize our account appropriately but otherwise don't care. + fn to_ss58check(&self) -> String { + // For serializing to a string to obtain the account nonce, we use the default bizinikiwi + // prefix (since we have no way to otherwise pick one). It doesn't really matter, since when + // it's deserialized back in system_accountNextIndex, we ignore this (so long as it's + // valid). + const SUBSTRATE_SS58_PREFIX: u8 = 42; + // prefix <= 63 just take up one byte at the start: + let mut v = vec![SUBSTRATE_SS58_PREFIX]; + // then push the account ID bytes. + v.extend(self.0); + // then push a 2 byte checksum of what we have so far. + let r = ss58hash(&v); + v.extend(&r[0..2]); + // then encode to base58. 
+ use base58::ToBase58; + v.to_base58() + } + + // This isn't strictly needed, but to give our AccountId32 a little more usefulness, we also + // implement the logic needed to decode an AccountId32 from an SS58 encoded string. This is + // exposed via a `FromStr` impl. + fn from_ss58check(s: &str) -> Result { + const CHECKSUM_LEN: usize = 2; + let body_len = 32; + + use base58::FromBase58; + let data = s.from_base58().map_err(|_| FromSs58Error::BadBase58)?; + if data.len() < 2 { + return Err(FromSs58Error::BadLength); + } + let prefix_len = match data[0] { + 0..=63 => 1, + 64..=127 => 2, + _ => return Err(FromSs58Error::InvalidPrefix), + }; + if data.len() != prefix_len + body_len + CHECKSUM_LEN { + return Err(FromSs58Error::BadLength); + } + let hash = ss58hash(&data[0..body_len + prefix_len]); + let checksum = &hash[0..CHECKSUM_LEN]; + if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { + // Invalid checksum. + return Err(FromSs58Error::InvalidChecksum); + } + + let result = data[prefix_len..body_len + prefix_len] + .try_into() + .map_err(|_| FromSs58Error::BadLength)?; + Ok(AccountId32(result)) + } +} + +/// An error obtained from trying to interpret an SS58 encoded string into an AccountId32 +#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)] +#[allow(missing_docs)] +pub enum FromSs58Error { + #[error("Base 58 requirement is violated")] + BadBase58, + #[error("Length is bad")] + BadLength, + #[error("Invalid checksum")] + InvalidChecksum, + #[error("Invalid SS58 prefix byte.")] + InvalidPrefix, +} + +// We do this just to get a checksum to help verify the validity of the address in to_ss58check +fn ss58hash(data: &[u8]) -> Vec { + use blake2::{Blake2b512, Digest}; + const PREFIX: &[u8] = b"SS58PRE"; + let mut ctx = Blake2b512::new(); + ctx.update(PREFIX); + ctx.update(data); + ctx.finalize().to_vec() +} + +impl Serialize for AccountId32 { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + 
serializer.serialize_str(&self.to_ss58check()) + } +} + +impl<'de> Deserialize<'de> for AccountId32 { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + AccountId32::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| serde::de::Error::custom(format!("{e:?}"))) + } +} + +impl core::fmt::Display for AccountId32 { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } +} + +impl core::str::FromStr for AccountId32 { + type Err = FromSs58Error; + fn from_str(s: &str) -> Result { + AccountId32::from_ss58check(s) + } +} + +#[cfg(test)] +mod test { + use super::*; + use sp_core::{self, crypto::Ss58Codec}; + use sp_keyring::sr25519::Keyring; + + #[test] + fn ss58_is_compatible_with_bizinikiwi_impl() { + let keyrings = vec![Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + + for keyring in keyrings { + let bizinikiwi_account = keyring.to_account_id(); + let local_account = AccountId32(bizinikiwi_account.clone().into()); + + // Both should encode to ss58 the same way: + let bizinikiwi_ss58 = bizinikiwi_account.to_ss58check(); + assert_eq!(bizinikiwi_ss58, local_account.to_ss58check()); + + // Both should decode from ss58 back to the same: + assert_eq!( + sp_core::crypto::AccountId32::from_ss58check(&bizinikiwi_ss58).unwrap(), + bizinikiwi_account + ); + assert_eq!(AccountId32::from_ss58check(&bizinikiwi_ss58).unwrap(), local_account); + } + } +} diff --git a/vendor/pezkuwi-subxt/core/src/utils/account_id20.rs b/vendor/pezkuwi-subxt/core/src/utils/account_id20.rs new file mode 100644 index 00000000..d5722793 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/utils/account_id20.rs @@ -0,0 +1,151 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! `AccountId20` is a representation of Ethereum address derived from hashing the public key. 
+
+use alloc::{format, string::String};
+use codec::{Decode, Encode};
+use keccak_hash::keccak;
+use serde::{Deserialize, Serialize};
+use thiserror::Error as DeriveError;
+
+#[derive(
+    Copy,
+    Clone,
+    Eq,
+    PartialEq,
+    Ord,
+    PartialOrd,
+    Encode,
+    Decode,
+    Debug,
+    scale_encode::EncodeAsType,
+    scale_decode::DecodeAsType,
+    scale_info::TypeInfo,
+)]
+/// Ethereum-compatible `AccountId`.
+pub struct AccountId20(pub [u8; 20]);
+
+impl AsRef<[u8]> for AccountId20 {
+    fn as_ref(&self) -> &[u8] {
+        &self.0[..]
+    }
+}
+
+impl AsRef<[u8; 20]> for AccountId20 {
+    fn as_ref(&self) -> &[u8; 20] {
+        &self.0
+    }
+}
+
+impl From<[u8; 20]> for AccountId20 {
+    fn from(x: [u8; 20]) -> Self {
+        AccountId20(x)
+    }
+}
+
+impl AccountId20 {
+    /// Render the address as an EIP-55 checksummed `0x…` hex string.
+    pub fn checksum(&self) -> String {
+        let hex_address = hex::encode(self.0);
+        let hash = keccak(hex_address.as_bytes());
+
+        let mut checksum_address = String::with_capacity(42);
+        checksum_address.push_str("0x");
+
+        for (i, ch) in hex_address.chars().enumerate() {
+            // Get the corresponding nibble from the hash
+            let nibble = (hash[i / 2] >> (if i % 2 == 0 { 4 } else { 0 })) & 0xf;
+
+            // EIP-55: uppercase the hex digit when the matching hash nibble is >= 8.
+            if nibble >= 8 {
+                checksum_address.push(ch.to_ascii_uppercase());
+            } else {
+                checksum_address.push(ch);
+            }
+        }
+
+        checksum_address
+    }
+}
+
+/// An error obtained from trying to interpret a hex encoded string into an AccountId20
+#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)]
+#[allow(missing_docs)]
+pub enum FromChecksumError {
+    #[error("Length is bad")]
+    BadLength,
+    #[error("Invalid checksum")]
+    InvalidChecksum,
+    #[error("Invalid checksum prefix byte.")]
+    InvalidPrefix,
+}
+
+impl Serialize for AccountId20 {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(&self.checksum())
+    }
+}
+
+impl<'de> Deserialize<'de> for AccountId20 {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        String::deserialize(deserializer)?
+            .parse::<AccountId20>()
+            .map_err(|e| serde::de::Error::custom(format!("{e:?}")))
+    }
+}
+
+impl core::fmt::Display for AccountId20 {
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "{}", self.checksum())
+    }
+}
+
+impl core::str::FromStr for AccountId20 {
+    type Err = FromChecksumError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.len() != 42 {
+            return Err(FromChecksumError::BadLength);
+        }
+        if !s.starts_with("0x") {
+            return Err(FromChecksumError::InvalidPrefix);
+        }
+        hex::decode(&s.as_bytes()[2..])
+            .map_err(|_| FromChecksumError::InvalidChecksum)?
+            .try_into()
+            .map(AccountId20)
+            .map_err(|_| FromChecksumError::BadLength)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use alloc::vec;
+
+    #[test]
+    fn deserialisation() {
+        let key_hashes = vec![
+            "0xf24FF3a9CF04c71Dbc94D0b566f7A27B94566cac",
+            "0x3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0",
+            "0x798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc",
+            "0x773539d4Ac0e786233D90A233654ccEE26a613D9",
+            "0xFf64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB",
+            "0xC0F0f4ab324C46e55D02D0033343B4Be8A55532d",
+        ];
+
+        for key_hash in key_hashes {
+            let parsed: AccountId20 = key_hash.parse().expect("Failed to parse");
+
+            let encoded = parsed.checksum();
+
+            // `encoded` should be equal to the initial key_hash
+            assert_eq!(encoded, key_hash);
+        }
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/bits.rs b/vendor/pezkuwi-subxt/core/src/utils/bits.rs
new file mode 100644
index 00000000..245dd574
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/bits.rs
@@ -0,0 +1,256 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! Generic `scale_bits` over `bitvec`-like `BitOrder` and `BitFormat` types.
+
+use alloc::{vec, vec::Vec};
+use codec::{Compact, Input};
+use core::marker::PhantomData;
+use scale_bits::{
+    Bits,
+    scale::format::{Format, OrderFormat, StoreFormat},
+};
+use scale_decode::{IntoVisitor, TypeResolver};
+
+/// Associates `bitvec::store::BitStore` trait with corresponding, type-erased
+/// `scale_bits::StoreFormat` enum.
+///
+/// Used to decode bit sequences by providing `scale_bits::StoreFormat` using
+/// `bitvec`-like type parameters.
+pub trait BitStore {
+    /// Corresponding `scale_bits::StoreFormat` value.
+    const FORMAT: StoreFormat;
+    /// Number of bits that the backing store types holds.
+    const BITS: u32;
+}
+macro_rules! impl_store {
+    ($ty:ident, $wrapped:ty) => {
+        impl BitStore for $wrapped {
+            const FORMAT: StoreFormat = StoreFormat::$ty;
+            const BITS: u32 = <$wrapped>::BITS;
+        }
+    };
+}
+impl_store!(U8, u8);
+impl_store!(U16, u16);
+impl_store!(U32, u32);
+impl_store!(U64, u64);
+
+/// Associates `bitvec::order::BitOrder` trait with corresponding, type-erased
+/// `scale_bits::OrderFormat` enum.
+///
+/// Used to decode bit sequences in runtime by providing `scale_bits::OrderFormat` using
+/// `bitvec`-like type parameters.
+pub trait BitOrder {
+    /// Corresponding `scale_bits::OrderFormat` value.
+    const FORMAT: OrderFormat;
+}
+macro_rules! impl_order {
+    ($ty:ident) => {
+        #[doc = concat!("Type-level value that corresponds to `scale_bits::OrderFormat::", stringify!($ty), "` at run-time")]
+        #[doc = concat!(" and `bitvec::order::BitOrder::", stringify!($ty), "` at the type level.")]
+        #[derive(Clone, Debug, PartialEq, Eq)]
+        pub enum $ty {}
+        impl BitOrder for $ty {
+            const FORMAT: OrderFormat = OrderFormat::$ty;
+        }
+    };
+}
+impl_order!(Lsb0);
+impl_order!(Msb0);
+
+/// Constructs run-time format parameters based on the corresponding type-level parameters.
+fn bit_format<Store: BitStore, Order: BitOrder>() -> Format {
+    Format { order: Order::FORMAT, store: Store::FORMAT }
+}
+
+/// `scale_bits::Bits` generic over the bit store (`u8`/`u16`/`u32`/`u64`) and bit order (LSB, MSB)
+/// used for SCALE encoding/decoding. Uses `scale_bits::Bits`-default `u8` and LSB format
+/// underneath.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct DecodedBits<Store = u8, Order = Lsb0> {
+    bits: Bits,
+    _marker: PhantomData<(Store, Order)>,
+}
+
+impl<Store, Order> DecodedBits<Store, Order> {
+    /// Extracts the underlying `scale_bits::Bits` value.
+    pub fn into_bits(self) -> Bits {
+        self.bits
+    }
+
+    /// References the underlying `scale_bits::Bits` value.
+    pub fn as_bits(&self) -> &Bits {
+        &self.bits
+    }
+}
+
+impl<Store, Order> core::iter::FromIterator<bool> for DecodedBits<Store, Order> {
+    fn from_iter<T: IntoIterator<Item = bool>>(iter: T) -> Self {
+        DecodedBits { bits: Bits::from_iter(iter), _marker: PhantomData }
+    }
+}
+
+impl<Store: BitStore, Order: BitOrder> codec::Decode for DecodedBits<Store, Order> {
+    fn decode<I: Input>(input: &mut I) -> Result<Self, codec::Error> {
+        /// Equivalent of `BitSlice::MAX_BITS` on 32bit machine.
+        const ARCH32BIT_BITSLICE_MAX_BITS: u32 = 0x1fff_ffff;
+
+        let Compact(bits) = <Compact<u32>>::decode(input)?;
+        // Otherwise it is impossible to store it on 32bit machine.
+        if bits > ARCH32BIT_BITSLICE_MAX_BITS {
+            return Err("Attempt to decode a BitVec with too many bits".into());
+        }
+        // NOTE: Replace with `bits.div_ceil(Store::BITS)` if `int_roundings` is stabilised
+        let elements = (bits / Store::BITS) + u32::from(bits % Store::BITS != 0);
+        let bytes_in_elem = Store::BITS.saturating_div(u8::BITS);
+        let bytes_needed = (elements * bytes_in_elem) as usize;
+
+        // NOTE: We could reduce allocations if it would be possible to directly
+        // decode from an `Input` type using a custom format (rather than the default)
+        // for the `Bits` type.
+        let mut storage = codec::Encode::encode(&Compact(bits));
+        let prefix_len = storage.len();
+        storage.reserve_exact(bytes_needed);
+        storage.extend(vec![0; bytes_needed]);
+        input.read(&mut storage[prefix_len..])?;
+
+        let decoder = scale_bits::decode_using_format_from(&storage, bit_format::<Store, Order>())?;
+        let bits = decoder.collect::<Result<Vec<_>, _>>()?;
+        let bits = Bits::from_iter(bits);
+
+        Ok(DecodedBits { bits, _marker: PhantomData })
+    }
+}
+
+impl<Store: BitStore, Order: BitOrder> codec::Encode for DecodedBits<Store, Order> {
+    fn size_hint(&self) -> usize {
+        self.bits.size_hint()
+    }
+
+    fn encoded_size(&self) -> usize {
+        self.bits.encoded_size()
+    }
+
+    fn encode(&self) -> Vec<u8> {
+        scale_bits::encode_using_format(self.bits.iter(), bit_format::<Store, Order>())
+    }
+}
+
+#[doc(hidden)]
+pub struct DecodedBitsVisitor<S, O, R>(core::marker::PhantomData<(S, O, R)>);
+
+impl<S, O, R: TypeResolver> scale_decode::Visitor for DecodedBitsVisitor<S, O, R> {
+    type Value<'scale, 'info> = DecodedBits<S, O>;
+    type Error = scale_decode::Error;
+    type TypeResolver = R;
+
+    fn unchecked_decode_as_type<'scale, 'info>(
+        self,
+        input: &mut &'scale [u8],
+        type_id: R::TypeId,
+        types: &'info R,
+    ) -> scale_decode::visitor::DecodeAsTypeResult<
+        Self,
+        Result<Self::Value<'scale, 'info>, Self::Error>,
+    > {
+        // Delegate to the `Bits` visitor and wrap the result back up.
+        let res =
+            scale_decode::visitor::decode_with_visitor(input, type_id, types, Bits::into_visitor())
+                .map(|bits| DecodedBits { bits, _marker: PhantomData });
+        scale_decode::visitor::DecodeAsTypeResult::Decoded(res)
+    }
+}
+impl<S, O> scale_decode::IntoVisitor for DecodedBits<S, O> {
+    type AnyVisitor<R: TypeResolver> = DecodedBitsVisitor<S, O, R>;
+    fn into_visitor<R: TypeResolver>() -> DecodedBitsVisitor<S, O, R> {
+        DecodedBitsVisitor(PhantomData)
+    }
+}
+
+impl<Store, Order> scale_encode::EncodeAsType for DecodedBits<Store, Order> {
+    fn encode_as_type_to<R: TypeResolver>(
+        &self,
+        type_id: R::TypeId,
+        types: &R,
+        out: &mut Vec<u8>,
+    ) -> Result<(), scale_encode::Error> {
+        self.bits.encode_as_type_to(type_id, types, out)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use core::fmt::Debug;
+
+    use bitvec::vec::BitVec;
+    use codec::Decode as _;
+
+    // NOTE: We don't use `bitvec::order` types in our implementation, since we
+    // don't want to depend on `bitvec`. Rather than reimplementing the unsafe
+    // trait on our types here for testing purposes, we simply convert and
+    // delegate to `bitvec`'s own types.
+    trait ToBitVec {
+        type Order: bitvec::order::BitOrder;
+    }
+    impl ToBitVec for Lsb0 {
+        type Order = bitvec::order::Lsb0;
+    }
+    impl ToBitVec for Msb0 {
+        type Order = bitvec::order::Msb0;
+    }
+
+    fn scales_like_bitvec_and_roundtrips<
+        'a,
+        Store: BitStore + bitvec::store::BitStore + PartialEq,
+        Order: BitOrder + ToBitVec + Debug + PartialEq,
+    >(
+        input: impl IntoIterator<Item = &'a bool>,
+    ) where
+        BitVec<Store, <Order as ToBitVec>::Order>: codec::Encode + codec::Decode,
+    {
+        let input: Vec<_> = input.into_iter().copied().collect();
+
+        let decoded_bits = DecodedBits::<Store, Order>::from_iter(input.clone());
+        let bitvec = BitVec::<Store, <Order as ToBitVec>::Order>::from_iter(input);
+
+        let decoded_bits_encoded = codec::Encode::encode(&decoded_bits);
+        let bitvec_encoded = codec::Encode::encode(&bitvec);
+        assert_eq!(decoded_bits_encoded, bitvec_encoded);
+
+        let decoded_bits_decoded =
+            DecodedBits::<Store, Order>::decode(&mut &decoded_bits_encoded[..])
+                .expect("SCALE-encoding DecodedBits to roundtrip");
+        let bitvec_decoded =
+            BitVec::<Store, <Order as ToBitVec>::Order>::decode(&mut &bitvec_encoded[..])
+                .expect("SCALE-encoding BitVec to roundtrip");
+        assert_eq!(decoded_bits, decoded_bits_decoded);
+        assert_eq!(bitvec, bitvec_decoded);
+    }
+
+    #[test]
+    fn decoded_bitvec_scales_and_roundtrips() {
+        let test_cases = [
+            vec![],
+            vec![true],
+            vec![false],
+            vec![true, false, true],
+            vec![true, false, true, false, false, false, false, false, true],
+            [vec![true; 5], vec![false; 5], vec![true; 1], vec![false; 3]].concat(),
+            [vec![true; 9], vec![false; 9], vec![true; 9], vec![false; 9]].concat(),
+        ];
+
+        for test_case in &test_cases {
+            scales_like_bitvec_and_roundtrips::<u8, Lsb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u16, Lsb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u32, Lsb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u64, Lsb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u8, Msb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u16, Msb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u32, Msb0>(test_case);
+            scales_like_bitvec_and_roundtrips::<u64, Msb0>(test_case);
+        }
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/era.rs b/vendor/pezkuwi-subxt/core/src/utils/era.rs
new file mode 100644
index 00000000..9bf2f14d
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/era.rs
@@ -0,0 +1,227 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+use alloc::{format, vec::Vec};
+use codec::{Decode, Encode};
+use scale_decode::{
+    IntoVisitor, TypeResolver, Visitor,
+    ext::scale_type_resolver,
+    visitor::{
+        TypeIdFor,
+        types::{Composite, Variant},
+    },
+};
+use scale_encode::EncodeAsType;
+
+// Dev note: This and related bits taken from `sp_runtime::generic::Era`
+/// An era to describe the longevity of a transaction.
+#[derive(
+    PartialEq,
+    Default,
+    Eq,
+    Clone,
+    Copy,
+    Debug,
+    serde::Serialize,
+    serde::Deserialize,
+    scale_info::TypeInfo,
+)]
+pub enum Era {
+    /// The transaction is valid forever. The genesis hash must be present in the signed content.
+    #[default]
+    Immortal,
+
+    /// The transaction will expire. Use [`Era::mortal`] to construct this with correct values.
+    ///
+    /// When used on `FRAME`-based runtimes, `period` cannot exceed `BlockHashCount` parameter
+    /// of `system` module.
+    Mortal {
+        /// The number of blocks that the tx will be valid for after the checkpoint block
+        /// hash found in the signer payload.
+        period: u64,
+        /// The phase in the period that this transaction's lifetime begins (and, importantly,
+        /// implies which block hash is included in the signature material). If the `period` is
+        /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that
+        /// `period` is.
+        phase: u64,
+    },
+}
+
+// E.g. with period == 4:
+// 0         10        20        30        40
+// 0123456789012345678901234567890123456789012
+//              |...|
+//    authored -/   \- expiry
+// phase = 1
+// n = Q(current - phase, period) + phase
+impl Era {
+    /// Create a new era based on a period (which should be a power of two between 4 and 65536
+    /// inclusive) and a block number on which it should start (or, for long periods, be shortly
+    /// after the start).
+    ///
+    /// If using `Era` in the context of `FRAME` runtime, make sure that `period`
+    /// does not exceed `BlockHashCount` parameter passed to `system` module, since that
+    /// prunes old blocks and renders transactions immediately invalid.
+    pub fn mortal(period: u64, current: u64) -> Self {
+        let period = period.checked_next_power_of_two().unwrap_or(1 << 16).clamp(4, 1 << 16);
+        let phase = current % period;
+        let quantize_factor = (period >> 12).max(1);
+        let quantized_phase = phase / quantize_factor * quantize_factor;
+
+        Self::Mortal { period, phase: quantized_phase }
+    }
+}
+
+// Both copied from `sp_runtime::generic::Era`; this is the wire interface and so
+// it's really the most important bit here.
+impl codec::Encode for Era {
+    fn encode_to<T: codec::Output + ?Sized>(&self, output: &mut T) {
+        match self {
+            Self::Immortal => output.push_byte(0),
+            Self::Mortal { period, phase } => {
+                let quantize_factor = (*period >> 12).max(1);
+                let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 |
+                    ((phase / quantize_factor) << 4) as u16;
+                encoded.encode_to(output);
+            },
+        }
+    }
+}
+impl codec::Decode for Era {
+    fn decode<I: codec::Input>(input: &mut I) -> Result<Self, codec::Error> {
+        let first = input.read_byte()?;
+        if first == 0 {
+            Ok(Self::Immortal)
+        } else {
+            let encoded = first as u64 + ((input.read_byte()? as u64) << 8);
+            let period = 2 << (encoded % (1 << 4));
+            let quantize_factor = (period >> 12).max(1);
+            let phase = (encoded >> 4) * quantize_factor;
+            if period >= 4 && phase < period {
+                Ok(Self::Mortal { period, phase })
+            } else {
+                Err("Invalid period and phase".into())
+            }
+        }
+    }
+}
+
+/// Define manually how to encode an Era given some type information. Here we
+/// basically check that the type we're targeting is called "Era" and then codec::Encode.
+impl EncodeAsType for Era {
+    fn encode_as_type_to<R: TypeResolver>(
+        &self,
+        type_id: R::TypeId,
+        types: &R,
+        out: &mut Vec<u8>,
+    ) -> Result<(), scale_encode::Error> {
+        // Visit the type to check that it is an Era. This is only a rough check.
+        let visitor = scale_type_resolver::visitor::new((), |_, _| false)
+            .visit_variant(|_, path, _variants| path.last() == Some("Era"));
+
+        let is_era = types.resolve_type(type_id.clone(), visitor).unwrap_or_default();
+        if !is_era {
+            return Err(scale_encode::Error::custom_string(format!(
+                "Type {type_id:?} is not a valid Era type; expecting either Immortal or MortalX variant"
+            )));
+        }
+
+        // if the type looks valid then just scale encode our Era.
+        self.encode_to(out);
+        Ok(())
+    }
+}
+
+/// Define manually how to decode an Era given some type information. Here we check that the
+/// variant we're decoding is one of the expected Era variants, and that the field is correct if so,
+/// ensuring that this will fail if trying to decode something that isn't an Era.
+pub struct EraVisitor<R>(core::marker::PhantomData<R>);
+
+impl IntoVisitor for Era {
+    type AnyVisitor<R: TypeResolver> = EraVisitor<R>;
+    fn into_visitor<R: TypeResolver>() -> Self::AnyVisitor<R> {
+        EraVisitor(core::marker::PhantomData)
+    }
+}
+
+impl<R: TypeResolver> Visitor for EraVisitor<R> {
+    type Value<'scale, 'resolver> = Era;
+    type Error = scale_decode::Error;
+    type TypeResolver = R;
+
+    // Unwrap any newtype wrappers around the era, eg the CheckMortality extension (which actually
+    // has 2 fields, but scale_info seems to automatically ignore the PhantomData field). This
+    // allows us to decode directly from CheckMortality into Era.
+    fn visit_composite<'scale, 'resolver>(
+        self,
+        value: &mut Composite<'scale, 'resolver, Self::TypeResolver>,
+        _type_id: TypeIdFor<Self>,
+    ) -> Result<Self::Value<'scale, 'resolver>, Self::Error> {
+        if value.remaining() != 1 {
+            return Err(scale_decode::Error::custom_string(format!(
+                "Expected any wrapper around Era to have exactly one field, but got {} fields",
+                value.remaining()
+            )));
+        }
+
+        value.decode_item(self).expect("1 field expected; checked above.")
+    }
+
+    fn visit_variant<'scale, 'resolver>(
+        self,
+        value: &mut Variant<'scale, 'resolver, Self::TypeResolver>,
+        _type_id: TypeIdFor<Self>,
+    ) -> Result<Self::Value<'scale, 'resolver>, Self::Error> {
+        let variant = value.name();
+
+        // If the variant is immortal, we know the outcome.
+        if variant == "Immortal" {
+            return Ok(Era::Immortal);
+        }
+
+        // Otherwise, we expect a variant Mortal1..Mortal255 where the number
+        // here is the first byte, and the second byte is conceptually a field of this variant.
+        // This weird encoding is because the Era is compressed to just 1 byte if immortal and
+        // just 2 bytes if mortal.
+        //
+        // Note: We _could_ just assume we'll have 2 bytes to work with and decode the era directly,
+        // but checking the variant names ensures that the thing we think is an Era actually _is_
+        // one, based on the type info for it.
+        let first_byte = variant
+            .strip_prefix("Mortal")
+            .and_then(|s| s.parse::<u8>().ok())
+            .ok_or_else(|| {
+                scale_decode::Error::custom_string(format!(
+                    "Expected MortalX variant, but got {variant}"
+                ))
+            })?;
+
+        // We need 1 field in the MortalN variant containing the second byte.
+        let mortal_fields = value.fields();
+        if mortal_fields.remaining() != 1 {
+            return Err(scale_decode::Error::custom_string(format!(
+                "Expected Mortal{} to have one u8 field, but got {} fields",
+                first_byte,
+                mortal_fields.remaining()
+            )));
+        }
+
+        let second_byte = mortal_fields
+            .decode_item(u8::into_visitor())
+            .expect("At least one field should exist; checked above.")
+            .map_err(|e| {
+                scale_decode::Error::custom_string(format!(
+                    "Expected mortal variant field to be u8, but: {e}"
+                ))
+            })?;
+
+        // Now that we have both bytes we can decode them into the era using
+        // the same logic as the codec::Decode impl does.
+        Era::decode(&mut &[first_byte, second_byte][..]).map_err(|e| {
+            scale_decode::Error::custom_string(format!(
+                "Failed to codec::Decode Era from Mortal bytes: {e}"
+            ))
+        })
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/mod.rs b/vendor/pezkuwi-subxt/core/src/utils/mod.rs
new file mode 100644
index 00000000..11dc8dc9
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/mod.rs
@@ -0,0 +1,78 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! Miscellaneous utility helpers.
+
+mod account_id;
+mod account_id20;
+pub mod bits;
+mod era;
+mod multi_address;
+mod multi_signature;
+mod static_type;
+mod unchecked_extrinsic;
+mod wrapper_opaque;
+mod yesnomaybe;
+
+use alloc::{borrow::ToOwned, format, string::String, vec::Vec};
+use codec::{Compact, Decode, Encode};
+use derive_where::derive_where;
+
+pub use account_id::AccountId32;
+pub use account_id20::AccountId20;
+pub use era::Era;
+pub use multi_address::MultiAddress;
+pub use multi_signature::MultiSignature;
+pub use primitive_types::{H160, H256, H512};
+pub use static_type::Static;
+pub use unchecked_extrinsic::UncheckedExtrinsic;
+pub use wrapper_opaque::WrapperKeepOpaque;
+pub use yesnomaybe::{Maybe, No, NoMaybe, Yes, YesMaybe, YesNo};
+
+/// Wraps an already encoded byte vector, prevents being encoded as a raw byte vector as part of
+/// the transaction payload
+#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Encoded(pub Vec<u8>);
+
+impl codec::Encode for Encoded {
+    fn encode(&self) -> Vec<u8> {
+        // Emit the inner bytes verbatim; no length prefix is added.
+        self.0.to_owned()
+    }
+}
+
+/// Decodes a compact encoded value from the beginning of the provided bytes,
+/// returning the value and any remaining bytes.
+pub fn strip_compact_prefix(bytes: &[u8]) -> Result<(u64, &[u8]), codec::Error> {
+    let cursor = &mut &*bytes;
+    let val = <Compact<u64>>::decode(cursor)?;
+    Ok((val.0, *cursor))
+}
+
+/// A version of [`core::marker::PhantomData`] that is also Send and Sync (which is fine
+/// because regardless of the generic param, it is always possible to Send + Sync this
+/// 0 size type).
+#[derive(Encode, Decode, scale_info::TypeInfo)]
+#[derive_where(Clone, PartialEq, Debug, Eq, Default, Hash)]
+#[scale_info(skip_type_params(T))]
+#[doc(hidden)]
+pub struct PhantomDataSendSync<T>(core::marker::PhantomData<T>);
+
+impl<T> PhantomDataSendSync<T> {
+    pub fn new() -> Self {
+        Self(core::marker::PhantomData)
+    }
+}
+
+// SAFETY: this is a zero-sized marker which never holds a `T`, so it is always
+// safe to send or share across threads regardless of `T`.
+unsafe impl<T> Send for PhantomDataSendSync<T> {}
+unsafe impl<T> Sync for PhantomDataSendSync<T> {}
+
+/// This represents a key-value collection and is SCALE compatible
+/// with collections like BTreeMap. This has the same type params
+/// as `BTreeMap` which allows us to easily swap the two during codegen.
+pub type KeyedVec<K, V> = Vec<(K, V)>;
+
+/// A quick helper to encode some bytes to hex.
+pub fn to_hex(bytes: impl AsRef<[u8]>) -> String {
+    format!("0x{}", hex::encode(bytes.as_ref()))
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/multi_address.rs b/vendor/pezkuwi-subxt/core/src/utils/multi_address.rs
new file mode 100644
index 00000000..22bbb355
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/multi_address.rs
@@ -0,0 +1,45 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! The "default" Bizinikiwi/Pezkuwi Address type. This is used in codegen, as well as signing
+//! related bits. This doesn't contain much functionality itself, but is easy to convert to/from an
+//! `sp_runtime::MultiAddress` for instance, to gain functionality without forcing a dependency on
+//! Bizinikiwi crates here.
+
+use alloc::vec::Vec;
+use codec::{Decode, Encode};
+
+/// A multi-format address wrapper for on-chain accounts. This is a simplified version of
+/// Bizinikiwi's `sp_runtime::MultiAddress`.
+#[derive(
+    Clone,
+    Eq,
+    PartialEq,
+    Ord,
+    PartialOrd,
+    Encode,
+    Decode,
+    Debug,
+    scale_encode::EncodeAsType,
+    scale_decode::DecodeAsType,
+    scale_info::TypeInfo,
+)]
+pub enum MultiAddress<AccountId, AccountIndex> {
+    /// It's an account ID (pubkey).
+    Id(AccountId),
+    /// It's an account index.
+    Index(#[codec(compact)] AccountIndex),
+    /// It's some arbitrary raw bytes.
+    Raw(Vec<u8>),
+    /// It's a 32 byte representation.
+    Address32([u8; 32]),
+    /// Its a 20 byte representation.
+    Address20([u8; 20]),
+}
+
+impl<AccountId, AccountIndex> From<AccountId> for MultiAddress<AccountId, AccountIndex> {
+    fn from(a: AccountId) -> Self {
+        Self::Id(a)
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/multi_signature.rs b/vendor/pezkuwi-subxt/core/src/utils/multi_signature.rs
new file mode 100644
index 00000000..43434bdb
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/multi_signature.rs
@@ -0,0 +1,22 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! The "default" Bizinikiwi/Pezkuwi Signature type. This is used in codegen, as well as signing
+//! related bits. This doesn't contain much functionality itself, but is easy to convert to/from an
+//! `sp_runtime::MultiSignature` for instance, to gain functionality without forcing a dependency on
+//! Bizinikiwi crates here.
+
+use codec::{Decode, Encode};
+
+/// Signature container that can store known signature types. This is a simplified version of
+/// `sp_runtime::MultiSignature`. To obtain more functionality, convert this into that type.
+#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, scale_info::TypeInfo)]
+pub enum MultiSignature {
+    /// An Ed25519 signature.
+    Ed25519([u8; 64]),
+    /// An Sr25519 signature.
+    Sr25519([u8; 64]),
+    /// An ECDSA/SECP256k1 signature (a 512-bit value, plus 8 bits for recovery ID).
+    Ecdsa([u8; 65]),
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/static_type.rs b/vendor/pezkuwi-subxt/core/src/utils/static_type.rs
new file mode 100644
index 00000000..520146e6
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/static_type.rs
@@ -0,0 +1,82 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+use codec::{Decode, Encode};
+use scale_decode::{IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult};
+use scale_encode::EncodeAsType;
+
+use alloc::vec::Vec;
+
+/// If the type inside this implements [`Encode`], this will implement
+/// [`scale_encode::EncodeAsType`]. If the type inside this implements [`Decode`], this will
+/// implement [`scale_decode::DecodeAsType`].
+///
+/// In either direction, we ignore any type information and just attempt to encode/decode statically
+/// via the [`Encode`] and [`Decode`] implementations. This can be useful as an adapter for types
+/// which do not implement [`scale_encode::EncodeAsType`] and [`scale_decode::DecodeAsType`]
+/// themselves, but it's best to avoid using it where possible as it will not take into account any
+/// type information, and is thus more likely to encode or decode incorrectly.
+#[derive(Debug, Encode, Decode, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
+pub struct Static<T>(pub T);
+
+impl<T: Encode> EncodeAsType for Static<T> {
+    fn encode_as_type_to<R: TypeResolver>(
+        &self,
+        _type_id: R::TypeId,
+        _types: &R,
+        out: &mut Vec<u8>,
+    ) -> Result<(), scale_encode::Error> {
+        self.0.encode_to(out);
+        Ok(())
+    }
+}
+
+pub struct StaticDecodeAsTypeVisitor<T, R>(core::marker::PhantomData<(T, R)>);
+
+impl<T: Decode, R: TypeResolver> Visitor for StaticDecodeAsTypeVisitor<T, R> {
+    type Value<'scale, 'info> = Static<T>;
+    type Error = scale_decode::Error;
+    type TypeResolver = R;
+
+    fn unchecked_decode_as_type<'scale, 'info>(
+        self,
+        input: &mut &'scale [u8],
+        _type_id: R::TypeId,
+        _types: &'info R,
+    ) -> DecodeAsTypeResult<Self, Result<Self::Value<'scale, 'info>, Self::Error>> {
+        use scale_decode::{Error, visitor::DecodeError};
+        let decoded = T::decode(input)
+            .map(Static)
+            .map_err(|e| Error::new(DecodeError::CodecError(e).into()));
+        DecodeAsTypeResult::Decoded(decoded)
+    }
+}
+
+impl<T: Decode> IntoVisitor for Static<T> {
+    type AnyVisitor<R: TypeResolver> = StaticDecodeAsTypeVisitor<T, R>;
+    fn into_visitor<R: TypeResolver>() -> StaticDecodeAsTypeVisitor<T, R> {
+        StaticDecodeAsTypeVisitor(core::marker::PhantomData)
+    }
+}
+
+// Make it easy to convert types into Static where required.
+impl<T> From<T> for Static<T> {
+    fn from(value: T) -> Self {
+        Static(value)
+    }
+}
+
+// Static is just a marker type and should be as transparent as possible:
+impl<T> core::ops::Deref for Static<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T> core::ops::DerefMut for Static<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/unchecked_extrinsic.rs b/vendor/pezkuwi-subxt/core/src/utils/unchecked_extrinsic.rs
new file mode 100644
index 00000000..cceb9176
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/unchecked_extrinsic.rs
@@ -0,0 +1,142 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! The "default" Bizinikiwi/Pezkuwi UncheckedExtrinsic.
+//! This is used in codegen for runtime API calls.
+//!
+//! The inner bytes represent the encoded extrinsic expected by the
+//! runtime APIs. Deriving `EncodeAsType` would lead to the inner
+//! bytes to be re-encoded (length prefixed).
+
+use core::marker::PhantomData;
+
+use codec::{Decode, Encode};
+use scale_decode::{DecodeAsType, IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult};
+
+use super::{Encoded, Static};
+use alloc::vec::Vec;
+
+/// The unchecked extrinsic from bizinikiwi.
+#[derive(Clone, Debug, Eq, PartialEq, Encode)]
+pub struct UncheckedExtrinsic<Address, Call, Signature, Extra>(
+    Static<Encoded>,
+    #[codec(skip)] PhantomData<(Address, Call, Signature, Extra)>,
+);
+
+impl<Address, Call, Signature, Extra> UncheckedExtrinsic<Address, Call, Signature, Extra> {
+    /// Construct a new [`UncheckedExtrinsic`].
+    pub fn new(bytes: Vec<u8>) -> Self {
+        Self(Static(Encoded(bytes)), PhantomData)
+    }
+
+    /// Get the bytes of the encoded extrinsic.
+    pub fn bytes(&self) -> &[u8] {
+        self.0.0.0.as_slice()
+    }
+}
+
+impl<Address, Call, Signature, Extra> Decode
+    for UncheckedExtrinsic<Address, Call, Signature, Extra>
+{
+    fn decode<I: codec::Input>(input: &mut I) -> Result<Self, codec::Error> {
+        // The bytes for an UncheckedExtrinsic are first a compact
+        // encoded length, and then the bytes following. This is the
+        // same encoding as a Vec, so easiest ATM is just to decode
+        // into that, and then encode the vec bytes to get our extrinsic
+        // bytes, which we save into an `Encoded` to preserve as-is.
+        let xt_vec: Vec<u8> = Decode::decode(input)?;
+        Ok(UncheckedExtrinsic::new(xt_vec))
+    }
+}
+
+impl<Address, Call, Signature, Extra> scale_encode::EncodeAsType
+    for UncheckedExtrinsic<Address, Call, Signature, Extra>
+{
+    fn encode_as_type_to<R: TypeResolver>(
+        &self,
+        type_id: R::TypeId,
+        types: &R,
+        out: &mut Vec<u8>,
+    ) -> Result<(), scale_encode::Error> {
+        self.0.encode_as_type_to(type_id, types, out)
+    }
+}
+
+impl<Address, Call, Signature, Extra> From<Vec<u8>>
+    for UncheckedExtrinsic<Address, Call, Signature, Extra>
+{
+    fn from(bytes: Vec<u8>) -> Self {
+        UncheckedExtrinsic::new(bytes)
+    }
+}
+
+impl<Address, Call, Signature, Extra> From<UncheckedExtrinsic<Address, Call, Signature, Extra>>
+    for Vec<u8>
+{
+    fn from(bytes: UncheckedExtrinsic<Address, Call, Signature, Extra>) -> Self {
+        bytes.0.0.0
+    }
+}
+
+pub struct UncheckedExtrinsicDecodeAsTypeVisitor<Address, Call, Signature, Extra, R>(
+    PhantomData<(Address, Call, Signature, Extra, R)>,
+);
+
+impl<Address, Call, Signature, Extra, R: TypeResolver> Visitor
+    for UncheckedExtrinsicDecodeAsTypeVisitor<Address, Call, Signature, Extra, R>
+{
+    type Value<'scale, 'info> = UncheckedExtrinsic<Address, Call, Signature, Extra>;
+    type Error = scale_decode::Error;
+    type TypeResolver = R;
+
+    fn unchecked_decode_as_type<'scale, 'info>(
+        self,
+        input: &mut &'scale [u8],
+        type_id: R::TypeId,
+        types: &'info R,
+    ) -> DecodeAsTypeResult<Self, Result<Self::Value<'scale, 'info>, Self::Error>> {
+        DecodeAsTypeResult::Decoded(Self::Value::decode_as_type(input, type_id, types))
+    }
+}
+
+impl<Address, Call, Signature, Extra> IntoVisitor
+    for UncheckedExtrinsic<Address, Call, Signature, Extra>
+{
+    type AnyVisitor<R: TypeResolver> =
+        UncheckedExtrinsicDecodeAsTypeVisitor<Address, Call, Signature, Extra, R>;
+
+    fn into_visitor<R: TypeResolver>()
+    -> UncheckedExtrinsicDecodeAsTypeVisitor<Address, Call, Signature, Extra, R> {
+        UncheckedExtrinsicDecodeAsTypeVisitor(PhantomData)
+    }
+}
+
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+
+    use alloc::vec;
+
+    #[test]
+    fn unchecked_extrinsic_encoding() {
+        // A tx is basically some bytes with a compact length prefix; ie an encoded vec:
+        let tx_bytes = vec![1u8, 2, 3].encode();
+
+        let unchecked_extrinsic = UncheckedExtrinsic::<(), (), (), ()>::new(tx_bytes.clone());
+        let encoded_tx_bytes = unchecked_extrinsic.encode();
+
+        // The encoded representation must not alter the provided bytes.
+        assert_eq!(tx_bytes, encoded_tx_bytes);
+
+        // However, for decoding we expect to be able to read the extrinsic from the wire
+        // which would be length prefixed.
+        let decoded_tx = UncheckedExtrinsic::<(), (), (), ()>::decode(&mut &tx_bytes[..]).unwrap();
+        let decoded_tx_bytes = decoded_tx.bytes();
+        let encoded_tx_bytes = decoded_tx.encode();
+
+        assert_eq!(decoded_tx_bytes, encoded_tx_bytes);
+        // Ensure we can decode the tx and fetch only the tx bytes.
+        assert_eq!(vec![1, 2, 3], encoded_tx_bytes);
+    }
+}
diff --git a/vendor/pezkuwi-subxt/core/src/utils/wrapper_opaque.rs b/vendor/pezkuwi-subxt/core/src/utils/wrapper_opaque.rs
new file mode 100644
index 00000000..a8dfeeaf
--- /dev/null
+++ b/vendor/pezkuwi-subxt/core/src/utils/wrapper_opaque.rs
@@ -0,0 +1,221 @@
+// Copyright 2019-2024 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+use super::PhantomDataSendSync;
+use codec::{Compact, Decode, DecodeAll, Encode};
+use derive_where::derive_where;
+use scale_decode::{IntoVisitor, TypeResolver, Visitor, ext::scale_type_resolver::visitor};
+use scale_encode::EncodeAsType;
+
+use alloc::{format, vec::Vec};
+
+/// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec<u8>`.
+/// [`WrapperKeepOpaque`] stores the type only in its opaque format, aka as a `Vec<u8>`. To
+/// access the real type `T` [`Self::try_decode`] needs to be used.
+// Dev notes:
+//
+// - This is adapted from [here](https://github.com/pezkuwichain/bizinikiwi/blob/master/frame/support/src/traits/misc.rs).
+// - The encoded bytes will be a compact encoded length followed by that number of bytes.
+// - However, the TypeInfo describes the type as a composite with first a compact encoded length
+//   and next the type itself.
+//   [`Encode`] and [`Decode`] impls will "just work" to take this into a `Vec<u8>`, but we need a
+//   custom [`EncodeAsType`] and [`Visitor`] implementation to encode and decode based on TypeInfo.
+#[derive(Encode, Decode)]
+#[derive_where(Debug, Clone, PartialEq, Eq, Default, Hash)]
+pub struct WrapperKeepOpaque<T> {
+    data: Vec<u8>,
+    _phantom: PhantomDataSendSync<T>,
+}
+
+impl<T> WrapperKeepOpaque<T> {
+    /// Try to decode the wrapped type from the inner `data`.
+    ///
+    /// Returns `None` if the decoding failed.
+    pub fn try_decode(&self) -> Option<T>
+    where
+        T: Decode,
+    {
+        T::decode_all(&mut &self.data[..]).ok()
+    }
+
+    /// Returns the length of the encoded `T`.
+    pub fn encoded_len(&self) -> usize {
+        self.data.len()
+    }
+
+    /// Returns the encoded data.
+    pub fn encoded(&self) -> &[u8] {
+        &self.data
+    }
+
+    /// Create from the given encoded `data`.
+    pub fn from_encoded(data: Vec<u8>) -> Self {
+        Self { data, _phantom: PhantomDataSendSync::new() }
+    }
+
+    /// Create from some raw value by encoding it.
+    pub fn from_value(value: T) -> Self
+    where
+        T: Encode,
+    {
+        Self { data: value.encode(), _phantom: PhantomDataSendSync::new() }
+    }
+}
+
+impl<T> EncodeAsType for WrapperKeepOpaque<T> {
+    fn encode_as_type_to<R: TypeResolver>(
+        &self,
+        type_id: R::TypeId,
+        types: &R,
+        out: &mut Vec<u8>,
+    ) -> Result<(), scale_encode::Error> {
+        use scale_encode::error::{Error, ErrorKind, Kind};
+
+        let ctx = (type_id.clone(), out);
+        let visitor = visitor::new(ctx, |(type_id, _out), _| {
+            // Check that the target shape lines up: any other shape but composite is wrong.
+            Err(Error::new(ErrorKind::WrongShape {
+                actual: Kind::Struct,
+                expected_id: format!("{type_id:?}"),
+            }))
+        })
+        .visit_composite(|(_type_id, out), _path, _fields| {
+            self.data.encode_to(out);
+            Ok(())
+        });
+
+        types
+            .resolve_type(type_id.clone(), visitor)
+            .map_err(|_| Error::new(ErrorKind::TypeNotFound(format!("{type_id:?}"))))?
+    }
+}
+
+pub struct WrapperKeepOpaqueVisitor<T, R>(core::marker::PhantomData<(T, R)>);
+impl<T: Decode, R: TypeResolver> Visitor for WrapperKeepOpaqueVisitor<T, R> {
+    type Value<'scale, 'info> = WrapperKeepOpaque<T>;
+    type Error = scale_decode::Error;
+    type TypeResolver = R;
+
+    fn visit_composite<'scale, 'info>(
+        self,
+        value: &mut scale_decode::visitor::types::Composite<'scale, 'info, R>,
+        _type_id: R::TypeId,
+    ) -> Result<Self::Value<'scale, 'info>, Self::Error> {
+        use scale_decode::{
+            error::{Error, ErrorKind},
+            visitor::DecodeError,
+        };
+
+        if value.name() != Some("WrapperKeepOpaque") {
+            return Err(Error::new(ErrorKind::VisitorDecodeError(DecodeError::TypeResolvingError(
+                format!("Expected a type named 'WrapperKeepOpaque', got: {:?}", value.name()),
+            ))));
+        }
+
+        if value.remaining() != 2 {
+            return Err(Error::new(ErrorKind::WrongLength {
+                actual_len: value.remaining(),
+                expected_len: 2,
+            }));
+        }
+
+        // The field to decode is a compact len followed by bytes. Decode the length, then grab the
+        // bytes.
+        let Compact(len) =
+            value.decode_item(Compact::<u32>::into_visitor()).expect("length checked")?;
+        let field = value.next().expect("length checked")?;
+
+        // Sanity check that the compact length we decoded lines up with the number of bytes encoded
+        // in the next field.
+        if field.bytes().len() != len as usize {
+            return Err(Error::custom_str(
+                "WrapperTypeKeepOpaque compact encoded length doesn't line up with encoded byte len",
+            ));
+        }
+
+        Ok(WrapperKeepOpaque { data: field.bytes().to_vec(), _phantom: PhantomDataSendSync::new() })
+    }
+}
+
+impl<T: Decode> IntoVisitor for WrapperKeepOpaque<T> {
+    type AnyVisitor<R: TypeResolver> = WrapperKeepOpaqueVisitor<T, R>;
+    fn into_visitor<R: TypeResolver>() -> WrapperKeepOpaqueVisitor<T, R> {
+        WrapperKeepOpaqueVisitor(core::marker::PhantomData)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use scale_decode::DecodeAsType;
+
+    use alloc::vec;
+
+    use super::*;
+
+    // Copied from https://github.com/pezkuwichain/bizinikiwi/blob/master/frame/support/src/traits/misc.rs
+    // and used for tests to check that we can work with the expected TypeInfo without needing to
+    // import the frame_support crate, which has quite a lot of dependencies.
+    impl<T: scale_info::TypeInfo + 'static> scale_info::TypeInfo for WrapperKeepOpaque<T> {
+        type Identity = Self;
+        fn type_info() -> scale_info::Type {
+            use scale_info::{Path, Type, TypeParameter, build::Fields, meta_type};
+
+            Type::builder()
+                .path(Path::new("WrapperKeepOpaque", module_path!()))
+                .type_params(vec![TypeParameter::new("T", Some(meta_type::<T>()))])
+                .composite(
+                    Fields::unnamed()
+                        .field(|f| f.compact::<u32>())
+                        .field(|f| f.ty::<T>().type_name("T")),
+                )
+        }
+    }
+
+    /// Given a type definition, return type ID and registry representing it.
+ fn make_type() -> (u32, scale_info::PortableRegistry) { + let m = scale_info::MetaType::new::(); + let mut types = scale_info::Registry::new(); + let id = types.register_type(&m); + let portable_registry: scale_info::PortableRegistry = types.into(); + (id.id, portable_registry) + } + + fn roundtrips_like_scale_codec(t: T) + where + T: EncodeAsType + + DecodeAsType + + Encode + + Decode + + PartialEq + + core::fmt::Debug + + scale_info::TypeInfo + + 'static, + { + let (type_id, types) = make_type::(); + + let scale_codec_encoded = t.encode(); + let encode_as_type_encoded = t.encode_as_type(type_id, &types).unwrap(); + + assert_eq!(scale_codec_encoded, encode_as_type_encoded, "encoded bytes should match"); + + let decode_as_type_bytes = &mut &*scale_codec_encoded; + let decoded_as_type = T::decode_as_type(decode_as_type_bytes, type_id, &types) + .expect("decode-as-type decodes"); + + let decode_scale_codec_bytes = &mut &*scale_codec_encoded; + let decoded_scale_codec = T::decode(decode_scale_codec_bytes).expect("scale-codec decodes"); + + assert!(decode_as_type_bytes.is_empty(), "no bytes should remain in decode-as-type impl"); + assert!(decode_scale_codec_bytes.is_empty(), "no bytes should remain in codec-decode impl"); + + assert_eq!(decoded_as_type, decoded_scale_codec, "decoded values should match"); + } + + #[test] + fn wrapper_keep_opaque_roundtrips_ok() { + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(123u64)); + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(true)); + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(vec![1u8, 2, 3, 4])); + } +} diff --git a/vendor/pezkuwi-subxt/core/src/utils/yesnomaybe.rs b/vendor/pezkuwi-subxt/core/src/utils/yesnomaybe.rs new file mode 100644 index 00000000..23306049 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/utils/yesnomaybe.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. 
+// see LICENSE for license details. + +/// A unit marker enum. +pub enum Yes {} +/// A unit marker enum. +pub enum Maybe {} +/// A unit marker enum. +pub enum No {} + +/// This is implemented for [`Yes`] and [`No`] and +/// allows us to check at runtime which of these types is present. +pub trait YesNo { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`No`] + fn is_no() -> bool { + false + } +} + +impl YesNo for Yes { + fn is_yes() -> bool { + true + } +} +impl YesNo for No { + fn is_no() -> bool { + true + } +} + +/// This is implemented for [`Yes`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. +pub trait YesMaybe { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl YesMaybe for Yes { + fn is_yes() -> bool { + true + } +} +impl YesMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} + +/// This is implemented for [`No`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. +pub trait NoMaybe { + /// [`No`] + fn is_no() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl NoMaybe for No { + fn is_no() -> bool { + true + } +} +impl NoMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} diff --git a/vendor/pezkuwi-subxt/core/src/view_functions/mod.rs b/vendor/pezkuwi-subxt/core/src/view_functions/mod.rs new file mode 100644 index 00000000..58d2d3a9 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/view_functions/mod.rs @@ -0,0 +1,77 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Encode View Function payloads, decode the associated values returned from them, and validate +//! static View Function payloads. 
+ +pub mod payload; + +use crate::{Metadata, error::ViewFunctionError}; +use alloc::{string::ToString, vec::Vec}; +use payload::Payload; +use scale_decode::IntoVisitor; + +/// Run the validation logic against some View Function payload you'd like to use. Returns `Ok(())` +/// if the payload is valid (or if it's not possible to check since the payload has no validation +/// hash). Return an error if the payload was not valid or something went wrong trying to validate +/// it (ie the View Function in question do not exist at all) +pub fn validate(payload: P, metadata: &Metadata) -> Result<(), ViewFunctionError> { + let Some(hash) = payload.validation_hash() else { + return Ok(()); + }; + + let pallet_name = payload.pallet_name(); + let function_name = payload.function_name(); + + let view_function = metadata + .pallet_by_name(pallet_name) + .ok_or_else(|| ViewFunctionError::PalletNotFound(pallet_name.to_string()))? + .view_function_by_name(function_name) + .ok_or_else(|| ViewFunctionError::ViewFunctionNotFound { + pallet_name: pallet_name.to_string(), + function_name: function_name.to_string(), + })?; + + if hash != view_function.hash() { Err(ViewFunctionError::IncompatibleCodegen) } else { Ok(()) } +} + +/// The name of the Runtime API call which can execute +pub const CALL_NAME: &str = "RuntimeViewFunction_execute_view_function"; + +/// Encode the bytes that will be passed to the "execute_view_function" Runtime API call, +/// to execute the View Function represented by the given payload. +pub fn call_args( + payload: P, + metadata: &Metadata, +) -> Result, ViewFunctionError> { + let inputs = frame_decode::view_functions::encode_view_function_inputs( + payload.pallet_name(), + payload.function_name(), + payload.args(), + metadata, + metadata.types(), + ) + .map_err(ViewFunctionError::CouldNotEncodeInputs)?; + + Ok(inputs) +} + +/// Decode the value bytes at the location given by the provided View Function payload. 
+pub fn decode_value( + bytes: &mut &[u8], + payload: P, + metadata: &Metadata, +) -> Result { + let value = frame_decode::view_functions::decode_view_function_response( + payload.pallet_name(), + payload.function_name(), + bytes, + metadata, + metadata.types(), + P::ReturnType::into_visitor(), + ) + .map_err(ViewFunctionError::CouldNotDecodeResponse)?; + + Ok(value) +} diff --git a/vendor/pezkuwi-subxt/core/src/view_functions/payload.rs b/vendor/pezkuwi-subxt/core/src/view_functions/payload.rs new file mode 100644 index 00000000..a1055cb3 --- /dev/null +++ b/vendor/pezkuwi-subxt/core/src/view_functions/payload.rs @@ -0,0 +1,161 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains the trait and types used to represent +//! View Function calls that can be made. + +use alloc::{borrow::Cow, string::String}; +use core::marker::PhantomData; +use derive_where::derive_where; +use frame_decode::view_functions::IntoEncodableValues; +use scale_decode::DecodeAsType; + +/// This represents a View Function payload that can call into the runtime of node. +/// +/// # Components +/// +/// - associated return type +/// +/// Resulting bytes of the call are interpreted into this type. +/// +/// - query ID +/// +/// The ID used to identify in the runtime which view function to call. +/// +/// - encoded arguments +/// +/// Each argument of the View Function must be scale-encoded. +pub trait Payload { + /// Type of the arguments for this call. + type ArgsType: IntoEncodableValues; + /// The return type of the function call. + type ReturnType: DecodeAsType; + + /// The View Function pallet name. + fn pallet_name(&self) -> &str; + + /// The View Function function name. + fn function_name(&self) -> &str; + + /// The arguments. + fn args(&self) -> &Self::ArgsType; + + /// Returns the statically generated validation hash. 
+ fn validation_hash(&self) -> Option<[u8; 32]> { + None + } +} + +// A reference to a payload is a valid payload. +impl Payload for &'_ P { + type ArgsType = P::ArgsType; + type ReturnType = P::ReturnType; + + fn pallet_name(&self) -> &str { + P::pallet_name(*self) + } + + fn function_name(&self) -> &str { + P::function_name(*self) + } + + fn args(&self) -> &Self::ArgsType { + P::args(*self) + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + P::validation_hash(*self) + } +} + +/// A View Function payload containing the generic argument data +/// and interpreting the result of the call as `ReturnType`. +/// +/// This can be created from static values (ie those generated +/// via the `subxt` macro) or dynamic values via [`dynamic`]. +#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] +pub struct StaticPayload { + pallet_name: Cow<'static, str>, + function_name: Cow<'static, str>, + args: ArgsType, + validation_hash: Option<[u8; 32]>, + _marker: PhantomData, +} + +/// A dynamic View Function payload. +pub type DynamicPayload = StaticPayload; + +impl Payload + for StaticPayload +{ + type ArgsType = ArgsType; + type ReturnType = ReturnType; + + fn pallet_name(&self) -> &str { + &self.pallet_name + } + + fn function_name(&self) -> &str { + &self.function_name + } + + fn args(&self) -> &Self::ArgsType { + &self.args + } + + fn validation_hash(&self) -> Option<[u8; 32]> { + self.validation_hash + } +} + +impl StaticPayload { + /// Create a new [`StaticPayload`] for a View Function call. + pub fn new( + pallet_name: impl Into, + function_name: impl Into, + args: ArgsType, + ) -> Self { + StaticPayload { + pallet_name: pallet_name.into().into(), + function_name: function_name.into().into(), + args, + validation_hash: None, + _marker: PhantomData, + } + } + + /// Create a new static [`StaticPayload`] for a View Function call + /// using static function name and scale-encoded argument data. + /// + /// This is only expected to be used from codegen. 
+ #[doc(hidden)] + pub fn new_static( + pallet_name: &'static str, + function_name: &'static str, + args: ArgsType, + hash: [u8; 32], + ) -> StaticPayload { + StaticPayload { + pallet_name: Cow::Borrowed(pallet_name), + function_name: Cow::Borrowed(function_name), + args, + validation_hash: Some(hash), + _marker: core::marker::PhantomData, + } + } + + /// Do not validate this call prior to submitting it. + pub fn unvalidated(self) -> Self { + Self { validation_hash: None, ..self } + } +} + +/// Create a new [`DynamicPayload`] to call a View Function. +pub fn dynamic( + pallet_name: impl Into, + function_name: impl Into, + args: ArgsType, +) -> DynamicPayload { + DynamicPayload::new(pallet_name, function_name, args) +} diff --git a/vendor/pezkuwi-subxt/lightclient/Cargo.toml b/vendor/pezkuwi-subxt/lightclient/Cargo.toml new file mode 100644 index 00000000..678dbad6 --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/Cargo.toml @@ -0,0 +1,77 @@ +[package] +name = "pezkuwi-subxt-lightclient" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "../README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Light Client for chain interaction" +keywords = ["blockchain", "parity", "bizinikiwi"] + +[lints] +workspace = true + +[features] +default = ["native"] + +# Enable this for native (ie non web/wasm builds). +# Exactly 1 of "web" and "native" is expected. +native = [ + "smoldot-light/std", + "tokio/rt", +] + +# Enable this for web/wasm builds. +# Exactly 1 of "web" and "native" is expected. +web = [ + "getrandom/js", + "smoldot/std", + + # For the light-client platform. + "futures-timer/wasm-bindgen", + "pin-project", + "wasm-bindgen-futures", + "web-time", + + # For websocket. 
+ "js-sys", + "send_wrapper", + "wasm-bindgen", + "web-sys", +] + +[dependencies] +futures = { workspace = true, features = ["async-await"] } +futures-util = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = ["default", "raw_value"] } +smoldot-light = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["sync"] } +tokio-stream = { workspace = true } +tracing = { workspace = true } + +# Only needed for web +futures-timer = { workspace = true, optional = true } +getrandom = { workspace = true, optional = true } +js-sys = { workspace = true, optional = true } +pin-project = { workspace = true, optional = true } +send_wrapper = { workspace = true, optional = true } +smoldot = { workspace = true, optional = true } +wasm-bindgen = { workspace = true, optional = true } +wasm-bindgen-futures = { workspace = true, optional = true } +web-sys = { workspace = true, optional = true } +web-time = { workspace = true, optional = true } + +[package.metadata.docs.rs] +default-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true diff --git a/vendor/pezkuwi-subxt/lightclient/src/background.rs b/vendor/pezkuwi-subxt/lightclient/src/background.rs new file mode 100644 index 00000000..cdf61d6e --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/background.rs @@ -0,0 +1,486 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use crate::{JsonRpcError, LightClientRpcError, rpc::RpcResponse, shared_client::SharedClient}; +use futures::{FutureExt, stream::StreamExt}; +use serde_json::value::RawValue; +use smoldot_light::platform::PlatformRef; +use std::{collections::HashMap, str::FromStr}; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::UnboundedReceiverStream; + +const LOG_TARGET: &str = "subxt-light-client-background-task"; + +/// Response from [`BackgroundTaskHandle::request()`]. +pub type MethodResponse = Result, LightClientRpcError>; + +/// Response from [`BackgroundTaskHandle::subscribe()`]. +pub type SubscriptionResponse = Result< + (SubscriptionId, mpsc::UnboundedReceiver, JsonRpcError>>), + LightClientRpcError, +>; + +/// Type of subscription IDs we can get back. +pub type SubscriptionId = String; + +/// Message protocol between the front-end client that submits the RPC requests +/// and the background task which fetches responses from Smoldot. Hidden behind +/// the [`BackgroundTaskHandle`]. +#[derive(Debug)] +enum Message { + /// The RPC method request. + Request { + /// The method of the request. + method: String, + /// The parameters of the request. + params: Option>, + /// Channel used to send back the method response. + sender: oneshot::Sender, + }, + /// The RPC subscription (pub/sub) request. + Subscription { + /// The method of the request. + method: String, + /// The method to unsubscribe. + unsubscribe_method: String, + /// The parameters of the request. + params: Option>, + /// Channel used to send back the subscription response. + sender: oneshot::Sender, + }, +} + +/// A handle to communicate with the background task. +#[derive(Clone, Debug)] +pub struct BackgroundTaskHandle { + to_backend: mpsc::UnboundedSender, +} + +impl BackgroundTaskHandle { + /// Make an RPC request via the background task. 
+ pub async fn request(&self, method: String, params: Option>) -> MethodResponse { + let (tx, rx) = oneshot::channel(); + self.to_backend + .send(Message::Request { method, params, sender: tx }) + .map_err(|_e| LightClientRpcError::BackgroundTaskDropped)?; + + match rx.await { + Err(_e) => Err(LightClientRpcError::BackgroundTaskDropped), + Ok(response) => response, + } + } + + /// Subscribe to some RPC method via the background task. + pub async fn subscribe( + &self, + method: String, + params: Option>, + unsubscribe_method: String, + ) -> SubscriptionResponse { + let (tx, rx) = oneshot::channel(); + self.to_backend + .send(Message::Subscription { method, params, unsubscribe_method, sender: tx }) + .map_err(|_e| LightClientRpcError::BackgroundTaskDropped)?; + + match rx.await { + Err(_e) => Err(LightClientRpcError::BackgroundTaskDropped), + Ok(response) => response, + } + } +} + +/// A background task which runs with [`BackgroundTask::run()`] and manages messages +/// coming to/from Smoldot. +#[allow(clippy::type_complexity)] +pub struct BackgroundTask { + channels: BackgroundTaskChannels, + data: BackgroundTaskData, +} + +impl BackgroundTask { + /// Constructs a new [`BackgroundTask`]. + pub(crate) fn new( + client: SharedClient, + chain_id: smoldot_light::ChainId, + from_back: smoldot_light::JsonRpcResponses, + ) -> (BackgroundTask, BackgroundTaskHandle) { + let (tx, rx) = mpsc::unbounded_channel(); + + let bg_task = BackgroundTask { + channels: BackgroundTaskChannels { + from_front: UnboundedReceiverStream::new(rx), + from_back, + }, + data: BackgroundTaskData { + client, + chain_id, + last_request_id: 0, + pending_subscriptions: HashMap::new(), + requests: HashMap::new(), + subscriptions: HashMap::new(), + }, + }; + + let bg_handle = BackgroundTaskHandle { to_backend: tx }; + + (bg_task, bg_handle) + } + + /// Run the background task, which: + /// - Forwards messages/subscription requests to Smoldot from the front end. 
+ /// - Forwards responses back from Smoldot to the front end. + pub async fn run(self) { + let chain_id = self.data.chain_id; + let mut channels = self.channels; + let mut data = self.data; + + loop { + tokio::pin! { + let from_front_fut = channels.from_front.next().fuse(); + let from_back_fut = channels.from_back.next().fuse(); + } + + futures::select! { + // Message coming from the front end/client. + front_message = from_front_fut => { + let Some(message) = front_message else { + tracing::trace!(target: LOG_TARGET, "Subxt channel closed"); + break; + }; + tracing::trace!( + target: LOG_TARGET, + "Received register message {:?}", + message + ); + + data.handle_requests(message).await; + }, + // Message coming from Smoldot. + back_message = from_back_fut => { + let Some(back_message) = back_message else { + tracing::trace!(target: LOG_TARGET, "Smoldot RPC responses channel closed"); + break; + }; + + tracing::trace!( + target: LOG_TARGET, + "Received smoldot RPC chain {chain_id:?} result {}", + trim_message(&back_message), + ); + + data.handle_rpc_response(back_message); + } + } + } + + tracing::trace!(target: LOG_TARGET, "Task closed"); + } +} + +struct BackgroundTaskChannels { + /// Messages sent into this background task from the front end. + from_front: UnboundedReceiverStream, + /// Messages sent into the background task from Smoldot. + from_back: smoldot_light::JsonRpcResponses, +} + +struct BackgroundTaskData { + /// A smoldot light client that can be shared. + client: SharedClient, + /// Knowing the chain ID helps with debugging, but isn't otherwise necessary. + chain_id: smoldot_light::ChainId, + /// Know which Id to use next for new requests/subscriptions. + last_request_id: usize, + /// Map the request ID of a RPC method to the frontend `Sender`. + requests: HashMap>, + /// Subscription calls first need to make a plain RPC method + /// request to obtain the subscription ID. 
+ /// + /// The RPC method request is made in the background and the response should + /// not be sent back to the user. + /// Map the request ID of a RPC method to the frontend `Sender`. + pending_subscriptions: HashMap, + /// Map the subscription ID to the frontend `Sender`. + /// + /// The subscription ID is entirely generated by the node (smoldot). Therefore, it is + /// possible for two distinct subscriptions of different chains to have the same subscription + /// ID. + subscriptions: HashMap, +} + +/// The state needed to resolve the subscription ID and send +/// back the response to frontend. +struct PendingSubscription { + /// Send the method response ID back to the user. + /// + /// It contains the subscription ID if successful, or an JSON RPC error object. + response_sender: oneshot::Sender, + /// The unsubscribe method to call when the user drops the receiver + /// part of the channel. + unsubscribe_method: String, +} + +/// The state of the subscription. +struct ActiveSubscription { + /// Channel to send the subscription notifications back to frontend. + notification_sender: mpsc::UnboundedSender, JsonRpcError>>, + /// The unsubscribe method to call when the user drops the receiver + /// part of the channel. + unsubscribe_method: String, +} + +fn trim_message(s: &str) -> &str { + const MAX_SIZE: usize = 512; + if s.len() < MAX_SIZE { + return s; + } + + match s.char_indices().nth(MAX_SIZE) { + None => s, + Some((idx, _)) => &s[..idx], + } +} + +impl BackgroundTaskData { + /// Fetch and increment the request ID. + fn next_id(&mut self) -> usize { + self.last_request_id = self.last_request_id.wrapping_add(1); + self.last_request_id + } + + /// Handle the registration messages received from the user. 
+ async fn handle_requests(&mut self, message: Message) { + match message { + Message::Request { method, params, sender } => { + let id = self.next_id(); + let chain_id = self.chain_id; + + let params = match ¶ms { + Some(params) => params.get(), + None => "null", + }; + let request = format!( + r#"{{"jsonrpc":"2.0","id":"{id}", "method":"{method}","params":{params}}}"# + ); + + self.requests.insert(id, sender); + tracing::trace!(target: LOG_TARGET, "Tracking request id={id} chain={chain_id:?}"); + + let result = self.client.json_rpc_request(request, chain_id); + if let Err(err) = result { + tracing::warn!( + target: LOG_TARGET, + "Cannot send RPC request to lightclient {:?}", + err.to_string() + ); + + let sender = self.requests.remove(&id).expect("Channel is inserted above; qed"); + + // Send the error back to frontend. + if sender.send(Err(LightClientRpcError::SmoldotError(err.to_string()))).is_err() + { + tracing::warn!( + target: LOG_TARGET, + "Cannot send RPC request error to id={id}", + ); + } + } else { + tracing::trace!(target: LOG_TARGET, "Submitted to smoldot request with id={id}"); + } + }, + Message::Subscription { method, unsubscribe_method, params, sender } => { + let id = self.next_id(); + let chain_id = self.chain_id; + + // For subscriptions we need to make a plain RPC request to the subscription method. + // The server will return as a result the subscription ID. 
+ let params = match ¶ms { + Some(params) => params.get(), + None => "null", + }; + let request = format!( + r#"{{"jsonrpc":"2.0","id":"{id}", "method":"{method}","params":{params}}}"# + ); + + tracing::trace!(target: LOG_TARGET, "Tracking subscription request id={id} chain={chain_id:?}"); + let pending_subscription = + PendingSubscription { response_sender: sender, unsubscribe_method }; + self.pending_subscriptions.insert(id, pending_subscription); + + let result = self.client.json_rpc_request(request, chain_id); + if let Err(err) = result { + tracing::warn!( + target: LOG_TARGET, + "Cannot send RPC request to lightclient {:?}", + err.to_string() + ); + let subscription_id_state = self + .pending_subscriptions + .remove(&id) + .expect("Channels are inserted above; qed"); + + // Send the error back to frontend. + if subscription_id_state + .response_sender + .send(Err(LightClientRpcError::SmoldotError(err.to_string()))) + .is_err() + { + tracing::warn!( + target: LOG_TARGET, + "Cannot send RPC request error to id={id}", + ); + } + } else { + tracing::trace!(target: LOG_TARGET, "Submitted to smoldot subscription request with id={id}"); + } + }, + }; + } + + /// Parse the response received from the light client and sent it to the appropriate user. + fn handle_rpc_response(&mut self, response: String) { + let chain_id = self.chain_id; + tracing::trace!(target: LOG_TARGET, "Received from smoldot response='{}' chain={chain_id:?}", trim_message(&response)); + + match RpcResponse::from_str(&response) { + Ok(RpcResponse::Method { id, result }) => { + let Ok(id) = id.parse::() else { + tracing::warn!(target: LOG_TARGET, "Cannot send response. Id={id} chain={chain_id:?} is not a valid number"); + return; + }; + + // Send the response back. 
+ if let Some(sender) = self.requests.remove(&id) { + if sender.send(Ok(result)).is_err() { + tracing::warn!( + target: LOG_TARGET, + "Cannot send method response to id={id} chain={chain_id:?}", + ); + } + } else if let Some(pending_subscription) = self.pending_subscriptions.remove(&id) { + let Ok(sub_id) = serde_json::from_str::(result.get()) else { + tracing::warn!( + target: LOG_TARGET, + "Subscription id='{result}' chain={chain_id:?} is not a valid string", + ); + return; + }; + + tracing::trace!(target: LOG_TARGET, "Received subscription id={sub_id} chain={chain_id:?}"); + + let (sub_tx, sub_rx) = mpsc::unbounded_channel(); + + // Send the method response and a channel to receive notifications back. + if pending_subscription + .response_sender + .send(Ok((sub_id.clone(), sub_rx))) + .is_err() + { + tracing::warn!( + target: LOG_TARGET, + "Cannot send subscription ID response to id={id} chain={chain_id:?}", + ); + return; + } + + // Store the other end of the notif channel to send future subscription + // notifications to. + self.subscriptions.insert( + sub_id, + ActiveSubscription { + notification_sender: sub_tx, + unsubscribe_method: pending_subscription.unsubscribe_method, + }, + ); + } else { + tracing::warn!( + target: LOG_TARGET, + "Response id={id} chain={chain_id:?} is not tracked", + ); + } + }, + Ok(RpcResponse::MethodError { id, error }) => { + let Ok(id) = id.parse::() else { + tracing::warn!(target: LOG_TARGET, "Cannot send error. 
Id={id} chain={chain_id:?} is not a valid number"); + return; + }; + + if let Some(sender) = self.requests.remove(&id) { + if sender + .send(Err(LightClientRpcError::JsonRpcError(JsonRpcError(error)))) + .is_err() + { + tracing::warn!( + target: LOG_TARGET, + "Cannot send method response to id={id} chain={chain_id:?}", + ); + } + } else if let Some(subscription_id_state) = self.pending_subscriptions.remove(&id) { + if subscription_id_state + .response_sender + .send(Err(LightClientRpcError::JsonRpcError(JsonRpcError(error)))) + .is_err() + { + tracing::warn!( + target: LOG_TARGET, + "Cannot send method response to id {id} chain={chain_id:?}", + ); + } + } + }, + Ok(RpcResponse::Notification { method, subscription_id, result }) => { + let Some(active_subscription) = self.subscriptions.get_mut(&subscription_id) else { + tracing::warn!( + target: LOG_TARGET, + "Subscription response id={subscription_id} chain={chain_id:?} method={method} is not tracked", + ); + return; + }; + if active_subscription.notification_sender.send(Ok(result)).is_err() { + self.unsubscribe(&subscription_id, chain_id); + } + }, + Ok(RpcResponse::NotificationError { method, subscription_id, error }) => { + let Some(active_subscription) = self.subscriptions.get_mut(&subscription_id) else { + tracing::warn!( + target: LOG_TARGET, + "Subscription error id={subscription_id} chain={chain_id:?} method={method} is not tracked", + ); + return; + }; + if active_subscription.notification_sender.send(Err(JsonRpcError(error))).is_err() { + self.unsubscribe(&subscription_id, chain_id); + } + }, + Err(err) => { + tracing::warn!(target: LOG_TARGET, "cannot decode RPC response {:?}", err); + }, + } + } + + // Unsubscribe from a subscription. + fn unsubscribe(&mut self, subscription_id: &str, chain_id: smoldot_light::ChainId) { + let Some(active_subscription) = self.subscriptions.remove(subscription_id) else { + // Subscription doesn't exist so nothing more to do. 
+ return; + }; + + // Build a call to unsubscribe from this method. + let unsub_id = self.next_id(); + let request = format!( + r#"{{"jsonrpc":"2.0","id":"{}", "method":"{}","params":["{}"]}}"#, + unsub_id, active_subscription.unsubscribe_method, subscription_id + ); + + // Submit it. + if let Err(err) = self.client.json_rpc_request(request, chain_id) { + tracing::warn!( + target: LOG_TARGET, + "Failed to unsubscribe id={subscription_id} chain={chain_id:?} method={:?} err={err:?}", active_subscription.unsubscribe_method + ); + } else { + tracing::debug!(target: LOG_TARGET,"Unsubscribe id={subscription_id} chain={chain_id:?} method={:?}", active_subscription.unsubscribe_method); + } + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/chain_config.rs b/vendor/pezkuwi-subxt/lightclient/src/chain_config.rs new file mode 100644 index 00000000..61f738a6 --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/chain_config.rs @@ -0,0 +1,65 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use serde_json::Value; +use std::borrow::Cow; + +/// Something went wrong building chain config. +#[non_exhaustive] +#[derive(thiserror::Error, Debug)] +pub enum ChainConfigError { + /// The provided chain spec is the wrong shape. + #[error("Invalid chain spec format")] + InvalidSpecFormat, +} + +/// Configuration to connect to a chain. +pub struct ChainConfig<'a> { + // The chain spec to use. + chain_spec: Cow<'a, str>, +} + +impl<'a> From<&'a str> for ChainConfig<'a> { + fn from(chain_spec: &'a str) -> Self { + ChainConfig::chain_spec(chain_spec) + } +} + +impl From for ChainConfig<'_> { + fn from(chain_spec: String) -> Self { + ChainConfig::chain_spec(chain_spec) + } +} + +impl<'a> ChainConfig<'a> { + /// Construct a chain config from a chain spec. 
+ pub fn chain_spec(chain_spec: impl Into>) -> Self { + ChainConfig { chain_spec: chain_spec.into() } + } + + /// Set the bootnodes to the given ones. + pub fn set_bootnodes>( + self, + bootnodes: impl IntoIterator, + ) -> Result { + let mut chain_spec_json: Value = serde_json::from_str(&self.chain_spec) + .map_err(|_e| ChainConfigError::InvalidSpecFormat)?; + + if let Value::Object(map) = &mut chain_spec_json { + let bootnodes = + bootnodes.into_iter().map(|s| Value::String(s.as_ref().to_owned())).collect(); + + map.insert("bootNodes".to_string(), Value::Array(bootnodes)); + } else { + return Err(ChainConfigError::InvalidSpecFormat); + } + + Ok(ChainConfig { chain_spec: Cow::Owned(chain_spec_json.to_string()) }) + } + + // Used internally to fetch the chain spec back out. + pub(crate) fn as_chain_spec(&self) -> &str { + &self.chain_spec + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/lib.rs b/vendor/pezkuwi-subxt/lightclient/src/lib.rs new file mode 100644 index 00000000..a210ed8f --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/lib.rs @@ -0,0 +1,258 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A wrapper around [`smoldot_light`] which provides an light client capable of connecting +//! to Bizinikiwi based chains. 
+ +#![deny(missing_docs)] +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(any( + all(feature = "web", feature = "native"), + not(any(feature = "web", feature = "native")) +))] +compile_error!("subxt-lightclient: exactly one of the 'web' and 'native' features should be used."); + +mod platform; +mod shared_client; +// mod receiver; +mod background; +mod chain_config; +mod rpc; + +use background::{BackgroundTask, BackgroundTaskHandle}; +use futures::Stream; +use platform::DefaultPlatform; +use serde_json::value::RawValue; +use shared_client::SharedClient; +use std::future::Future; +use tokio::sync::mpsc; + +pub use chain_config::{ChainConfig, ChainConfigError}; + +/// Things that can go wrong when constructing the [`LightClient`]. +#[derive(Debug, thiserror::Error)] +pub enum LightClientError { + /// Error encountered while adding the chain to the light-client. + #[error("Failed to add the chain to the light client: {0}.")] + AddChainError(String), +} + +/// Things that can go wrong calling methods of [`LightClientRpc`]. +#[derive(Debug, thiserror::Error)] +pub enum LightClientRpcError { + /// Error response from the JSON-RPC server. + #[error(transparent)] + JsonRpcError(JsonRpcError), + /// Smoldot could not handle the RPC call. + #[error("Smoldot could not handle the RPC call: {0}.")] + SmoldotError(String), + /// Background task dropped. + #[error("The background task was dropped.")] + BackgroundTaskDropped, +} + +/// An error response from the JSON-RPC server (ie smoldot) in response to +/// a method call or as a subscription notification. +#[derive(Debug, thiserror::Error)] +#[error("RPC Error: {0}.")] +pub struct JsonRpcError(Box); + +impl JsonRpcError { + /// Attempt to deserialize this error into some type. + pub fn try_deserialize<'a, T: serde::de::Deserialize<'a>>( + &'a self, + ) -> Result { + serde_json::from_str(self.0.get()) + } +} + +/// This represents a single light client connection to the network. 
Instantiate +/// it with [`LightClient::relay_chain()`] to communicate with a relay chain, and +/// then call [`LightClient::parachain()`] to establish connections to parachains. +#[derive(Clone)] +pub struct LightClient { + client: SharedClient, + relay_chain_id: smoldot_light::ChainId, +} + +impl LightClient { + /// Given a chain spec, establish a connection to a relay chain. Any subsequent calls to + /// [`LightClient::parachain()`] will set this as the relay chain. + /// + /// # Panics + /// + /// The panic behaviour depends on the feature flag being used: + /// + /// ## Native + /// + /// Panics when called outside of a `tokio` runtime context. + /// + /// ## Web + /// + /// If smoldot panics, then the promise created will be leaked. For more details, see + /// . + pub fn relay_chain<'a>( + chain_config: impl Into>, + ) -> Result<(Self, LightClientRpc), LightClientError> { + let mut client = smoldot_light::Client::new(platform::build_platform()); + let chain_config = chain_config.into(); + let chain_spec = chain_config.as_chain_spec(); + + let config = smoldot_light::AddChainConfig { + specification: chain_spec, + json_rpc: smoldot_light::AddChainConfigJsonRpc::Enabled { + max_pending_requests: u32::MAX.try_into().unwrap(), + max_subscriptions: u32::MAX, + }, + database_content: "", + potential_relay_chains: std::iter::empty(), + user_data: (), + }; + + let added_chain = client + .add_chain(config) + .map_err(|err| LightClientError::AddChainError(err.to_string()))?; + + let relay_chain_id = added_chain.chain_id; + let rpc_responses = + added_chain.json_rpc_responses.expect("Light client RPC configured; qed"); + let shared_client: SharedClient<_> = client.into(); + + let light_client_rpc = + LightClientRpc::new_raw(shared_client.clone(), relay_chain_id, rpc_responses); + let light_client = Self { client: shared_client, relay_chain_id }; + + Ok((light_client, light_client_rpc)) + } + + /// Given a chain spec, establish a connection to a parachain. 
+ /// + /// # Panics + /// + /// The panic behaviour depends on the feature flag being used: + /// + /// ## Native + /// + /// Panics when called outside of a `tokio` runtime context. + /// + /// ## Web + /// + /// If smoldot panics, then the promise created will be leaked. For more details, see + /// . + pub fn parachain<'a>( + &self, + chain_config: impl Into>, + ) -> Result { + let chain_config = chain_config.into(); + let chain_spec = chain_config.as_chain_spec(); + + let config = smoldot_light::AddChainConfig { + specification: chain_spec, + json_rpc: smoldot_light::AddChainConfigJsonRpc::Enabled { + max_pending_requests: u32::MAX.try_into().unwrap(), + max_subscriptions: u32::MAX, + }, + database_content: "", + potential_relay_chains: std::iter::once(self.relay_chain_id), + user_data: (), + }; + + let added_chain = self + .client + .add_chain(config) + .map_err(|err| LightClientError::AddChainError(err.to_string()))?; + + let chain_id = added_chain.chain_id; + let rpc_responses = + added_chain.json_rpc_responses.expect("Light client RPC configured; qed"); + + Ok(LightClientRpc::new_raw(self.client.clone(), chain_id, rpc_responses)) + } +} + +/// This represents a single RPC connection to a specific chain, and is constructed by calling +/// one of the methods on [`LightClient`]. Using this, you can make RPC requests to the chain. +#[derive(Clone, Debug)] +pub struct LightClientRpc { + handle: BackgroundTaskHandle, +} + +impl LightClientRpc { + // Dev note: this would provide a "low level" interface if one is needed. + // Do we actually need to provide this, or can we entirely hide Smoldot? 
+ pub(crate) fn new_raw( + client: impl Into>, + chain_id: smoldot_light::ChainId, + rpc_responses: smoldot_light::JsonRpcResponses, + ) -> Self + where + TPlat: smoldot_light::platform::PlatformRef + Send + 'static, + TChain: Send + 'static, + { + let (background_task, background_handle) = + BackgroundTask::new(client.into(), chain_id, rpc_responses); + + // For now we spawn the background task internally, but later we can expose + // methods to give this back to the user so that they can exert backpressure. + spawn(async move { background_task.run().await }); + + LightClientRpc { handle: background_handle } + } + + /// Make an RPC request to a chain, getting back a result. + pub async fn request( + &self, + method: String, + params: Option>, + ) -> Result, LightClientRpcError> { + self.handle.request(method, params).await + } + + /// Subscribe to some RPC method, getting back a stream of notifications. + pub async fn subscribe( + &self, + method: String, + params: Option>, + unsub: String, + ) -> Result { + let (id, notifications) = self.handle.subscribe(method, params, unsub).await?; + Ok(LightClientRpcSubscription { id, notifications }) + } +} + +/// A stream of notifications handed back when [`LightClientRpc::subscribe`] is called. +pub struct LightClientRpcSubscription { + notifications: mpsc::UnboundedReceiver, JsonRpcError>>, + id: String, +} + +impl LightClientRpcSubscription { + /// Return the subscription ID + pub fn id(&self) -> &str { + &self.id + } +} + +impl Stream for LightClientRpcSubscription { + type Item = Result, JsonRpcError>; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.notifications.poll_recv(cx) + } +} + +/// A quick helper to spawn a task that works for WASM. 
+fn spawn(future: F) { + #[cfg(feature = "native")] + tokio::spawn(async move { + future.await; + }); + #[cfg(feature = "web")] + wasm_bindgen_futures::spawn_local(async move { + future.await; + }); +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/platform/mod.rs b/vendor/pezkuwi-subxt/lightclient/src/platform/mod.rs new file mode 100644 index 00000000..c13178ab --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/platform/mod.rs @@ -0,0 +1,37 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Default platform for WASM environments. + +#[cfg(feature = "web")] +mod wasm_helpers; +#[cfg(feature = "web")] +mod wasm_platform; +#[cfg(feature = "web")] +mod wasm_socket; + +pub use helpers::{DefaultPlatform, build_platform}; + +#[cfg(feature = "native")] +mod helpers { + use smoldot_light::platform::default::DefaultPlatform as Platform; + use std::sync::Arc; + + pub type DefaultPlatform = Arc; + + pub fn build_platform() -> DefaultPlatform { + Platform::new("subxt-light-client".into(), env!("CARGO_PKG_VERSION").into()) + } +} + +#[cfg(feature = "web")] +mod helpers { + use super::wasm_platform::SubxtPlatform as Platform; + + pub type DefaultPlatform = Platform; + + pub fn build_platform() -> DefaultPlatform { + Platform::new() + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_helpers.rs b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_helpers.rs new file mode 100644 index 00000000..aae06d4a --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_helpers.rs @@ -0,0 +1,42 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Wasm implementation for the light client's platform using +//! custom websockets. 
+ +use super::wasm_socket::WasmSocket; + +use core::time::Duration; +use futures_util::{FutureExt, future}; + +pub fn now_from_unix_epoch() -> Duration { + web_time::SystemTime::now() + .duration_since(web_time::SystemTime::UNIX_EPOCH) + .unwrap_or_else(|_| { + panic!("Invalid systime cannot be configured earlier than `UNIX_EPOCH`") + }) +} + +pub type Instant = web_time::Instant; + +pub fn now() -> Instant { + web_time::Instant::now() +} + +pub type Delay = future::BoxFuture<'static, ()>; + +pub fn sleep(duration: Duration) -> Delay { + futures_timer::Delay::new(duration).boxed() +} + +/// Implementation detail of a stream from the `SubxtPlatform`. +#[pin_project::pin_project] +pub struct Stream( + #[pin] + pub smoldot::libp2p::with_buffers::WithBuffers< + future::BoxFuture<'static, Result>, + WasmSocket, + Instant, + >, +); diff --git a/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_platform.rs b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_platform.rs new file mode 100644 index 00000000..d89d46b1 --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_platform.rs @@ -0,0 +1,212 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::wasm_socket::WasmSocket; + +use core::{ + fmt::{self, Write as _}, + net::IpAddr, + time::Duration, +}; +use futures::prelude::*; +use smoldot::libp2p::with_buffers; +use smoldot_light::platform::{ + Address, ConnectionType, LogLevel, MultiStreamAddress, MultiStreamWebRtcConnection, + PlatformRef, SubstreamDirection, +}; + +use std::{io, net::SocketAddr, pin::Pin}; + +const LOG_TARGET: &str = "subxt-platform-wasm"; + +/// Subxt platform implementation for wasm. +/// +/// This implementation is a conversion of the implementation from the smoldot: +/// https://github.com/smol-dot/smoldot/blob/6401d4df90569e23073d646b14a8fbf9f7e6bdd3/light-base/src/platform/default.rs#L83. 
+/// +/// This platform will evolve over time and we'll need to keep this code in sync. +#[derive(Clone)] +pub struct SubxtPlatform {} + +impl SubxtPlatform { + pub fn new() -> Self { + SubxtPlatform {} + } +} + +impl PlatformRef for SubxtPlatform { + type Delay = super::wasm_helpers::Delay; + type Instant = super::wasm_helpers::Instant; + type MultiStream = std::convert::Infallible; + type Stream = super::wasm_helpers::Stream; + type StreamConnectFuture = future::Ready; + type MultiStreamConnectFuture = future::Pending>; + type ReadWriteAccess<'a> = with_buffers::ReadWriteAccess<'a, Self::Instant>; + type StreamUpdateFuture<'a> = future::BoxFuture<'a, ()>; + type StreamErrorRef<'a> = &'a std::io::Error; + type NextSubstreamFuture<'a> = future::Pending>; + + fn now_from_unix_epoch(&self) -> Duration { + super::wasm_helpers::now_from_unix_epoch() + } + + fn now(&self) -> Self::Instant { + super::wasm_helpers::now() + } + + fn fill_random_bytes(&self, buffer: &mut [u8]) { + // This could fail if the system does not have access to a good source of entropy. + // Note: `rand::RngCore::fill_bytes` also panics on errors and `rand::OsCore` calls + // identically into `getrandom::getrandom`. 
+ getrandom::getrandom(buffer).expect("Cannot fill random bytes"); + } + + fn sleep(&self, duration: Duration) -> Self::Delay { + super::wasm_helpers::sleep(duration) + } + + fn sleep_until(&self, when: Self::Instant) -> Self::Delay { + self.sleep(when.saturating_duration_since(self.now())) + } + + fn spawn_task( + &self, + _task_name: std::borrow::Cow<'_, str>, + task: impl future::Future + Send + 'static, + ) { + wasm_bindgen_futures::spawn_local(task); + } + + fn client_name(&self) -> std::borrow::Cow<'_, str> { + "subxt-light-client".into() + } + + fn client_version(&self) -> std::borrow::Cow<'_, str> { + env!("CARGO_PKG_VERSION").into() + } + + fn supports_connection_type(&self, connection_type: ConnectionType) -> bool { + let result = matches!( + connection_type, + ConnectionType::WebSocketIpv4 { .. } | + ConnectionType::WebSocketIpv6 { .. } | + ConnectionType::WebSocketDns { .. } + ); + + tracing::trace!( + target: LOG_TARGET, + "Supports connection type={:?} result={}", + connection_type, result + ); + + result + } + + fn connect_stream(&self, multiaddr: Address) -> Self::StreamConnectFuture { + tracing::trace!(target: LOG_TARGET, "Connect stream to multiaddr={:?}", multiaddr); + + // `PlatformRef` trait guarantees that `connect_stream` is only called with addresses + // stated in `supports_connection_type`. + let addr = match multiaddr { + Address::WebSocketDns { hostname, port, secure: true } => { + format!("wss://{hostname}:{port}") + }, + Address::WebSocketDns { hostname, port, secure: false } => { + format!("ws://{hostname}:{port}") + }, + Address::WebSocketIp { ip: IpAddr::V4(ip), port } => { + let addr = SocketAddr::from((ip, port)); + format!("ws://{addr}") + }, + Address::WebSocketIp { ip: IpAddr::V6(ip), port } => { + let addr = SocketAddr::from((ip, port)); + format!("ws://{addr}") + }, + + // The API user of the `PlatformRef` trait is never supposed to open connections of + // a type that isn't supported. 
+ _ => { + unreachable!( + "Connecting to an address not supported. This code path indicates a bug in smoldot. Please raise an issue at https://github.com/smol-dot/smoldot/issues" + ) + }, + }; + + let socket_future = async move { + tracing::debug!(target: LOG_TARGET, "Connecting to addr={addr}"); + WasmSocket::new(addr.as_str()).map_err(|err| std::io::Error::other(err.to_string())) + }; + + future::ready(super::wasm_helpers::Stream(with_buffers::WithBuffers::new(Box::pin( + socket_future, + )))) + } + + fn connect_multistream(&self, _address: MultiStreamAddress) -> Self::MultiStreamConnectFuture { + panic!( + "Multistreams are not currently supported. This code path indicates a bug in smoldot. Please raise an issue at https://github.com/smol-dot/smoldot/issues" + ) + } + + fn open_out_substream(&self, c: &mut Self::MultiStream) { + // This function can only be called with so-called "multi-stream" connections. We never + // open such connection. + match *c {} + } + + fn next_substream(&self, c: &'_ mut Self::MultiStream) -> Self::NextSubstreamFuture<'_> { + // This function can only be called with so-called "multi-stream" connections. We never + // open such connection. 
+ match *c {} + } + + fn read_write_access<'a>( + &self, + stream: Pin<&'a mut Self::Stream>, + ) -> Result, &'a io::Error> { + let stream = stream.project(); + stream.0.read_write_access(Self::Instant::now()) + } + + fn wait_read_write_again<'a>( + &self, + stream: Pin<&'a mut Self::Stream>, + ) -> Self::StreamUpdateFuture<'a> { + let stream = stream.project(); + Box::pin(stream.0.wait_read_write_again(|when| async move { + let now = super::wasm_helpers::now(); + let duration = when.saturating_duration_since(now); + super::wasm_helpers::sleep(duration).await; + })) + } + + fn log<'a>( + &self, + log_level: LogLevel, + log_target: &'a str, + message: &'a str, + key_values: impl Iterator, + ) { + let mut message_build = String::with_capacity(128); + message_build.push_str(message); + let mut first = true; + for (key, value) in key_values { + if first { + let _ = write!(message_build, "; "); + first = false; + } else { + let _ = write!(message_build, ", "); + } + let _ = write!(message_build, "{key}={value}"); + } + + match log_level { + LogLevel::Error => tracing::error!("target={log_target} {message_build}"), + LogLevel::Warn => tracing::warn!("target={log_target} {message_build}"), + LogLevel::Info => tracing::info!("target={log_target} {message_build}"), + LogLevel::Debug => tracing::debug!("target={log_target} {message_build}"), + LogLevel::Trace => tracing::trace!("target={log_target} {message_build}"), + }; + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_socket.rs b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_socket.rs new file mode 100644 index 00000000..ebd2ca61 --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/platform/wasm_socket.rs @@ -0,0 +1,240 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use futures::{io, prelude::*}; +use send_wrapper::SendWrapper; +use wasm_bindgen::{JsCast, prelude::*}; + +use std::{ + collections::VecDeque, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, +}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to connect {0}")] + ConnectionError(String), +} + +/// Websocket for WASM environments. +/// +/// This is a rust-based wrapper around browser's WebSocket API. +// Warning: It is not safe to have `Clone` on this structure. +pub struct WasmSocket { + /// Inner data shared between `poll` and web_sys callbacks. + inner: Arc>, + /// This implements `Send` and panics if the value is accessed + /// or dropped from another thread. + /// + /// This is safe in wasm environments. + socket: SendWrapper, + /// In memory callbacks to handle messages from the browser socket. + _callbacks: SendWrapper, +} + +/// The state of the [`WasmSocket`]. +#[derive(PartialEq, Eq, Clone, Copy)] +enum ConnectionState { + /// Initial state of the socket. + Connecting, + /// Socket is fully opened. + Opened, + /// Socket is closed. + Closed, + /// Error reported by callbacks. + Error, +} + +struct InnerWasmSocket { + /// The state of the connection. + state: ConnectionState, + /// Data buffer for the socket. + data: VecDeque, + /// Waker from `poll_read` / `poll_write`. + waker: Option, +} + +/// Registered callbacks of the [`WasmSocket`]. +/// +/// These need to be kept around until the socket is dropped. +type Callbacks = ( + Closure, + Closure, + Closure, + Closure, +); + +impl WasmSocket { + /// Establish a WebSocket connection. + /// + /// The error is a string representing the browser error. + /// Visit [MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket/WebSocket#exceptions_thrown) + /// for more info. 
+ pub fn new(addr: &str) -> Result { + let socket = match web_sys::WebSocket::new(addr) { + Ok(socket) => socket, + Err(err) => return Err(Error::ConnectionError(format!("{err:?}"))), + }; + + socket.set_binary_type(web_sys::BinaryType::Arraybuffer); + + let inner = Arc::new(Mutex::new(InnerWasmSocket { + state: ConnectionState::Connecting, + data: VecDeque::with_capacity(16384), + waker: None, + })); + + let open_callback = Closure::::new({ + let inner = inner.clone(); + move || { + let mut inner = inner.lock().expect("Mutex is poised; qed"); + inner.state = ConnectionState::Opened; + + if let Some(waker) = inner.waker.take() { + waker.wake(); + } + } + }); + socket.set_onopen(Some(open_callback.as_ref().unchecked_ref())); + + let message_callback = Closure::::new({ + let inner = inner.clone(); + move |event: web_sys::MessageEvent| { + let Ok(buffer) = event.data().dyn_into::() else { + panic!("Unexpected data format {:?}", event.data()); + }; + + let mut inner = inner.lock().expect("Mutex is poised; qed"); + let bytes = js_sys::Uint8Array::new(&buffer).to_vec(); + inner.data.extend(bytes); + + if let Some(waker) = inner.waker.take() { + waker.wake(); + } + } + }); + socket.set_onmessage(Some(message_callback.as_ref().unchecked_ref())); + + let error_callback = Closure::::new({ + let inner = inner.clone(); + move |_event: web_sys::Event| { + // Callback does not provide useful information, signal it back to the stream. 
+ let mut inner = inner.lock().expect("Mutex is poised; qed"); + inner.state = ConnectionState::Error; + + if let Some(waker) = inner.waker.take() { + waker.wake(); + } + } + }); + socket.set_onerror(Some(error_callback.as_ref().unchecked_ref())); + + let close_callback = Closure::::new({ + let inner = inner.clone(); + move |_event: web_sys::CloseEvent| { + let mut inner = inner.lock().expect("Mutex is poised; qed"); + inner.state = ConnectionState::Closed; + + if let Some(waker) = inner.waker.take() { + waker.wake(); + } + } + }); + socket.set_onclose(Some(close_callback.as_ref().unchecked_ref())); + + let callbacks = (open_callback, message_callback, error_callback, close_callback); + + Ok(Self { + inner, + socket: SendWrapper::new(socket), + _callbacks: SendWrapper::new(callbacks), + }) + } +} + +impl AsyncRead for WasmSocket { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let mut inner = self.inner.lock().expect("Mutex is poised; qed"); + inner.waker = Some(cx.waker().clone()); + + if self.socket.ready_state() == web_sys::WebSocket::CONNECTING { + return Poll::Pending; + } + + match inner.state { + ConnectionState::Error => Poll::Ready(Err(io::Error::other("Socket error"))), + ConnectionState::Closed => Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())), + ConnectionState::Connecting => Poll::Pending, + ConnectionState::Opened => { + if inner.data.is_empty() { + return Poll::Pending; + } + + let n = inner.data.len().min(buf.len()); + for k in buf.iter_mut().take(n) { + *k = inner.data.pop_front().expect("Buffer non empty; qed"); + } + Poll::Ready(Ok(n)) + }, + } + } +} + +impl AsyncWrite for WasmSocket { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let mut inner = self.inner.lock().expect("Mutex is poised; qed"); + inner.waker = Some(cx.waker().clone()); + + match inner.state { + ConnectionState::Error => Poll::Ready(Err(io::Error::other("Socket error"))), + 
ConnectionState::Closed => Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())), + ConnectionState::Connecting => Poll::Pending, + ConnectionState::Opened => match self.socket.send_with_u8_array(buf) { + Ok(()) => Poll::Ready(Ok(buf.len())), + Err(err) => Poll::Ready(Err(io::Error::other(format!("Write error: {err:?}")))), + }, + } + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.socket.ready_state() == web_sys::WebSocket::CLOSED { + return Poll::Ready(Ok(())); + } + + if self.socket.ready_state() != web_sys::WebSocket::CLOSING { + let _ = self.socket.close(); + } + + let mut inner = self.inner.lock().expect("Mutex is poised; qed"); + inner.waker = Some(cx.waker().clone()); + Poll::Pending + } +} + +impl Drop for WasmSocket { + fn drop(&mut self) { + if self.socket.ready_state() != web_sys::WebSocket::CLOSING { + let _ = self.socket.close(); + } + + self.socket.set_onopen(None); + self.socket.set_onmessage(None); + self.socket.set_onerror(None); + self.socket.set_onclose(None); + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/rpc.rs b/vendor/pezkuwi-subxt/lightclient/src/rpc.rs new file mode 100644 index 00000000..d87702aa --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/rpc.rs @@ -0,0 +1,126 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use serde::Deserialize; +use serde_json::value::RawValue; + +/// The RPC response from the light-client. +/// This can either be a response of a method, or a notification from a subscription. +#[derive(Debug, Clone)] +pub enum RpcResponse { + Method { + /// Response ID. + id: String, + /// The result of the method call. + result: Box, + }, + MethodError { + /// Response ID. + id: String, + /// Error. 
+ error: Box, + }, + Notification { + /// RPC method that generated the notification. + method: String, + /// Subscription ID. + subscription_id: String, + /// Result. + result: Box, + }, + NotificationError { + /// RPC method that generated the notification. + method: String, + /// Subscription ID. + subscription_id: String, + /// Result. + error: Box, + }, +} + +impl std::str::FromStr for RpcResponse { + type Err = (); + + fn from_str(response: &str) -> Result { + // Valid response + #[derive(Deserialize, Debug)] + struct Response { + #[allow(unused)] + jsonrpc: String, + id: String, + result: Box, + } + + // Error response + #[derive(Deserialize)] + struct ResponseError { + #[allow(unused)] + jsonrpc: String, + id: String, + error: Box, + } + + // Valid notification (subscription) response + #[derive(Deserialize)] + struct Notification { + #[allow(unused)] + jsonrpc: String, + method: String, + params: NotificationResultParams, + } + #[derive(Deserialize)] + struct NotificationResultParams { + subscription: String, + result: Box, + } + + // Error notification (subscription) response + #[derive(Deserialize)] + struct NotificationError { + #[allow(unused)] + jsonrpc: String, + method: String, + params: NotificationErrorParams, + } + #[derive(Deserialize)] + struct NotificationErrorParams { + /// The ID of the subscription. + subscription: String, + error: Box, + } + + // Try deserializing the response payload to one of the above. We can + // do this more efficiently eg how jsonrpsee_types does. 
+ + let result: Result = serde_json::from_str(response); + if let Ok(response) = result { + return Ok(RpcResponse::Method { id: response.id, result: response.result }); + } + let result: Result = serde_json::from_str(response); + if let Ok(response) = result { + return Ok(RpcResponse::Notification { + subscription_id: response.params.subscription, + method: response.method, + result: response.params.result, + }); + } + let result: Result = serde_json::from_str(response); + if let Ok(response) = result { + return Ok(RpcResponse::MethodError { id: response.id, error: response.error }); + } + let result: Result = serde_json::from_str(response); + if let Ok(response) = result { + return Ok(RpcResponse::NotificationError { + method: response.method, + subscription_id: response.params.subscription, + error: response.params.error, + }); + } + + // We couldn't decode into any of the above. We could pick one of the above` + // errors to return, but there's no real point since the string is obviously + // different from any of them. + Err(()) + } +} diff --git a/vendor/pezkuwi-subxt/lightclient/src/shared_client.rs b/vendor/pezkuwi-subxt/lightclient/src/shared_client.rs new file mode 100644 index 00000000..609fc5d9 --- /dev/null +++ b/vendor/pezkuwi-subxt/lightclient/src/shared_client.rs @@ -0,0 +1,42 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use smoldot_light as sl; +use std::sync::{Arc, Mutex}; + +/// This wraps [`smoldot_light::Client`] so that it can be cloned and shared. +#[derive(Clone)] +pub struct SharedClient { + client: Arc>>, +} + +impl From> + for SharedClient +{ + fn from(client: sl::Client) -> Self { + SharedClient { client: Arc::new(Mutex::new(client)) } + } +} + +impl SharedClient { + /// Delegates to [`smoldot_light::Client::json_rpc_request()`]. 
+ pub(crate) fn json_rpc_request( + &self, + json_rpc_request: impl Into, + chain_id: sl::ChainId, + ) -> Result<(), sl::HandleRpcError> { + self.client + .lock() + .expect("mutex should not be poisoned") + .json_rpc_request(json_rpc_request, chain_id) + } + + /// Delegates to [`smoldot_light::Client::add_chain()`]. + pub(crate) fn add_chain( + &self, + config: sl::AddChainConfig<'_, TChain, impl Iterator>, + ) -> Result, sl::AddChainError> { + self.client.lock().expect("mutex should not be poisoned").add_chain(config) + } +} diff --git a/vendor/pezkuwi-subxt/macro/Cargo.toml b/vendor/pezkuwi-subxt/macro/Cargo.toml new file mode 100644 index 00000000..133434e6 --- /dev/null +++ b/vendor/pezkuwi-subxt/macro/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pezkuwi-subxt-macro" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +autotests = false + +license.workspace = true +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Generate types and helpers for interacting with Bizinikiwi runtimes." 
+ +[features] +web = ["pezkuwi-subxt-codegen/web"] +runtime-wasm-path = [ + "pezsc-executor", + "pezsc-executor-common", + "pezsp-io", + "pezsp-maybe-compressed-blob", + "pezsp-state-machine", +] +runtime-metadata-insecure-url = ["pezkuwi-subxt-utils-fetchmetadata/url"] + +[lib] +proc-macro = true + +[dependencies] +codec = { package = "parity-scale-codec", workspace = true } +darling = { workspace = true } +pezkuwi-subxt-codegen = { workspace = true } +pezkuwi-subxt-metadata = { workspace = true } +pezkuwi-subxt-utils-fetchmetadata = { workspace = true } +pezsc-executor = { workspace = true, optional = true } +pezsc-executor-common = { workspace = true, optional = true } +pezsp-io = { workspace = true, optional = true } +pezsp-maybe-compressed-blob = { workspace = true, optional = true } +pezsp-state-machine = { workspace = true, optional = true } +proc-macro-error2 = { workspace = true } +quote = { workspace = true } +scale-typegen = { workspace = true } +syn = { workspace = true } + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/macro/src/lib.rs b/vendor/pezkuwi-subxt/macro/src/lib.rs new file mode 100644 index 00000000..cd2260e5 --- /dev/null +++ b/vendor/pezkuwi-subxt/macro/src/lib.rs @@ -0,0 +1,312 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Subxt macro for generating Bizinikiwi runtime interfaces. 
+ +use codec::Decode; +use darling::{FromMeta, ast::NestedMeta}; +use pezkuwi_subxt_codegen::{CodegenBuilder, CodegenError, Metadata}; +use proc_macro::TokenStream; +use proc_macro_error2::{abort_call_site, proc_macro_error}; +use quote::ToTokens; +use scale_typegen::typegen::{ + settings::substitutes::path_segments, + validation::{registry_contains_type_path, similar_type_paths_in_registry}, +}; +use syn::{parse_macro_input, punctuated::Punctuated}; + +#[cfg(feature = "runtime-wasm-path")] +mod wasm_loader; + +#[derive(Clone, Debug)] +struct OuterAttribute(syn::Attribute); + +impl syn::parse::Parse for OuterAttribute { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + Ok(Self(input.call(syn::Attribute::parse_outer)?[0].clone())) + } +} + +#[derive(Debug, FromMeta)] +struct RuntimeMetadataArgs { + #[darling(default)] + runtime_metadata_path: Option, + #[darling(default)] + runtime_metadata_insecure_url: Option, + #[darling(default)] + derive_for_all_types: Option>, + #[darling(default)] + attributes_for_all_types: Option>, + #[darling(multiple)] + derive_for_type: Vec, + #[darling(multiple)] + attributes_for_type: Vec, + #[darling(multiple)] + substitute_type: Vec, + #[darling(default, rename = "crate")] + crate_path: Option, + #[darling(default)] + generate_docs: darling::util::Flag, + #[darling(default)] + runtime_types_only: bool, + #[darling(default)] + no_default_derives: bool, + #[darling(default)] + no_default_substitutions: bool, + #[darling(default)] + unstable_metadata: darling::util::Flag, + #[cfg(feature = "runtime-wasm-path")] + #[darling(default)] + runtime_path: Option, +} + +#[derive(Debug, FromMeta)] +struct DeriveForType { + path: syn::TypePath, + derive: Punctuated, + #[darling(default)] + recursive: bool, +} + +#[derive(Debug, FromMeta)] +struct AttributesForType { + path: syn::TypePath, + attributes: Punctuated, + #[darling(default)] + recursive: bool, +} + +#[derive(Debug, FromMeta)] +struct SubstituteType { + path: syn::Path, + 
with: syn::Path, +} + +// Note: docs for this are in the subxt library; don't add further docs here as they will be +// appended. +#[allow(missing_docs)] +#[proc_macro_attribute] +#[proc_macro_error] +pub fn subxt(args: TokenStream, input: TokenStream) -> TokenStream { + match subxt_inner(args, parse_macro_input!(input as syn::ItemMod)) { + Ok(e) => e, + Err(e) => e, + } +} + +// Note: just an additional function to make early returns easier. +fn subxt_inner(args: TokenStream, item_mod: syn::ItemMod) -> Result { + let attr_args = NestedMeta::parse_meta_list(args.into()) + .map_err(|e| TokenStream::from(darling::Error::from(e).write_errors()))?; + let args = RuntimeMetadataArgs::from_list(&attr_args) + .map_err(|e| TokenStream::from(e.write_errors()))?; + + // Fetch metadata first, because we need it to validate some of the chosen codegen options. + let metadata = { + let mut metadata = fetch_metadata(&args)?; + + // Run this first to ensure type paths are unique (which may result in 1,2,3 suffixes being + // added to type paths), so that when we validate derives/substitutions below, they are + // allowed for such types. See . 
+ scale_typegen::utils::ensure_unique_type_paths(metadata.types_mut()) + .expect("ensure_unique_type_paths should not fail; please report an issue."); + + metadata + }; + + let mut codegen = CodegenBuilder::new(); + + // Use the item module that the macro is on: + codegen.set_target_module(item_mod); + + // Use the provided crate path: + if let Some(crate_path) = args.crate_path { + codegen.set_subxt_crate_path(crate_path) + } + + // Respect the boolean flags: + if args.runtime_types_only { + codegen.runtime_types_only(); + } + if args.no_default_derives { + codegen.disable_default_derives(); + } + if args.no_default_substitutions { + codegen.disable_default_substitutes(); + } + if !args.generate_docs.is_present() { + codegen.no_docs() + } + + // Configure derives: + codegen.set_additional_global_derives( + args.derive_for_all_types.unwrap_or_default().into_iter().collect(), + ); + + for d in args.derive_for_type { + validate_type_path(&d.path.path, &metadata); + codegen.add_derives_for_type(d.path, d.derive.into_iter(), d.recursive); + } + + // Configure attributes: + codegen.set_additional_global_attributes( + args.attributes_for_all_types + .unwrap_or_default() + .into_iter() + .map(|a| a.0) + .collect(), + ); + for d in args.attributes_for_type { + validate_type_path(&d.path.path, &metadata); + codegen.add_attributes_for_type(d.path, d.attributes.into_iter().map(|a| a.0), d.recursive) + } + + // Insert type substitutions: + for sub in args.substitute_type.into_iter() { + validate_type_path(&sub.path, &metadata); + codegen.set_type_substitute(sub.path, sub.with); + } + + let code = codegen.generate(metadata).map_err(|e| e.into_compile_error())?; + + Ok(code.into()) +} + +/// Checks that a type is present in the type registry. If it is not found, abort with a +/// helpful error message, showing the user alternative types, that have the same name, but are at +/// different locations in the metadata. 
+fn validate_type_path(path: &syn::Path, metadata: &Metadata) { + let path_segments = path_segments(path); + let ident = &path + .segments + .last() + .expect("Empty path should be filtered out before already") + .ident; + if !registry_contains_type_path(metadata.types(), &path_segments) { + let alternatives = similar_type_paths_in_registry(metadata.types(), path); + let alternatives: String = if alternatives.is_empty() { + format!("There is no Type with name `{ident}` in the provided metadata.") + } else { + let mut s = "A type with the same name is present at: ".to_owned(); + for p in alternatives { + s.push('\n'); + s.push_str(&pretty_path(&p)); + } + s + }; + + abort_call_site!( + "Type `{}` does not exist at path `{}`\n\n{}", + ident.to_string(), + pretty_path(path), + alternatives + ); + } + + fn pretty_path(path: &syn::Path) -> String { + path.to_token_stream().to_string().replace(' ', "") + } +} + +/// Resolves a path, handling the $OUT_DIR placeholder if present. +/// If $OUT_DIR is present in the path, it's replaced with the actual OUT_DIR environment variable. +/// Otherwise, the path is resolved relative to CARGO_MANIFEST_DIR. +fn resolve_path(path_str: &str) -> std::path::PathBuf { + if path_str.contains("$OUT_DIR") { + let out_dir = std::env::var("OUT_DIR").unwrap_or_else(|_| { + abort_call_site!("$OUT_DIR is used in path but OUT_DIR environment variable is not set") + }); + std::path::Path::new(&path_str.replace("$OUT_DIR", &out_dir)).into() + } else { + let root = std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".into()); + let root_path = std::path::Path::new(&root); + root_path.join(path_str) + } +} + +/// Fetches metadata in a blocking manner, from a url or file path. +fn fetch_metadata( + args: &RuntimeMetadataArgs, +) -> Result { + // Do we want to fetch unstable metadata? This only works if fetching from a URL. 
+ let unstable_metadata = args.unstable_metadata.is_present(); + + #[cfg(feature = "runtime-wasm-path")] + if let Some(path) = &args.runtime_path { + if args.runtime_metadata_insecure_url.is_some() || args.runtime_metadata_path.is_some() { + abort_call_site!( + "Only one of 'runtime_metadata_path', 'runtime_metadata_insecure_url' or `runtime_path` must be provided" + ); + }; + let path = resolve_path(path); + + let metadata = wasm_loader::from_wasm_file(&path).map_err(|e| e.into_compile_error())?; + return Ok(metadata); + }; + + let metadata = match (&args.runtime_metadata_path, &args.runtime_metadata_insecure_url) { + (Some(rest_of_path), None) => { + if unstable_metadata { + abort_call_site!( + "The 'unstable_metadata' attribute requires `runtime_metadata_insecure_url`" + ) + } + + let path = resolve_path(rest_of_path); + + pezkuwi_subxt_utils_fetchmetadata::from_file_blocking(&path) + .and_then(|b| pezkuwi_subxt_codegen::Metadata::decode(&mut &*b).map_err(Into::into)) + .map_err(|e| CodegenError::Other(e.to_string()).into_compile_error())? + }, + #[cfg(feature = "runtime-metadata-insecure-url")] + (None, Some(url_string)) => { + use pezkuwi_subxt_utils_fetchmetadata::{MetadataVersion, Url, from_url_blocking}; + + let url = Url::parse(url_string).unwrap_or_else(|_| { + abort_call_site!("Cannot download metadata; invalid url: {}", url_string) + }); + + let version = match unstable_metadata { + true => MetadataVersion::Unstable, + false => MetadataVersion::Latest, + }; + + from_url_blocking(url, version, None) + .map_err(|e| CodegenError::Other(e.to_string())) + .and_then(|b| pezkuwi_subxt_codegen::Metadata::decode(&mut &*b).map_err(Into::into)) + .map_err(|e| e.into_compile_error())? 
+ }, + #[cfg(not(feature = "runtime-metadata-insecure-url"))] + (None, Some(_)) => { + abort_call_site!( + "'runtime_metadata_insecure_url' requires the 'runtime-metadata-insecure-url' feature to be enabled" + ) + }, + #[cfg(feature = "runtime-wasm-path")] + (None, None) => { + abort_call_site!( + "At least one of 'runtime_metadata_path', 'runtime_metadata_insecure_url' or 'runtime_path` can be provided" + ) + }, + #[cfg(not(feature = "runtime-wasm-path"))] + (None, None) => { + abort_call_site!( + "At least one of 'runtime_metadata_path', 'runtime_metadata_insecure_url' can be provided" + ) + }, + #[cfg(feature = "runtime-wasm-path")] + _ => { + abort_call_site!( + "Only one of 'runtime_metadata_path', 'runtime_metadata_insecure_url' or 'runtime_path` can be provided" + ) + }, + #[cfg(not(feature = "runtime-wasm-path"))] + _ => { + abort_call_site!( + "Only one of 'runtime_metadata_path' or 'runtime_metadata_insecure_url' can be provided" + ) + }, + }; + Ok(metadata) +} diff --git a/vendor/pezkuwi-subxt/macro/src/wasm_loader.rs b/vendor/pezkuwi-subxt/macro/src/wasm_loader.rs new file mode 100644 index 00000000..913cd4f0 --- /dev/null +++ b/vendor/pezkuwi-subxt/macro/src/wasm_loader.rs @@ -0,0 +1,145 @@ +// Copyright 2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use std::{borrow::Cow, path::Path}; + +use codec::{Decode, Encode}; +use pezkuwi_subxt_codegen::{CodegenError, Metadata}; +use pezkuwi_subxt_metadata::SUPPORTED_METADATA_VERSIONS; +use sc_executor::{WasmExecutionMethod, WasmExecutor}; +use sc_executor_common::runtime_blob::RuntimeBlob; +use sp_maybe_compressed_blob::{self, CODE_BLOB_BOMB_LIMIT}; + +/// Result type shorthand +pub type WasmMetadataResult = Result; + +/// Uses wasm artifact produced by compiling the runtime to generate metadata +pub fn from_wasm_file(wasm_file_path: &Path) -> WasmMetadataResult { + let wasm_file = pezkuwi_subxt_utils_fetchmetadata::from_file_blocking(wasm_file_path) + .map_err(|e| CodegenError::Other(e.to_string())) + .and_then(maybe_decompress)?; + call_and_decode(wasm_file) +} + +fn call_and_decode(wasm_file: Vec) -> WasmMetadataResult { + let mut executor = Executor::new(&wasm_file)?; + + if let Ok(versions) = executor.versions() { + let version = versions.into_iter().max().expect("This is checked earlier and can't fail."); + + executor.load_metadata_at_version(version) + } else { + executor.load_legacy_metadata() + } +} + +fn decode(encoded_metadata: Vec) -> WasmMetadataResult { + Metadata::decode(&mut encoded_metadata.as_ref()).map_err(Into::into) +} + +fn maybe_decompress(file_contents: Vec) -> WasmMetadataResult> { + sp_maybe_compressed_blob::decompress(file_contents.as_ref(), CODE_BLOB_BOMB_LIMIT) + .map_err(|e| CodegenError::Wasm(e.to_string())) + .map(Cow::into_owned) +} + +struct Executor { + runtime_blob: RuntimeBlob, + executor: WasmExecutor, + externalities: sp_state_machine::BasicExternalities, +} + +impl Executor { + fn new(wasm_file: &[u8]) -> WasmMetadataResult { + let externalities: sp_state_machine::BasicExternalities = Default::default(); + + let executor: WasmExecutor = WasmExecutor::builder() + .with_execution_method(WasmExecutionMethod::default()) + .with_offchain_heap_alloc_strategy(sc_executor::HeapAllocStrategy::Dynamic { + maximum_pages: Some(64), + }) + 
.with_max_runtime_instances(1) + .with_runtime_cache_size(1) + .build(); + + let runtime_blob = + RuntimeBlob::new(wasm_file).map_err(|e| CodegenError::Wasm(e.to_string()))?; + + Ok(Self { runtime_blob, executor, externalities }) + } + + fn versions(&mut self) -> WasmMetadataResult> { + let version = self + .executor + .uncached_call( + self.runtime_blob.clone(), + &mut self.externalities, + true, + "Metadata_metadata_versions", + &[], + ) + .map_err(|_| { + CodegenError::Wasm("method \"Metadata_metadata_versions\" doesnt exist".to_owned()) + })?; + let versions = + >::decode(&mut &version[..]).map_err(CodegenError::Decode).map(|x| { + x.into_iter() + .filter(|version| SUPPORTED_METADATA_VERSIONS.contains(version)) + .collect::>() + })?; + + if versions.is_empty() { + return Err(CodegenError::Other( + "No supported metadata versions were returned".to_owned(), + )); + } + + Ok(versions) + } + + fn load_legacy_metadata(&mut self) -> WasmMetadataResult { + let encoded_metadata = self + .executor + .uncached_call( + self.runtime_blob.clone(), + &mut self.externalities, + true, + "Metadata_metadata", + &[], + ) + .map_err(|e| { + CodegenError::Wasm(format!( + "Failed to call \"Metadata_metadata\" on WASM runtime. Cause: {e}" + )) + })?; + let encoded_metadata = + >::decode(&mut &encoded_metadata[..]).map_err(CodegenError::Decode)?; + decode(encoded_metadata) + } + + fn load_metadata_at_version(&mut self, version: u32) -> WasmMetadataResult { + let encoded_metadata = self + .executor + .uncached_call( + self.runtime_blob.clone(), + &mut self.externalities, + true, + "Metadata_metadata_at_version", + &version.encode(), + ) + .map_err(|e| { + CodegenError::Wasm(format!( + "Failed to call \"Metadata_metadata_at_version\" on WASM runtime. Cause: {e}" + )) + })?; + let Some(encoded_metadata) = + >>::decode(&mut &encoded_metadata[..]).map_err(CodegenError::Decode)? 
+ else { + return Err(CodegenError::Other( + format!("Received empty metadata at version: v{version}").to_owned(), + )); + }; + decode(encoded_metadata) + } +} diff --git a/vendor/pezkuwi-subxt/metadata/Cargo.toml b/vendor/pezkuwi-subxt/metadata/Cargo.toml new file mode 100644 index 00000000..9ab128d3 --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pezkuwi-subxt-metadata" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +autotests = false + +license.workspace = true +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Command line utilities for checking metadata compatibility between nodes." + +[features] +default = ["legacy", "std"] +std = ["frame-metadata/std", "scale-info/std"] + +# Enable decoding of legacy metadata, too. +# std required by frame-metadata to decode Metadata { + let bytes = fs::read(Path::new("../artifacts/pezkuwi_metadata_full.scale")) + .expect("Cannot read metadata blob"); + let meta: RuntimeMetadataPrefixed = + Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata"); + + match meta.1 { + RuntimeMetadata::V14(v14) => v14.try_into().unwrap(), + RuntimeMetadata::V15(v15) => v15.try_into().unwrap(), + _ => panic!("Unsupported metadata version {:?}", meta.1), + } +} + +fn bench_get_metadata_hash(c: &mut Criterion) { + let metadata = load_metadata(); + + c.bench_function("get_metadata_hash", |b| b.iter(|| metadata.hasher().hash())); +} + +fn bench_get_call_hash(c: &mut Criterion) { + let metadata = load_metadata(); + let mut group = c.benchmark_group("get_call_hash"); + + for pallet in metadata.pallets() { + let pallet_name = pallet.name(); + let Some(variants) = pallet.call_variants() else { + continue; + }; + + for variant in variants { + let call_name = &variant.name; + let bench_name = format!("{pallet_name}/{call_name}"); + group.bench_function(&bench_name, 
|b| b.iter(|| pallet.call_hash(call_name))); + } + } +} + +fn bench_get_constant_hash(c: &mut Criterion) { + let metadata = load_metadata(); + let mut group = c.benchmark_group("get_constant_hash"); + + for pallet in metadata.pallets() { + let pallet_name = pallet.name(); + for constant in pallet.constants() { + let constant_name = constant.name(); + let bench_name = format!("{pallet_name}/{constant_name}"); + group.bench_function(&bench_name, |b| b.iter(|| pallet.constant_hash(constant_name))); + } + } +} + +fn bench_get_storage_hash(c: &mut Criterion) { + let metadata = load_metadata(); + let mut group = c.benchmark_group("get_storage_hash"); + + for pallet in metadata.pallets() { + let pallet_name = pallet.name(); + let Some(storage_entries) = pallet.storage() else { + continue; + }; + + for storage in storage_entries.entries() { + let entry_name = storage.name(); + let bench_name = format!("{pallet_name}/{entry_name}"); + group.bench_function(&bench_name, |b| b.iter(|| pallet.storage_hash(entry_name))); + } + } +} + +criterion_group!( + name = benches; + config = Criterion::default(); + targets = + bench_get_metadata_hash, + bench_get_call_hash, + bench_get_constant_hash, + bench_get_storage_hash, +); + +criterion_main!(benches); diff --git a/vendor/pezkuwi-subxt/metadata/src/from/legacy/mod.rs b/vendor/pezkuwi-subxt/metadata/src/from/legacy/mod.rs new file mode 100644 index 00000000..e4fb4928 --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/legacy/mod.rs @@ -0,0 +1,410 @@ +mod portable_registry_builder; +#[cfg(test)] +mod tests; + +use crate::{ + Metadata, + utils::{ordered_map::OrderedMap, variant_index::VariantIndex}, +}; +use alloc::{borrow::ToOwned, collections::BTreeMap, format, string::ToString, vec::Vec}; +use frame_decode::{ + constants::{ConstantEntryInfo, ConstantTypeInfo}, + extrinsics::ExtrinsicTypeInfo, + runtime_apis::RuntimeApiTypeInfo, + storage::{StorageEntryInfo, StorageTypeInfo}, +}; +use frame_metadata::v15; +use 
portable_registry_builder::PortableRegistryBuilder; +use scale_info_legacy::{TypeRegistrySet, type_registry::RuntimeApiName}; + +/// Options to configure the legacy translating. +pub(crate) struct Opts { + pub sanitize_paths: bool, + pub ignore_not_found: bool, +} + +impl Opts { + /// Opts tuned for best compatibility translating. + pub(crate) fn compat() -> Self { + Opts { sanitize_paths: true, ignore_not_found: true } + } +} + +macro_rules! from_historic { + ($vis:vis fn $fn_name:ident($metadata:path $(, builtin_index: $builtin_index:ident)? )) => { + $vis fn $fn_name(metadata: &$metadata, types: &TypeRegistrySet<'_>, opts: Opts) -> Result { + // This will be used to construct our `PortableRegistry` from old-style types. + let mut portable_registry_builder = PortableRegistryBuilder::new(&types); + portable_registry_builder.ignore_not_found(opts.ignore_not_found); + portable_registry_builder.sanitize_paths(opts.sanitize_paths); + + + // We use this type in a few places to denote that we don't know how to decode it. + let unknown_type_id = portable_registry_builder.add_type_str("special::Unknown", None) + .map_err(|e| Error::add_type("constructing 'Unknown' type", e))?; + + // Pallet metadata + let mut call_index = 0u8; + let mut error_index = 0u8; + let mut event_index = 0u8; + + let new_pallets = as_decoded(&metadata.modules).iter().map(|pallet| { + // In older metadatas, calls and event enums can have different indexes + // in a given pallet. Pallets without calls or events don't increment + // the respective index for them. + // + // We assume since errors are non optional, that the pallet index _always_ + // increments for errors (no `None`s to skip). 
+ let (call_index, event_index, error_index) = { + let out = (call_index, event_index, error_index); + if pallet.calls.is_some() { + call_index += 1; + } + if pallet.event.is_some() { + event_index += 1; + } + error_index += 1; + + out + }; + + // For v12 and v13 metadata, there is a builtin index for everything in a pallet. + // We enable this logic for those metadatas to get the correct index. + $( + let $builtin_index = true; + let (call_index, event_index, error_index) = if $builtin_index { + (pallet.index, pallet.index, pallet.index) + } else { + (call_index, event_index, error_index) + }; + )? + + let pallet_name = as_decoded(&pallet.name).to_string(); + + // Storage entries: + let storage = pallet.storage.as_ref().map(|s| { + let storage = as_decoded(s); + let prefix = as_decoded(&storage.prefix); + let entries = metadata.storage_in_pallet(&pallet_name).map(|entry_name| { + let info = metadata + .storage_info(&pallet_name, &entry_name) + .map_err(|e| Error::StorageInfoError(e.into_owned()))?; + let entry_name = entry_name.into_owned(); + + let info = info.map_ids(|old_id| { + portable_registry_builder.add_type(old_id) + }).map_err(|e| { + let ctx = format!("adding type used in storage entry {pallet_name}.{entry_name}"); + Error::add_type(ctx, e) + })?; + + let entry = crate::StorageEntryMetadata { + name: entry_name.clone(), + info: info.into_owned(), + // We don't expose docs via our storage info yet. 
+ docs: Vec::new(), + }; + + Ok((entry_name, entry)) + }).collect::, _>>()?; + Ok(crate::StorageMetadata { + prefix: prefix.clone(), + entries, + }) + }).transpose()?; + + // Pallet error type is just a builtin type: + let error_ty = portable_registry_builder.add_type_str(&format!("builtin::module::error::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the error enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + })?; + + // Pallet calls also just a builtin type: + let call_ty = pallet.calls.as_ref().map(|_| { + portable_registry_builder.add_type_str(&format!("builtin::module::call::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the call enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + }) + }).transpose()?; + + // Pallet events also just a builtin type: + let event_ty = pallet.event.as_ref().map(|_| { + portable_registry_builder.add_type_str(&format!("builtin::module::event::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the event enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + }) + }).transpose()?; + + let call_variant_index = + VariantIndex::build(call_ty, portable_registry_builder.types()); + let error_variant_index = + VariantIndex::build(Some(error_ty), portable_registry_builder.types()); + let event_variant_index = + VariantIndex::build(event_ty, portable_registry_builder.types()); + + let constants = metadata.constants_in_pallet(&pallet_name).map(|name| { + let name = name.into_owned(); + let info = metadata.constant_info(&pallet_name, &name) + .map_err(|e| Error::ConstantInfoError(e.into_owned()))?; + let new_type_id = portable_registry_builder.add_type(info.type_id) + .map_err(|e| { + let ctx = format!("converting the constant {name} for pallet {pallet_name}"); + Error::add_type(ctx, e) + })?; + + let constant = crate::ConstantMetadata { + name: name.clone(), + ty: new_type_id, + value: info.bytes.to_vec(), + // We don't expose docs via our constant 
info yet. + docs: Vec::new(), + }; + + Ok((name, constant)) + }).collect::>()?; + + let pallet_metadata = crate::PalletMetadataInner { + name: pallet_name.clone(), + call_index, + event_index, + error_index, + storage, + error_ty: Some(error_ty), + call_ty, + event_ty, + call_variant_index, + error_variant_index, + event_variant_index, + constants, + view_functions: Default::default(), + associated_types: Default::default(), + // Pallets did not have docs prior to V15. + docs: Default::default(), + }; + + Ok((pallet_name, pallet_metadata)) + }).collect::,Error>>()?; + + // Extrinsic metadata + let new_extrinsic = { + let signature_info = metadata + .extrinsic_signature_info() + .map_err(|e| Error::ExtrinsicInfoError(e.into_owned()))?; + + let address_ty_id = portable_registry_builder.add_type(signature_info.address_id) + .map_err(|_| Error::CannotFindAddressType)?; + + let signature_ty_id = portable_registry_builder.add_type(signature_info.signature_id) + .map_err(|_| Error::CannotFindCallType)?; + + let transaction_extensions = metadata + .extrinsic_extension_info(None) + .map_err(|e| Error::ExtrinsicInfoError(e.into_owned()))? + .extension_ids + .into_iter() + .map(|ext| { + let ext_name = ext.name.into_owned(); + let ext_type = portable_registry_builder.add_type(ext.id) + .map_err(|e| { + let ctx = format!("converting the signed extension {ext_name}"); + Error::add_type(ctx, e) + })?; + + Ok(crate::TransactionExtensionMetadataInner { + identifier: ext_name, + extra_ty: ext_type, + // This only started existing in V14+ metadata, but in any case, + // we don't need to know how to decode the signed payload for + // historic blocks (hopefully), so set to unknown. 
+ additional_ty: unknown_type_id.into() + }) + }) + .collect::,Error>>()?; + + let transaction_extensions_by_version = BTreeMap::from_iter([( + 0, + (0..transaction_extensions.len() as u32).collect() + )]); + + crate::ExtrinsicMetadata { + address_ty: address_ty_id.into(), + signature_ty: signature_ty_id.into(), + supported_versions: Vec::from_iter([4]), + transaction_extensions, + transaction_extensions_by_version, + } + }; + + // Outer enum types + let outer_enums = crate::OuterEnumsMetadata { + call_enum_ty: portable_registry_builder.add_type_str("builtin::Call", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Call' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + event_enum_ty: portable_registry_builder.add_type_str("builtin::Event", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Event' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + error_enum_ty: portable_registry_builder.add_type_str("builtin::Error", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Error' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + }; + + // These are all the same in V13, but be explicit anyway for clarity. + let pallets_by_call_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.call_index, idx)) + .collect(); + let pallets_by_error_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.error_index, idx)) + .collect(); + let pallets_by_event_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.event_index, idx)) + .collect(); + + // This is optional in the sense that Subxt will return an error if it needs to decode this type, + // and I think for historic metadata we wouldn't end up down that path anyway. Historic metadata + // tends to call it just "DispatchError" but search more specific paths first. 
+ let dispatch_error_ty = portable_registry_builder + .try_add_type_str("hardcoded::DispatchError", None) + .or_else(|| portable_registry_builder.try_add_type_str("sp_runtime::DispatchError", None)) + .or_else(|| portable_registry_builder.try_add_type_str("DispatchError", None)) + .transpose() + .map_err(|e| Error::add_type("constructing DispatchError", e))?; + + // Runtime API definitions live with type definitions. + let apis = type_registry_to_runtime_apis(&types, &mut portable_registry_builder)?; + + Ok(crate::Metadata { + types: portable_registry_builder.finish(), + pallets: new_pallets, + pallets_by_call_index, + pallets_by_error_index, + pallets_by_event_index, + extrinsic: new_extrinsic, + outer_enums, + dispatch_error_ty, + apis, + // Nothing custom existed in V13 + custom: v15::CustomMetadata { map: Default::default() }, + }) + }} +} + +from_historic!(pub fn from_v13(frame_metadata::v13::RuntimeMetadataV13, builtin_index: yes)); +from_historic!(pub fn from_v12(frame_metadata::v12::RuntimeMetadataV12, builtin_index: yes)); +from_historic!(pub fn from_v11(frame_metadata::v11::RuntimeMetadataV11)); +from_historic!(pub fn from_v10(frame_metadata::v10::RuntimeMetadataV10)); +from_historic!(pub fn from_v9(frame_metadata::v9::RuntimeMetadataV9)); +from_historic!(pub fn from_v8(frame_metadata::v8::RuntimeMetadataV8)); + +fn as_decoded(item: &frame_metadata::decode_different::DecodeDifferent) -> &B { + match item { + frame_metadata::decode_different::DecodeDifferent::Encode(_a) => { + panic!("Expecting decoded data") + }, + frame_metadata::decode_different::DecodeDifferent::Decoded(b) => b, + } +} + +// Obtain Runtime API information from some type registry. 
+pub fn type_registry_to_runtime_apis( + types: &TypeRegistrySet<'_>, + portable_registry_builder: &mut PortableRegistryBuilder, +) -> Result, Error> { + let mut apis = OrderedMap::new(); + let mut trait_name = ""; + let mut trait_methods = OrderedMap::new(); + + for api in types.runtime_apis() { + match api { + RuntimeApiName::Trait(name) => { + if !trait_methods.is_empty() { + apis.push_insert( + trait_name.into(), + crate::RuntimeApiMetadataInner { + name: trait_name.into(), + methods: trait_methods, + docs: Vec::new(), + }, + ); + } + trait_methods = OrderedMap::new(); + trait_name = name; + }, + RuntimeApiName::Method(name) => { + let info = types + .runtime_api_info(trait_name, name) + .map_err(|e| Error::RuntimeApiInfoError(e.into_owned()))?; + + let info = info.map_ids(|id| { + portable_registry_builder.add_type(id).map_err(|e| { + let c = format!("converting type for runtime API {trait_name}.{name}"); + Error::add_type(c, e) + }) + })?; + + trait_methods.push_insert( + name.to_owned(), + crate::RuntimeApiMethodMetadataInner { + name: name.into(), + info, + docs: Vec::new(), + }, + ); + }, + } + } + + Ok(apis) +} + +/// An error encountered converting some legacy metadata to our internal format. +#[allow(missing_docs)] +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Cannot add a type. 
+ #[error("Cannot add type ({context}): {error}")] + AddTypeError { context: String, error: portable_registry_builder::PortableRegistryAddTypeError }, + #[error("Cannot find 'hardcoded::ExtrinsicAddress' type in legacy types")] + CannotFindAddressType, + #[error("Cannot find 'hardcoded::ExtrinsicSignature' type in legacy types")] + CannotFindSignatureType, + #[error( + "Cannot find 'builtin::Call' type in legacy types (this should have been automatically added)" + )] + CannotFindCallType, + #[error("Cannot obtain the storage information we need to convert storage entries")] + StorageInfoError(frame_decode::storage::StorageInfoError<'static>), + #[error("Cannot obtain the extrinsic information we need to convert transaction extensions")] + ExtrinsicInfoError(frame_decode::extrinsics::ExtrinsicInfoError<'static>), + #[error("Cannot obtain the Runtime API information we need")] + RuntimeApiInfoError(frame_decode::runtime_apis::RuntimeApiInfoError<'static>), + #[error("Cannot obtain the Constant information we need")] + ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), +} + +impl Error { + /// A shorthand for the [`Error::AddTypeError`] variant. 
+ fn add_type( + context: impl Into, + error: impl Into, + ) -> Self { + Error::AddTypeError { context: context.into(), error: error.into() } + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/from/legacy/portable_registry_builder.rs b/vendor/pezkuwi-subxt/metadata/src/from/legacy/portable_registry_builder.rs new file mode 100644 index 00000000..e3262c24 --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/legacy/portable_registry_builder.rs @@ -0,0 +1,502 @@ +use alloc::{ + borrow::ToOwned, + collections::{BTreeMap, BTreeSet}, + string::ToString, + vec::Vec, +}; +use scale_info::{PortableRegistry, PortableType, form::PortableForm}; +use scale_info_legacy::{LookupName, TypeRegistrySet, type_registry::TypeRegistryResolveError}; +use scale_type_resolver::{ + BitsOrderFormat, BitsStoreFormat, FieldIter, PathIter, Primitive, ResolvedTypeVisitor, + UnhandledKind, VariantIter, +}; + +#[derive(thiserror::Error, Debug)] +pub enum PortableRegistryAddTypeError { + #[error("Error resolving type: {0}")] + ResolveError(#[from] TypeRegistryResolveError), + #[error("Cannot find type '{0}'")] + TypeNotFound(LookupName), +} + +/// the purpose of this is to convert a (subset of) [`scale_info_legacy::TypeRegistrySet`] +/// into a [`scale_info::PortableRegistry`]. Type IDs from the former are passed in, and +/// type IDs from the latter are handed back. Calling [`PortableRegistryBuilder::finish()`] +/// then hands back a [`scale_info::PortableRegistry`] which these Ids can be used with. +pub struct PortableRegistryBuilder<'info> { + legacy_types: &'info TypeRegistrySet<'info>, + scale_info_types: PortableRegistry, + old_to_new: BTreeMap, + ignore_not_found: bool, + sanitize_paths: bool, + seen_names_in_default_path: BTreeSet, +} + +impl<'info> PortableRegistryBuilder<'info> { + /// Instantiate a new [`PortableRegistryBuilder`], providing the set of + /// legacy types you wish to use to construct modern types from. 
+ pub fn new(legacy_types: &'info TypeRegistrySet<'info>) -> Self { + PortableRegistryBuilder { + legacy_types, + scale_info_types: PortableRegistry { types: Default::default() }, + old_to_new: Default::default(), + ignore_not_found: false, + sanitize_paths: false, + seen_names_in_default_path: Default::default(), + } + } + + /// If this is enabled, any type that isn't found will be replaced by a "special::Unknown" type + /// instead of a "type not found" error being emitted. + /// + /// Default: false + pub fn ignore_not_found(&mut self, ignore: bool) { + self.ignore_not_found = ignore; + } + + /// Should type paths be sanitized to make them more amenable to things like codegen? + /// + /// Default: false + pub fn sanitize_paths(&mut self, sanitize: bool) { + self.sanitize_paths = sanitize; + } + + /// Try adding a type, given its string name and optionally the pallet it's scoped to. + pub fn try_add_type_str( + &mut self, + id: &str, + pallet: Option<&str>, + ) -> Option> { + let mut id = match LookupName::parse(id) { + Ok(id) => id, + Err(e) => { + return Some(Err(TypeRegistryResolveError::LookupNameInvalid(id.to_owned(), e))); + }, + }; + + if let Some(pallet) = pallet { + id = id.in_pallet(pallet); + } + + self.try_add_type(id) + } + + /// Try adding a type, returning `None` if the type doesn't exist. + pub fn try_add_type( + &mut self, + id: LookupName, + ) -> Option> { + match self.add_type(id) { + Ok(id) => Some(Ok(id)), + Err(PortableRegistryAddTypeError::TypeNotFound(_)) => None, + Err(PortableRegistryAddTypeError::ResolveError(e)) => Some(Err(e)), + } + } + + /// Add a new legacy type, giving its string ID/name and, if applicable, the pallet that it's + /// seen in, returning the corresponding "modern" type ID to use in its place, or an error if + /// something does wrong. 
+ pub fn add_type_str( + &mut self, + id: &str, + pallet: Option<&str>, + ) -> Result { + let mut id = LookupName::parse(id) + .map_err(|e| TypeRegistryResolveError::LookupNameInvalid(id.to_owned(), e))?; + + if let Some(pallet) = pallet { + id = id.in_pallet(pallet); + } + + self.add_type(id) + } + + /// Add a new legacy type, returning the corresponding "modern" type ID to use in + /// its place, or an error if something does wrong. + pub fn add_type(&mut self, id: LookupName) -> Result { + if let Some(new_id) = self.old_to_new.get(&id) { + return Ok(*new_id); + } + + // Assign a new ID immediately to prevent any recursion. If we don't do this, then + // recursive types (ie types that contain themselves) will lead to a stack overflow. + // with this, we assign IDs up front, so the ID is returned immediately on recursing. + let new_id = self.scale_info_types.types.len() as u32; + + // Add a placeholder type to "reserve" this ID. + self.scale_info_types.types.push(PortableType { + id: new_id, + ty: scale_info::Type::new( + scale_info::Path { segments: vec![] }, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { variants: vec![] }), + Default::default(), + ), + }); + + // Cache the ID so that recursing calls bail early. + self.old_to_new.insert(id.clone(), new_id); + + let visitor = PortableRegistryVisitor { builder: &mut *self, current_type: &id }; + + match visitor.builder.legacy_types.resolve_type(id.clone(), visitor) { + Ok(Ok(ty)) => { + self.scale_info_types.types[new_id as usize].ty = ty; + Ok(new_id) + }, + Ok(Err(e)) => { + self.old_to_new.remove(&id); + Err(e) + }, + Err(e) => { + self.old_to_new.remove(&id); + Err(e.into()) + }, + } + } + + /// Return the current [`scale_info::PortableRegistry`]. + pub fn types(&self) -> &PortableRegistry { + &self.scale_info_types + } + + /// Finish adding types and return the modern type registry. 
+ pub fn finish(self) -> PortableRegistry { + self.scale_info_types + } +} + +struct PortableRegistryVisitor<'a, 'info> { + builder: &'a mut PortableRegistryBuilder<'info>, + current_type: &'a LookupName, +} + +impl<'a, 'info> ResolvedTypeVisitor<'info> for PortableRegistryVisitor<'a, 'info> { + type TypeId = LookupName; + type Value = Result, PortableRegistryAddTypeError>; + + fn visit_unhandled(self, kind: UnhandledKind) -> Self::Value { + panic!("A handler exists for every type, but visit_unhandled({kind:?}) was called"); + } + + fn visit_not_found(self) -> Self::Value { + if self.builder.ignore_not_found { + // Return the "unknown" type if we're ignoring not found types: + Ok(unknown_type()) + } else { + // Otherwise just return an error at this point: + Err(PortableRegistryAddTypeError::TypeNotFound(self.current_type.clone())) + } + } + + fn visit_primitive(self, primitive: Primitive) -> Self::Value { + let p = match primitive { + Primitive::Bool => scale_info::TypeDefPrimitive::Bool, + Primitive::Char => scale_info::TypeDefPrimitive::Char, + Primitive::Str => scale_info::TypeDefPrimitive::Str, + Primitive::U8 => scale_info::TypeDefPrimitive::U8, + Primitive::U16 => scale_info::TypeDefPrimitive::U16, + Primitive::U32 => scale_info::TypeDefPrimitive::U32, + Primitive::U64 => scale_info::TypeDefPrimitive::U64, + Primitive::U128 => scale_info::TypeDefPrimitive::U128, + Primitive::U256 => scale_info::TypeDefPrimitive::U256, + Primitive::I8 => scale_info::TypeDefPrimitive::I8, + Primitive::I16 => scale_info::TypeDefPrimitive::I16, + Primitive::I32 => scale_info::TypeDefPrimitive::I32, + Primitive::I64 => scale_info::TypeDefPrimitive::I64, + Primitive::I128 => scale_info::TypeDefPrimitive::I128, + Primitive::I256 => scale_info::TypeDefPrimitive::I256, + }; + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Primitive(p), + Default::default(), + )) + } + + fn visit_sequence>( + self, + path: Path, + inner_type_id: 
Self::TypeId, + ) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + let path = scale_info::Path { segments: prepare_path(path, self.builder) }; + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Sequence(scale_info::TypeDefSequence { + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_composite(self, path: Path, fields: Fields) -> Self::Value + where + Path: PathIter<'info>, + Fields: FieldIter<'info, Self::TypeId>, + { + let path = scale_info::Path { segments: prepare_path(path, self.builder) }; + + let mut scale_info_fields = Vec::>::new(); + for field in fields { + let type_name = field.id.to_string(); + let id = self.builder.add_type(field.id)?; + scale_info_fields.push(scale_info::Field { + name: field.name.map(Into::into), + ty: id.into(), + type_name: Some(type_name), + docs: Default::default(), + }); + } + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Composite(scale_info::TypeDefComposite { + fields: scale_info_fields, + }), + Default::default(), + )) + } + + fn visit_array(self, inner_type_id: LookupName, len: usize) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Array(scale_info::TypeDefArray { + len: len as u32, + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_tuple(self, type_ids: TypeIds) -> Self::Value + where + TypeIds: ExactSizeIterator, + { + let mut scale_info_fields = Vec::new(); + for old_id in type_ids { + let new_id = self.builder.add_type(old_id)?; + scale_info_fields.push(new_id.into()); + } + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Tuple(scale_info::TypeDefTuple { fields: scale_info_fields }), + Default::default(), + )) + } + + fn visit_variant(self, path: Path, variants: Var) -> Self::Value + where + Path: 
PathIter<'info>, + Fields: FieldIter<'info, Self::TypeId>, + Var: VariantIter<'info, Fields>, + { + let path = scale_info::Path { segments: prepare_path(path, self.builder) }; + + let mut scale_info_variants = Vec::new(); + for variant in variants { + let mut scale_info_variant_fields = Vec::>::new(); + for field in variant.fields { + let type_name = field.id.to_string(); + let id = self.builder.add_type(field.id)?; + scale_info_variant_fields.push(scale_info::Field { + name: field.name.map(Into::into), + ty: id.into(), + type_name: Some(type_name), + docs: Default::default(), + }); + } + + scale_info_variants.push(scale_info::Variant { + name: variant.name.to_owned(), + index: variant.index, + fields: scale_info_variant_fields, + docs: Default::default(), + }) + } + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { + variants: scale_info_variants, + }), + Default::default(), + )) + } + + fn visit_compact(self, inner_type_id: Self::TypeId) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + + // Configure the path and type params to maximise compat. + let path = ["parity_scale_codec", "Compact"].into_iter().map(ToOwned::to_owned).collect(); + let type_params = + [scale_info::TypeParameter { name: "T".to_owned(), ty: Some(inner_id.into()) }]; + + Ok(scale_info::Type::new( + scale_info::Path { segments: path }, + type_params, + scale_info::TypeDef::Compact(scale_info::TypeDefCompact { + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_bit_sequence( + self, + store_format: BitsStoreFormat, + order_format: BitsOrderFormat, + ) -> Self::Value { + // These order types are added by default into a `TypeRegistry`, so we + // expect them to exist. Parsing should always succeed. 
+ let order_ty_str = match order_format { + BitsOrderFormat::Lsb0 => "bitvec::order::Lsb0", + BitsOrderFormat::Msb0 => "bitvec::order::Msb0", + }; + let order_ty = LookupName::parse(order_ty_str).unwrap(); + let new_order_ty = self.builder.add_type(order_ty)?; + + // The store types also exist by default. Parsing should always succeed. + let store_ty_str = match store_format { + BitsStoreFormat::U8 => "u8", + BitsStoreFormat::U16 => "u16", + BitsStoreFormat::U32 => "u32", + BitsStoreFormat::U64 => "u64", + }; + let store_ty = LookupName::parse(store_ty_str).unwrap(); + let new_store_ty = self.builder.add_type(store_ty)?; + + // Configure the path and type params to look like BitVec's to try + // and maximise compatibility. + let path = ["bitvec", "vec", "BitVec"].into_iter().map(ToOwned::to_owned).collect(); + let type_params = [ + scale_info::TypeParameter { name: "Store".to_owned(), ty: Some(new_store_ty.into()) }, + scale_info::TypeParameter { name: "Order".to_owned(), ty: Some(new_order_ty.into()) }, + ]; + + Ok(scale_info::Type::new( + scale_info::Path { segments: path }, + type_params, + scale_info::TypeDef::BitSequence(scale_info::TypeDefBitSequence { + bit_order_type: new_order_ty.into(), + bit_store_type: new_store_ty.into(), + }), + Default::default(), + )) + } +} + +fn prepare_path<'info, Path: PathIter<'info>>( + path: Path, + builder: &mut PortableRegistryBuilder<'_>, +) -> Vec { + // If no sanitizint, just return the path as-is. + if !builder.sanitize_paths { + return path.map(|p| p.to_owned()).collect(); + } + + /// Names of prelude types. For codegen to work, any type that _isn't_ one of these must + /// have a path that is sensible and can be converted to module names. 
+ static PRELUDE_TYPE_NAMES: [&str; 24] = [ + "Vec", + "Option", + "Result", + "Cow", + "BTreeMap", + "BTreeSet", + "BinaryHeap", + "VecDeque", + "LinkedList", + "Range", + "RangeInclusive", + "NonZeroI8", + "NonZeroU8", + "NonZeroI16", + "NonZeroU16", + "NonZeroI32", + "NonZeroU32", + "NonZeroI64", + "NonZeroU64", + "NonZeroI128", + "NonZeroU128", + "NonZeroIsize", + "NonZeroUsize", + "Duration", + ]; + + let path: Vec<&str> = path.collect(); + + // No path should be empty; at least the type name should be present. + if path.is_empty() { + panic!( + "Empty path is not expected when converting legacy type; type name expected at least" + ); + } + + // The special::Unknown type can be returned as is; dupe paths allowed. + if path.len() == 2 && path[0] == "special" && path[1] == "Unknown" { + return vec!["special".to_owned(), "Unknown".to_owned()]; + } + + // If non-prelude type has no path, give it one. + if path.len() == 1 && !PRELUDE_TYPE_NAMES.contains(&path[0]) { + return vec![ + "other".to_owned(), + prepare_ident(path[0], &mut builder.seen_names_in_default_path), + ]; + } + + // Non-compliant paths are converted to our default path + let non_compliant_path = path[0..path.len() - 1].iter().any(|&p| { + p.is_empty() || + p.starts_with(|c: char| !c.is_ascii_alphabetic()) || + p.contains(|c: char| !c.is_ascii_alphanumeric() || c.is_ascii_uppercase()) + }); + if non_compliant_path { + let last = *path.last().unwrap(); + return vec![ + "other".to_owned(), + prepare_ident(last, &mut builder.seen_names_in_default_path), + ]; + } + + // If path happens by chance to be ["other", Foo] then ensure Foo isn't duped + if path.len() == 2 && path[0] == "other" { + return vec![ + "other".to_owned(), + prepare_ident(path[1], &mut builder.seen_names_in_default_path), + ]; + } + + path.iter().map(|&p| p.to_owned()).collect() +} + +fn prepare_ident(base_ident: &str, seen: &mut BTreeSet) -> String { + let mut n = 1; + let mut ident = base_ident.to_owned(); + while 
!seen.insert(ident.clone()) { + ident = format!("{base_ident}{n}"); + n += 1; + } + ident +} + +fn unknown_type() -> scale_info::Type { + scale_info::Type::new( + scale_info::Path { segments: Vec::from_iter(["special".to_owned(), "Unknown".to_owned()]) }, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { variants: Vec::new() }), + Default::default(), + ) +} diff --git a/vendor/pezkuwi-subxt/metadata/src/from/legacy/tests.rs b/vendor/pezkuwi-subxt/metadata/src/from/legacy/tests.rs new file mode 100644 index 00000000..f67343fc --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/legacy/tests.rs @@ -0,0 +1,458 @@ +use super::*; +use alloc::collections::BTreeSet; +use codec::Decode; +use core::str::FromStr; +use frame_decode::{constants::ConstantTypeInfo, runtime_apis::RuntimeApiEntryInfo}; +use frame_metadata::RuntimeMetadata; +use scale_info_legacy::LookupName; +use scale_type_resolver::TypeResolver; + +/// Load some legacy kusama metadata from our artifacts. +fn legacy_kusama_metadata(version: u8) -> (u64, RuntimeMetadata) { + const VERSIONS: [(u8, u64, &str); 5] = [ + (9, 1021, "metadata_v9_1021.scale"), + (10, 1038, "metadata_v10_1038.scale"), + (11, 1045, "metadata_v11_1045.scale"), + (12, 2025, "metadata_v12_2025.scale"), + (13, 9030, "metadata_v13_9030.scale"), + ]; + + let (spec_version, filename) = VERSIONS + .iter() + .find(|(v, _spec_version, _filename)| *v == version) + .map(|(_, spec_version, name)| (*spec_version, *name)) + .unwrap_or_else(|| panic!("v{version} metadata artifact does not exist")); + + let mut path = std::path::PathBuf::from_str("../artifacts/kusama/").unwrap(); + path.push(filename); + + let bytes = std::fs::read(path).expect("Could not read file"); + let metadata = RuntimeMetadata::decode(&mut &*bytes).expect("Could not SCALE decode metadata"); + + (spec_version, metadata) +} + +/// Load our kusama types. 
+/// TODO: This is WRONG at the moment; change to point to kusama types when they exist: +fn kusama_types() -> scale_info_legacy::ChainTypeRegistry { + frame_decode::legacy_types::pezkuwi::relay_chain() +} + +/// Sanitizing paths changes things between old and new, so disable this in tests by default +/// so that we can compare paths and check that by default things translate identically. +/// Tests assume that ignore_not_found is enabled, which converts not found types to +/// special::Unknown instead of returning an error. +fn test_opts() -> super::Opts { + super::Opts { sanitize_paths: false, ignore_not_found: true } +} + +/// Return a pair of original metadata + converted pezkuwi_subxt_metadata::Metadata +fn metadata_pair( + version: u8, + opts: super::Opts, +) -> (TypeRegistrySet<'static>, RuntimeMetadata, crate::Metadata) { + let (spec_version, metadata) = legacy_kusama_metadata(version); + let types = kusama_types(); + + // Extend the types with builtins. + let types_for_spec = { + let mut types_for_spec = types.for_spec_version(spec_version).to_owned(); + let extended_types = + frame_decode::helpers::type_registry_from_metadata_any(&metadata).unwrap(); + types_for_spec.prepend(extended_types); + types_for_spec + }; + + let pezkuwi_subxt_metadata = match &metadata { + RuntimeMetadata::V9(m) => super::from_v9(m, &types_for_spec, opts), + RuntimeMetadata::V10(m) => super::from_v10(m, &types_for_spec, opts), + RuntimeMetadata::V11(m) => super::from_v11(m, &types_for_spec, opts), + RuntimeMetadata::V12(m) => super::from_v12(m, &types_for_spec, opts), + RuntimeMetadata::V13(m) => super::from_v13(m, &types_for_spec, opts), + _ => panic!("Metadata version {} not expected", metadata.version()), + } + .expect("Could not convert to pezkuwi_subxt_metadata::Metadata"); + + (types_for_spec, metadata, pezkuwi_subxt_metadata) +} + +/// A representation of the shape of some type that we can compare across metadatas. 
+#[derive(PartialEq, Debug, Clone)] +enum Shape { + Array(Box, usize), + BitSequence(scale_type_resolver::BitsStoreFormat, scale_type_resolver::BitsOrderFormat), + Compact(Box), + Composite(Vec, Vec<(Option, Shape)>), + Primitive(scale_type_resolver::Primitive), + Sequence(Vec, Box), + Tuple(Vec), + Variant(Vec, Vec), + // This is very important for performance; if we've already seen a variant at some path, + // we'll return just the variant path next time in this, to avoid duplicating lots of + // variants. This also eliminates recursion, since variants allow for it. + SeenVariant(Vec), +} + +#[derive(PartialEq, Debug, Clone)] +struct Variant { + index: u8, + name: String, + fields: Vec<(Option, Shape)>, +} + +impl Shape { + /// convert some modern type definition into a [`Shape`]. + fn from_modern_type(id: u32, types: &scale_info::PortableRegistry) -> Shape { + let mut seen_variants = BTreeSet::new(); + Shape::from_modern_type_inner(id, &mut seen_variants, types) + } + + fn from_modern_type_inner( + id: u32, + seen_variants: &mut BTreeSet>, + types: &scale_info::PortableRegistry, + ) -> Shape { + let visitor = + scale_type_resolver::visitor::new((seen_variants, types), |_, _| panic!("Unhandled")) + .visit_array(|(seen_variants, types), type_id, len| { + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Array(Box::new(inner), len) + }) + .visit_bit_sequence(|_, store, order| Shape::BitSequence(store, order)) + .visit_compact(|(seen_variants, types), type_id| { + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Compact(Box::new(inner)) + }) + .visit_composite(|(seen_variants, types), path, fields| { + let path = path.map(|p| p.to_owned()).collect(); + let inners = fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = + Shape::from_modern_type_inner(field.id, seen_variants, types); + (name, inner) + }) + .collect(); + Shape::Composite(path, inners) + }) + 
.visit_primitive(|_types, prim| Shape::Primitive(prim)) + .visit_sequence(|(seen_variants, types), path, type_id| { + let path = path.map(|p| p.to_owned()).collect(); + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Sequence(path, Box::new(inner)) + }) + .visit_tuple(|(seen_variants, types), fields| { + let inners = fields + .map(|field| Shape::from_modern_type_inner(field, seen_variants, types)) + .collect(); + Shape::Tuple(inners) + }) + .visit_variant(|(seen_variants, types), path, variants| { + let path: Vec = path.map(|p| p.to_owned()).collect(); + // very important to avoid recursion and performance costs: + if !seen_variants.insert(path.clone()) { + return Shape::SeenVariant(path); + } + let variants = variants + .map(|v| Variant { + index: v.index, + name: v.name.to_owned(), + fields: v + .fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = Shape::from_modern_type_inner( + field.id, + seen_variants, + types, + ); + (name, inner) + }) + .collect(), + }) + .collect(); + Shape::Variant(path, variants) + }) + .visit_not_found(|_types| { + panic!("PortableRegistry should not have a type which can't be found") + }); + + types.resolve_type(id, visitor).unwrap() + } + + /// convert some historic type definition into a [`Shape`]. 
+ fn from_legacy_type(name: &LookupName, types: &TypeRegistrySet<'_>) -> Shape { + let mut seen_variants = BTreeSet::new(); + Shape::from_legacy_type_inner(name.clone(), &mut seen_variants, types) + } + + fn from_legacy_type_inner( + id: LookupName, + seen_variants: &mut BTreeSet>, + types: &TypeRegistrySet<'_>, + ) -> Shape { + let visitor = + scale_type_resolver::visitor::new((seen_variants, types), |_, _| panic!("Unhandled")) + .visit_array(|(seen_variants, types), type_id, len| { + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Array(Box::new(inner), len) + }) + .visit_bit_sequence(|_types, store, order| Shape::BitSequence(store, order)) + .visit_compact(|(seen_variants, types), type_id| { + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Compact(Box::new(inner)) + }) + .visit_composite(|(seen_variants, types), path, fields| { + let path = path.map(|p| p.to_owned()).collect(); + let inners = fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = + Shape::from_legacy_type_inner(field.id, seen_variants, types); + (name, inner) + }) + .collect(); + Shape::Composite(path, inners) + }) + .visit_primitive(|_types, prim| Shape::Primitive(prim)) + .visit_sequence(|(seen_variants, types), path, type_id| { + let path = path.map(|p| p.to_owned()).collect(); + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Sequence(path, Box::new(inner)) + }) + .visit_tuple(|(seen_variants, types), fields| { + let inners = fields + .map(|field| Shape::from_legacy_type_inner(field, seen_variants, types)) + .collect(); + Shape::Tuple(inners) + }) + .visit_variant(|(seen_variants, types), path, variants| { + let path: Vec = path.map(|p| p.to_owned()).collect(); + // very important to avoid recursion and performance costs: + if !seen_variants.insert(path.clone()) { + return Shape::SeenVariant(path); + } + let variants = variants + .map(|v| Variant { + index: 
v.index, + name: v.name.to_owned(), + fields: v + .fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = Shape::from_legacy_type_inner( + field.id, + seen_variants, + types, + ); + (name, inner) + }) + .collect(), + }) + .collect(); + Shape::Variant(path, variants) + }) + .visit_not_found(|(seen_variants, _)| { + // When we convert legacy to modern types, any types we don't find + // are replaced with empty variants (since we can't have dangling types + // in our new PortableRegistry). Do the same here so they compare equal. + Shape::from_legacy_type_inner( + LookupName::parse("special::Unknown").unwrap(), + seen_variants, + types, + ) + }); + + types.resolve_type(id, visitor).unwrap() + } +} + +// Go over all of the constants listed via frame-decode and check that our old +// and new metadatas both have identical output. +macro_rules! constants_eq { + ($name:ident, $version:literal, $version_path:ident) => { + #[test] + fn $name() { + let (old_types, old_md, new_md) = metadata_pair($version, test_opts()); + let RuntimeMetadata::$version_path(old_md) = old_md else { panic!("Wrong version") }; + + let old: Vec<_> = old_md + .constant_tuples() + .map(|(p, n)| old_md.constant_info(&p, &n).unwrap()) + .map(|c| (c.bytes.to_owned(), Shape::from_legacy_type(&c.type_id, &old_types))) + .collect(); + let new: Vec<_> = new_md + .constant_tuples() + .map(|(p, n)| new_md.constant_info(&p, &n).unwrap()) + .map(|c| (c.bytes.to_owned(), Shape::from_modern_type(c.type_id, new_md.types()))) + .collect(); + + assert_eq!(old, new); + } + }; +} + +constants_eq!(v9_constants_eq, 9, V9); +constants_eq!(v10_constants_eq, 10, V10); +constants_eq!(v11_constants_eq, 11, V11); +constants_eq!(v12_constants_eq, 12, V12); +constants_eq!(v13_constants_eq, 13, V13); + +/// Make sure all Runtime APIs are the same once translated. 
+#[test] +fn runtime_apis() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old: Vec<_> = old_types + .runtime_api_tuples() + .map(|(p, n)| { + old_types + .runtime_api_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_legacy_type(&id, &old_types))) + .unwrap() + }) + .collect(); + let new: Vec<_> = new_md + .runtime_api_tuples() + .map(|(p, n)| { + new_md + .runtime_api_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_modern_type(id, new_md.types()))) + .unwrap() + }) + .collect(); + + assert_eq!(old, new); + } +} + +macro_rules! storage_eq { + ($name:ident, $version:literal, $version_path:ident) => { + #[test] + fn $name() { + let (old_types, old_md, new_md) = metadata_pair($version, test_opts()); + let RuntimeMetadata::$version_path(old_md) = old_md else { panic!("Wrong version") }; + + let old: Vec<_> = old_md + .storage_tuples() + .map(|(p, n)| { + let info = old_md + .storage_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_legacy_type(&id, &old_types))) + .unwrap(); + (p.into_owned(), n.into_owned(), info) + }) + .collect(); + + let new: Vec<_> = new_md + .storage_tuples() + .map(|(p, n)| { + let info = new_md + .storage_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_modern_type(id, new_md.types()))) + .unwrap(); + (p.into_owned(), n.into_owned(), info) + }) + .collect(); + + if old.len() != new.len() { + panic!("Storage entries for version 9 metadata differ in length"); + } + + for (old, new) in old.into_iter().zip(new.into_iter()) { + assert_eq!((&old.0, &old.1), (&new.0, &new.1), "Storage entry mismatch"); + assert_eq!(old.2, new.2, "Storage entry {}.{} does not match!", old.0, old.1); + } + } + }; +} + +storage_eq!(v9_storage_eq, 9, V9); +storage_eq!(v10_storage_eq, 10, V10); +storage_eq!(v11_storage_eq, 11, V11); +storage_eq!(v12_storage_eq, 12, V12); +storage_eq!(v13_storage_eq, 13, V13); + +#[test] +fn builtin_call() { + for 
version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = Shape::from_legacy_type(&LookupName::parse("builtin::Call").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.call_enum_ty, new_md.types()); + assert_eq!(old, new, "Call types do not match in metadata V{version}!"); + } +} + +#[test] +fn builtin_error() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = + Shape::from_legacy_type(&LookupName::parse("builtin::Error").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.error_enum_ty, new_md.types()); + assert_eq!(old, new, "Error types do not match in metadata V{version}!"); + } +} + +#[test] +fn builtin_event() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = + Shape::from_legacy_type(&LookupName::parse("builtin::Event").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.event_enum_ty, new_md.types()); + assert_eq!(old, new, "Event types do not match in metadata V{version}!"); + } +} + +#[test] +fn codegen_works() { + for version in 9..=13 { + // We need to do this against `pezkuwi_subxt_codegen::Metadata` and so cannot re-use our + // test functions for it. This is because the compiler sees some difference between + // `subxct_codegen::Metadata` and `crate::Metadata` even though they should be identical. 
+ let new_md = { + let (spec_version, metadata) = legacy_kusama_metadata(version); + let types = kusama_types(); + + let types_for_spec = { + let mut types_for_spec = types.for_spec_version(spec_version).to_owned(); + let extended_types = + frame_decode::helpers::type_registry_from_metadata_any(&metadata).unwrap(); + types_for_spec.prepend(extended_types); + types_for_spec + }; + + match &metadata { + RuntimeMetadata::V9(m) => + pezkuwi_subxt_codegen::Metadata::from_v9(m, &types_for_spec), + RuntimeMetadata::V10(m) => + pezkuwi_subxt_codegen::Metadata::from_v10(m, &types_for_spec), + RuntimeMetadata::V11(m) => + pezkuwi_subxt_codegen::Metadata::from_v11(m, &types_for_spec), + RuntimeMetadata::V12(m) => + pezkuwi_subxt_codegen::Metadata::from_v12(m, &types_for_spec), + RuntimeMetadata::V13(m) => + pezkuwi_subxt_codegen::Metadata::from_v13(m, &types_for_spec), + _ => panic!("Metadata version {} not expected", metadata.version()), + } + .expect("Could not convert to pezkuwi_subxt_metadata::Metadata") + }; + + // We only test that generation succeeds without any errors, not necessarily that it's 100% + // useful: + let codegen = pezkuwi_subxt_codegen::CodegenBuilder::new(); + let _ = codegen + .generate(new_md) + .map_err(|e| e.into_compile_error()) + .unwrap_or_else(|e| panic!("Codegen failed for metadata V{version}: {e}")); + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/from/mod.rs b/vendor/pezkuwi-subxt/metadata/src/from/mod.rs new file mode 100644 index 00000000..93acf332 --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/mod.rs @@ -0,0 +1,89 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use alloc::string::String; +use thiserror::Error as DeriveError; +mod v14; +mod v15; +mod v16; + +/// Legacy translation hidden behind the corresponding feature flag. 
+#[cfg(feature = "legacy")] +pub mod legacy; + +/// The metadata versions that we support converting into [`crate::Metadata`]. +/// These are ordest from highest to lowest, so that the metadata we'd want to +/// pick first is first in the array. +pub const SUPPORTED_METADATA_VERSIONS: [u32; 3] = [16, 15, 14]; + +/// An error emitted if something goes wrong converting [`frame_metadata`] +/// types into [`crate::Metadata`]. +#[derive(Debug, PartialEq, Eq, DeriveError)] +#[non_exhaustive] +pub enum TryFromError { + /// Type missing from type registry + #[error("Type id {0} is expected but not found in the type registry")] + TypeNotFound(u32), + /// Type was not a variant/enum type + #[error("Type {0} was not a variant/enum type, but is expected to be one")] + VariantExpected(u32), + /// An unsupported metadata version was provided. + #[error("Cannot convert v{0} metadata into Metadata type")] + UnsupportedMetadataVersion(u32), + /// Type name missing from type registry + #[error("Type name {0} is expected but not found in the type registry")] + TypeNameNotFound(String), + /// Invalid type path. + #[error("Type has an invalid path {0}")] + InvalidTypePath(String), + /// Cannot decode storage entry information. + #[error("Error decoding storage entry information: {0}")] + StorageInfoError(#[from] frame_decode::storage::StorageInfoError<'static>), + /// Cannot decode Runtime API information. + #[error("Error decoding Runtime API information: {0}")] + RuntimeInfoError(#[from] frame_decode::runtime_apis::RuntimeApiInfoError<'static>), + /// Cannot decode View Function information. 
+ #[error("Error decoding View Function information: {0}")] + ViewFunctionInfoError(#[from] frame_decode::view_functions::ViewFunctionInfoError<'static>), +} + +impl TryFrom for crate::Metadata { + type Error = TryFromError; + + fn try_from(value: frame_metadata::RuntimeMetadataPrefixed) -> Result { + match value.1 { + frame_metadata::RuntimeMetadata::V0(_) => + Err(TryFromError::UnsupportedMetadataVersion(0)), + frame_metadata::RuntimeMetadata::V1(_) => + Err(TryFromError::UnsupportedMetadataVersion(1)), + frame_metadata::RuntimeMetadata::V2(_) => + Err(TryFromError::UnsupportedMetadataVersion(2)), + frame_metadata::RuntimeMetadata::V3(_) => + Err(TryFromError::UnsupportedMetadataVersion(3)), + frame_metadata::RuntimeMetadata::V4(_) => + Err(TryFromError::UnsupportedMetadataVersion(4)), + frame_metadata::RuntimeMetadata::V5(_) => + Err(TryFromError::UnsupportedMetadataVersion(5)), + frame_metadata::RuntimeMetadata::V6(_) => + Err(TryFromError::UnsupportedMetadataVersion(6)), + frame_metadata::RuntimeMetadata::V7(_) => + Err(TryFromError::UnsupportedMetadataVersion(7)), + frame_metadata::RuntimeMetadata::V8(_) => + Err(TryFromError::UnsupportedMetadataVersion(8)), + frame_metadata::RuntimeMetadata::V9(_) => + Err(TryFromError::UnsupportedMetadataVersion(9)), + frame_metadata::RuntimeMetadata::V10(_) => + Err(TryFromError::UnsupportedMetadataVersion(10)), + frame_metadata::RuntimeMetadata::V11(_) => + Err(TryFromError::UnsupportedMetadataVersion(11)), + frame_metadata::RuntimeMetadata::V12(_) => + Err(TryFromError::UnsupportedMetadataVersion(12)), + frame_metadata::RuntimeMetadata::V13(_) => + Err(TryFromError::UnsupportedMetadataVersion(13)), + frame_metadata::RuntimeMetadata::V14(m) => m.try_into(), + frame_metadata::RuntimeMetadata::V15(m) => m.try_into(), + frame_metadata::RuntimeMetadata::V16(m) => m.try_into(), + } + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/from/v14.rs b/vendor/pezkuwi-subxt/metadata/src/from/v14.rs new file mode 100644 index 
00000000..8673c4dd --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/v14.rs @@ -0,0 +1,305 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::TryFromError; + +use crate::{ + ConstantMetadata, CustomMetadataInner, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, + PalletMetadataInner, StorageEntryMetadata, StorageMetadata, TransactionExtensionMetadataInner, + utils::{ordered_map::OrderedMap, variant_index::VariantIndex}, +}; +use alloc::{borrow::ToOwned, collections::BTreeMap, format, string::String, vec, vec::Vec}; +use frame_decode::storage::StorageTypeInfo; +use frame_metadata::v14; +use hashbrown::HashMap; +use scale_info::form::PortableForm; + +impl TryFrom for Metadata { + type Error = TryFromError; + fn try_from(mut m: v14::RuntimeMetadataV14) -> Result { + let outer_enums = generate_outer_enums(&mut m)?; + let missing_extrinsic_type_ids = MissingExtrinsicTypeIds::generate_from(&m)?; + + let mut pallets = OrderedMap::new(); + let mut pallets_by_index = HashMap::new(); + for (pos, p) in m.pallets.iter().enumerate() { + let name: String = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name: String = s.name.clone(); + let storage_info = m + .storage_info(&name, &entry_name) + .map_err(|e| e.into_owned())? 
+ .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name.clone(), + info: storage_info, + docs: s.docs.clone(), + }; + + Ok::<_, TryFromError>((entry_name, storage_entry)) + }) + .collect::>()?, + }), + }; + + let constants = p.constants.iter().map(|c| { + let name = c.name.clone(); + (name, from_constant_metadata(c.clone())) + }); + + let call_variant_index = + VariantIndex::build(p.calls.as_ref().map(|c| c.ty.id), &m.types); + let error_variant_index = + VariantIndex::build(p.error.as_ref().map(|e| e.ty.id), &m.types); + let event_variant_index = + VariantIndex::build(p.event.as_ref().map(|e| e.ty.id), &m.types); + + pallets_by_index.insert(p.index, pos); + pallets.push_insert( + name.clone(), + PalletMetadataInner { + name: name.clone(), + call_index: p.index, + event_index: p.index, + error_index: p.index, + storage, + call_ty: p.calls.as_ref().map(|c| c.ty.id), + call_variant_index, + event_ty: p.event.as_ref().map(|e| e.ty.id), + event_variant_index, + error_ty: p.error.as_ref().map(|e| e.ty.id), + error_variant_index, + constants: constants.collect(), + view_functions: Default::default(), + associated_types: Default::default(), + docs: vec![], + }, + ); + } + + let dispatch_error_ty = m + .types + .types + .iter() + .find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"]) + .map(|ty| ty.id); + + Ok(Metadata { + types: m.types, + pallets, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, + extrinsic: from_extrinsic_metadata(m.extrinsic, missing_extrinsic_type_ids), + dispatch_error_ty, + outer_enums: OuterEnumsMetadata { + call_enum_ty: outer_enums.call_enum_ty.id, + event_enum_ty: outer_enums.event_enum_ty.id, + error_enum_ty: outer_enums.error_enum_ty.id, + }, + apis: Default::default(), + custom: CustomMetadataInner { map: Default::default() }, + }) + } +} + +fn from_signed_extension_metadata( + value: 
v14::SignedExtensionMetadata, +) -> TransactionExtensionMetadataInner { + TransactionExtensionMetadataInner { + identifier: value.identifier, + extra_ty: value.ty.id, + additional_ty: value.additional_signed.id, + } +} + +fn from_extrinsic_metadata( + value: v14::ExtrinsicMetadata, + missing_ids: MissingExtrinsicTypeIds, +) -> ExtrinsicMetadata { + let transaction_extensions: Vec<_> = value + .signed_extensions + .into_iter() + .map(from_signed_extension_metadata) + .collect(); + + let transaction_extension_indexes = (0..transaction_extensions.len() as u32).collect(); + + ExtrinsicMetadata { + supported_versions: vec![value.version], + transaction_extensions, + address_ty: missing_ids.address, + signature_ty: missing_ids.signature, + transaction_extensions_by_version: BTreeMap::from_iter([( + 0, + transaction_extension_indexes, + )]), + } +} + +fn from_constant_metadata(s: v14::PalletConstantMetadata) -> ConstantMetadata { + ConstantMetadata { name: s.name, ty: s.ty.id, value: s.value, docs: s.docs } +} + +fn generate_outer_enums( + metadata: &mut v14::RuntimeMetadataV14, +) -> Result, TryFromError> { + let outer_enums = OuterEnums::find_in(&metadata.types); + + let Some(call_enum_id) = outer_enums.call_ty else { + return Err(TryFromError::TypeNameNotFound("RuntimeCall".into())); + }; + let Some(event_type_id) = outer_enums.event_ty else { + return Err(TryFromError::TypeNameNotFound("RuntimeEvent".into())); + }; + let error_type_id = if let Some(id) = outer_enums.error_ty { + id + } else { + let call_enum = &metadata.types.types[call_enum_id as usize]; + let mut error_path = call_enum.ty.path.segments.clone(); + + let Some(last) = error_path.last_mut() else { + return Err(TryFromError::InvalidTypePath("RuntimeCall".into())); + }; + "RuntimeError".clone_into(last); + generate_outer_error_enum_type(metadata, error_path) + }; + + Ok(frame_metadata::v15::OuterEnums { + call_enum_ty: call_enum_id.into(), + event_enum_ty: event_type_id.into(), + error_enum_ty: 
error_type_id.into(), + }) +} + +/// Generates an outer `RuntimeError` enum type and adds it to the metadata. +/// +/// Returns the id of the generated type from the registry. +fn generate_outer_error_enum_type( + metadata: &mut v14::RuntimeMetadataV14, + path_segments: Vec, +) -> u32 { + let variants: Vec<_> = metadata + .pallets + .iter() + .filter_map(|pallet| { + let error = pallet.error.as_ref()?; + let path = format!("{}Error", pallet.name); + let ty = error.ty.id.into(); + + Some(scale_info::Variant { + name: pallet.name.clone(), + fields: vec![scale_info::Field { + name: None, + ty, + type_name: Some(path), + docs: vec![], + }], + index: pallet.index, + docs: vec![], + }) + }) + .collect(); + + let enum_type = scale_info::Type { + path: scale_info::Path { segments: path_segments }, + type_params: vec![], + type_def: scale_info::TypeDef::Variant(scale_info::TypeDefVariant { variants }), + docs: vec![], + }; + + let enum_type_id = metadata.types.types.len() as u32; + + metadata + .types + .types + .push(scale_info::PortableType { id: enum_type_id, ty: enum_type }); + + enum_type_id +} + +/// The type IDs extracted from the metadata that represent the +/// generic type parameters passed to the `UncheckedExtrinsic` from +/// the bizinikiwi-based chain. 
+#[derive(Clone, Copy)] +struct MissingExtrinsicTypeIds { + address: u32, + signature: u32, +} + +impl MissingExtrinsicTypeIds { + fn generate_from( + metadata: &v14::RuntimeMetadataV14, + ) -> Result { + const ADDRESS: &str = "Address"; + const SIGNATURE: &str = "Signature"; + + let extrinsic_id = metadata.extrinsic.ty.id; + let Some(extrinsic_ty) = metadata.types.resolve(extrinsic_id) else { + return Err(TryFromError::TypeNotFound(extrinsic_id)); + }; + + let find_param = |name: &'static str| -> Option { + extrinsic_ty + .type_params + .iter() + .find(|param| param.name.as_str() == name) + .and_then(|param| param.ty.as_ref()) + .map(|ty| ty.id) + }; + + let Some(address) = find_param(ADDRESS) else { + return Err(TryFromError::TypeNameNotFound(ADDRESS.into())); + }; + let Some(signature) = find_param(SIGNATURE) else { + return Err(TryFromError::TypeNameNotFound(SIGNATURE.into())); + }; + + Ok(MissingExtrinsicTypeIds { address, signature }) + } +} + +/// Outer enum IDs, which are required in Subxt but are not present in V14 metadata. +pub struct OuterEnums { + /// The RuntimeCall type ID. + pub call_ty: Option, + /// The RuntimeEvent type ID. + pub event_ty: Option, + /// The RuntimeError type ID. 
+ pub error_ty: Option, +} + +impl OuterEnums { + pub fn find_in(types: &scale_info::PortableRegistry) -> OuterEnums { + let find_type = |name: &str| { + types.types.iter().find_map(|ty| { + let ident = ty.ty.path.ident()?; + + if ident != name { + return None; + } + + let scale_info::TypeDef::Variant(_) = &ty.ty.type_def else { + return None; + }; + + Some(ty.id) + }) + }; + + OuterEnums { + call_ty: find_type("RuntimeCall"), + event_ty: find_type("RuntimeEvent"), + error_ty: find_type("RuntimeError"), + } + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/from/v15.rs b/vendor/pezkuwi-subxt/metadata/src/from/v15.rs new file mode 100644 index 00000000..0093a5d5 --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/v15.rs @@ -0,0 +1,177 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::TryFromError; + +use crate::{ + ConstantMetadata, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, + RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, StorageEntryMetadata, StorageMetadata, + TransactionExtensionMetadataInner, + utils::{ordered_map::OrderedMap, variant_index::VariantIndex}, +}; +use alloc::{collections::BTreeMap, vec, vec::Vec}; +use frame_decode::{runtime_apis::RuntimeApiTypeInfo, storage::StorageTypeInfo}; +use frame_metadata::v15; +use hashbrown::HashMap; +use scale_info::form::PortableForm; + +impl TryFrom for Metadata { + type Error = TryFromError; + fn try_from(m: v15::RuntimeMetadataV15) -> Result { + let mut pallets = OrderedMap::new(); + let mut pallets_by_index = HashMap::new(); + for (pos, p) in m.pallets.iter().enumerate() { + let name = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name = s.name.clone(); + let storage_info = m + .storage_info(&name, &entry_name) + 
.map_err(|e| e.into_owned())? + .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name.clone(), + info: storage_info, + docs: s.docs.clone(), + }; + + Ok::<_, TryFromError>((entry_name, storage_entry)) + }) + .collect::>()?, + }), + }; + + let constants = p.constants.iter().map(|c| { + let name = c.name.clone(); + (name, from_constant_metadata(c.clone())) + }); + + let call_variant_index = + VariantIndex::build(p.calls.as_ref().map(|c| c.ty.id), &m.types); + let error_variant_index = + VariantIndex::build(p.error.as_ref().map(|e| e.ty.id), &m.types); + let event_variant_index = + VariantIndex::build(p.event.as_ref().map(|e| e.ty.id), &m.types); + + pallets_by_index.insert(p.index, pos); + pallets.push_insert( + name.clone(), + PalletMetadataInner { + name, + call_index: p.index, + event_index: p.index, + error_index: p.index, + storage, + call_ty: p.calls.as_ref().map(|c| c.ty.id), + call_variant_index, + event_ty: p.event.as_ref().map(|e| e.ty.id), + event_variant_index, + error_ty: p.error.as_ref().map(|e| e.ty.id), + error_variant_index, + constants: constants.collect(), + view_functions: Default::default(), + associated_types: Default::default(), + docs: p.docs.clone(), + }, + ); + } + + let apis = m + .apis + .iter() + .map(|api| { + let trait_name = api.name.clone(); + let methods = api + .methods + .iter() + .map(|method| { + let method_name = method.name.clone(); + let method_info = RuntimeApiMethodMetadataInner { + info: m + .runtime_api_info(&trait_name, &method.name) + .map_err(|e| e.into_owned())? 
+ .into_owned(), + name: method.name.clone(), + docs: method.docs.clone(), + }; + Ok((method_name, method_info)) + }) + .collect::>()?; + + let runtime_api_metadata = RuntimeApiMetadataInner { + name: trait_name.clone(), + methods, + docs: api.docs.clone(), + }; + Ok((trait_name, runtime_api_metadata)) + }) + .collect::>()?; + + let dispatch_error_ty = m + .types + .types + .iter() + .find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"]) + .map(|ty| ty.id); + + Ok(Metadata { + types: m.types, + pallets, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, + extrinsic: from_extrinsic_metadata(m.extrinsic), + dispatch_error_ty, + apis, + outer_enums: OuterEnumsMetadata { + call_enum_ty: m.outer_enums.call_enum_ty.id, + event_enum_ty: m.outer_enums.event_enum_ty.id, + error_enum_ty: m.outer_enums.error_enum_ty.id, + }, + custom: m.custom, + }) + } +} + +fn from_signed_extension_metadata( + value: v15::SignedExtensionMetadata, +) -> TransactionExtensionMetadataInner { + TransactionExtensionMetadataInner { + identifier: value.identifier, + extra_ty: value.ty.id, + additional_ty: value.additional_signed.id, + } +} + +fn from_extrinsic_metadata(value: v15::ExtrinsicMetadata) -> ExtrinsicMetadata { + let transaction_extensions: Vec<_> = value + .signed_extensions + .into_iter() + .map(from_signed_extension_metadata) + .collect(); + + let transaction_extension_indexes = (0..transaction_extensions.len() as u32).collect(); + + ExtrinsicMetadata { + supported_versions: vec![value.version], + transaction_extensions, + address_ty: value.address_ty.id, + signature_ty: value.signature_ty.id, + transaction_extensions_by_version: BTreeMap::from_iter([( + 0, + transaction_extension_indexes, + )]), + } +} + +fn from_constant_metadata(s: v15::PalletConstantMetadata) -> ConstantMetadata { + ConstantMetadata { name: s.name, ty: s.ty.id, value: s.value, docs: s.docs } +} diff --git 
a/vendor/pezkuwi-subxt/metadata/src/from/v16.rs b/vendor/pezkuwi-subxt/metadata/src/from/v16.rs new file mode 100644 index 00000000..ee0272dc --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/from/v16.rs @@ -0,0 +1,203 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::TryFromError; + +use crate::{ + ConstantMetadata, ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, + RuntimeApiMetadataInner, RuntimeApiMethodMetadataInner, StorageEntryMetadata, StorageMetadata, + TransactionExtensionMetadataInner, ViewFunctionMetadataInner, + utils::{ordered_map::OrderedMap, variant_index::VariantIndex}, +}; +use frame_decode::{ + runtime_apis::RuntimeApiTypeInfo, storage::StorageTypeInfo, + view_functions::ViewFunctionTypeInfo, +}; +use frame_metadata::{v15, v16}; +use hashbrown::HashMap; +use scale_info::form::PortableForm; + +impl TryFrom for Metadata { + type Error = TryFromError; + fn try_from(m: v16::RuntimeMetadataV16) -> Result { + let types = &m.types; + + let mut pallets = OrderedMap::new(); + let mut pallets_by_index = HashMap::new(); + for (pos, p) in m.pallets.iter().enumerate() { + let name = p.name.clone(); + + let storage = match &p.storage { + None => None, + Some(s) => Some(StorageMetadata { + prefix: s.prefix.clone(), + entries: s + .entries + .iter() + .map(|s| { + let entry_name = s.name.clone(); + let storage_info = m + .storage_info(&name, &entry_name) + .map_err(|e| e.into_owned())? 
+ .into_owned(); + let storage_entry = StorageEntryMetadata { + name: entry_name.clone(), + info: storage_info, + docs: s.docs.clone(), + }; + + Ok::<_, TryFromError>((entry_name, storage_entry)) + }) + .collect::>()?, + }), + }; + + let view_functions = p + .view_functions + .iter() + .map(|vf| { + let view_function_metadata = ViewFunctionMetadataInner { + name: vf.name.clone(), + info: m + .view_function_info(&name, &vf.name) + .map_err(|e| e.into_owned())? + .into_owned(), + docs: vf.docs.clone(), + }; + Ok((vf.name.clone(), view_function_metadata)) + }) + .collect::>()?; + + let constants = p.constants.iter().map(|c| { + let name = c.name.clone(); + (name, from_constant_metadata(c.clone())) + }); + + let call_variant_index = VariantIndex::build(p.calls.as_ref().map(|c| c.ty.id), types); + let error_variant_index = VariantIndex::build(p.error.as_ref().map(|e| e.ty.id), types); + let event_variant_index = VariantIndex::build(p.event.as_ref().map(|e| e.ty.id), types); + + let associated_types = + p.associated_types.iter().map(|t| (t.name.clone(), t.ty.id)).collect(); + + pallets_by_index.insert(p.index, pos); + pallets.push_insert( + name.clone(), + PalletMetadataInner { + name, + call_index: p.index, + event_index: p.index, + error_index: p.index, + storage, + call_ty: p.calls.as_ref().map(|c| c.ty.id), + call_variant_index, + event_ty: p.event.as_ref().map(|e| e.ty.id), + event_variant_index, + error_ty: p.error.as_ref().map(|e| e.ty.id), + error_variant_index, + constants: constants.collect(), + view_functions, + associated_types, + docs: p.docs.clone(), + }, + ); + } + + let apis = m + .apis + .iter() + .map(|api| { + let trait_name = api.name.clone(); + let methods = api + .methods + .iter() + .map(|method| { + let method_name = method.name.clone(); + let method_info = RuntimeApiMethodMetadataInner { + info: m + .runtime_api_info(&trait_name, &method.name) + .map_err(|e| e.into_owned())? 
+ .into_owned(), + name: method.name.clone(), + docs: method.docs.clone(), + }; + Ok((method_name, method_info)) + }) + .collect::>()?; + + let runtime_api_metadata = RuntimeApiMetadataInner { + name: trait_name.clone(), + methods, + docs: api.docs.clone(), + }; + Ok((trait_name, runtime_api_metadata)) + }) + .collect::>()?; + + let custom_map = m + .custom + .map + .into_iter() + .map(|(key, val)| { + let custom_val = v15::CustomValueMetadata { ty: val.ty, value: val.value }; + (key, custom_val) + }) + .collect(); + + let dispatch_error_ty = types + .types + .iter() + .find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"]) + .map(|ty| ty.id); + + Ok(Metadata { + types: m.types, + pallets, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, + extrinsic: from_extrinsic_metadata(m.extrinsic), + dispatch_error_ty, + apis, + outer_enums: OuterEnumsMetadata { + call_enum_ty: m.outer_enums.call_enum_ty.id, + event_enum_ty: m.outer_enums.event_enum_ty.id, + error_enum_ty: m.outer_enums.error_enum_ty.id, + }, + custom: v15::CustomMetadata { map: custom_map }, + }) + } +} + +fn from_transaction_extension_metadata( + value: v16::TransactionExtensionMetadata, +) -> TransactionExtensionMetadataInner { + TransactionExtensionMetadataInner { + identifier: value.identifier, + extra_ty: value.ty.id, + additional_ty: value.implicit.id, + } +} + +fn from_extrinsic_metadata(value: v16::ExtrinsicMetadata) -> ExtrinsicMetadata { + ExtrinsicMetadata { + supported_versions: value.versions, + transaction_extensions_by_version: value + .transaction_extensions_by_version + .into_iter() + .map(|(version, idxs)| (version, idxs.into_iter().map(|idx| idx.0).collect())) + .collect(), + transaction_extensions: value + .transaction_extensions + .into_iter() + .map(from_transaction_extension_metadata) + .collect(), + address_ty: value.address_ty.id, + signature_ty: value.signature_ty.id, + } +} 
+ +fn from_constant_metadata(s: v16::PalletConstantMetadata) -> ConstantMetadata { + ConstantMetadata { name: s.name, ty: s.ty.id, value: s.value, docs: s.docs } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/lib.rs b/vendor/pezkuwi-subxt/metadata/src/lib.rs new file mode 100644 index 00000000..7250532c --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/lib.rs @@ -0,0 +1,1234 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A representation of the metadata provided by a bizinikiwi based node. +//! This representation is optimized to be used by Subxt and related crates, +//! and is independent of the different versions of metadata that can be +//! provided from a node. +//! +//! Typically, this will be constructed by either: +//! +//! 1. Calling `Metadata::decode()` given some metadata bytes obtained from a node (this uses +//! [`codec::Decode`]). +//! 2. Obtaining [`frame_metadata::RuntimeMetadataPrefixed`], and then using `.try_into()` to +//! convert it into [`Metadata`]. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(missing_docs)] + +extern crate alloc; + +mod from; +mod utils; + +use alloc::{ + borrow::Cow, + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; +use frame_decode::{ + constants::{ConstantEntry, ConstantInfo, ConstantInfoError}, + custom_values::{CustomValue, CustomValueInfo, CustomValueInfoError}, + extrinsics::{ + ExtrinsicCallInfo, ExtrinsicExtensionInfo, ExtrinsicInfoArg, ExtrinsicInfoError, + ExtrinsicSignatureInfo, + }, + runtime_apis::{RuntimeApiEntry, RuntimeApiInfo, RuntimeApiInfoError, RuntimeApiInput}, + storage::{StorageEntry, StorageInfo, StorageInfoError, StorageKeyInfo}, + view_functions::{ + ViewFunctionEntry, ViewFunctionInfo, ViewFunctionInfoError, ViewFunctionInput, + }, +}; + +use hashbrown::HashMap; +use scale_info::{PortableRegistry, Variant, form::PortableForm}; +use utils::{ + ordered_map::OrderedMap, + validation::{HASH_LEN, get_custom_value_hash}, + variant_index::VariantIndex, +}; + +pub use frame_decode::storage::StorageHasher; +pub use from::{SUPPORTED_METADATA_VERSIONS, TryFromError}; +pub use utils::validation::MetadataHasher; + +#[cfg(feature = "legacy")] +pub use from::legacy::Error as LegacyFromError; + +type CustomMetadataInner = frame_metadata::v15::CustomMetadata; + +/// Node metadata. This can be constructed by providing some compatible [`frame_metadata`] +/// which is then decoded into this. We aim to preserve all of the existing information in +/// the incoming metadata while optimizing the format a little for Subxt's use cases. +#[derive(Debug, Clone)] +pub struct Metadata { + /// Type registry containing all types used in the metadata. + types: PortableRegistry, + /// Metadata of all the pallets. + pallets: OrderedMap, + /// Find the pallet for a given call index. + pallets_by_call_index: HashMap, + /// Find the pallet for a given event index. 
+ /// + /// for modern metadatas, this is the same as pallets_by_call_index, + /// but for old metadatas this can vary. + pallets_by_event_index: HashMap, + /// Find the pallet for a given error index. + /// + /// for modern metadatas, this is the same as pallets_by_call_index, + /// but for old metadatas this can vary. + pallets_by_error_index: HashMap, + /// Metadata of the extrinsic. + extrinsic: ExtrinsicMetadata, + /// The types of the outer enums. + outer_enums: OuterEnumsMetadata, + /// The type Id of the `DispatchError` type, which Subxt makes use of. + dispatch_error_ty: Option, + /// Details about each of the runtime API traits. + apis: OrderedMap, + /// Allows users to add custom types to the metadata. A map that associates a string key to a + /// `CustomValueMetadata`. + custom: CustomMetadataInner, +} + +// Since we've abstracted away from frame-metadatas, we impl this on our custom Metadata +// so that it can be used by `frame-decode` to obtain the relevant extrinsic info. 
+impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { + type TypeId = u32; + + fn extrinsic_call_info( + &self, + pallet_index: u8, + call_index: u8, + ) -> Result, ExtrinsicInfoError<'_>> { + let pallet = self + .pallet_by_call_index(pallet_index) + .ok_or(ExtrinsicInfoError::PalletNotFound { index: pallet_index })?; + + let call = pallet.call_variant_by_index(call_index).ok_or_else(|| { + ExtrinsicInfoError::CallNotFound { + index: call_index, + pallet_index, + pallet_name: Cow::Borrowed(pallet.name()), + } + })?; + + Ok(ExtrinsicCallInfo { + pallet_name: Cow::Borrowed(pallet.name()), + call_name: Cow::Borrowed(&call.name), + args: call + .fields + .iter() + .map(|f| ExtrinsicInfoArg { + name: Cow::Borrowed(f.name.as_deref().unwrap_or("")), + id: f.ty.id, + }) + .collect(), + }) + } + + fn extrinsic_signature_info( + &self, + ) -> Result, ExtrinsicInfoError<'_>> { + Ok(ExtrinsicSignatureInfo { + address_id: self.extrinsic().address_ty, + signature_id: self.extrinsic().signature_ty, + }) + } + + fn extrinsic_extension_info( + &self, + extension_version: Option, + ) -> Result, ExtrinsicInfoError<'_>> { + let extension_version = extension_version.unwrap_or_else(|| { + // We have some transaction, probably a V4 one with no extension version, + // but our metadata may support multiple versions. Use the metadata to decide + // what version to assume we'll decode it as. + self.extrinsic().transaction_extension_version_to_use_for_decoding() + }); + + let extension_ids = self + .extrinsic() + .transaction_extensions_by_version(extension_version) + .ok_or(ExtrinsicInfoError::ExtrinsicExtensionVersionNotFound { extension_version })? 
+ .map(|f| ExtrinsicInfoArg { name: Cow::Borrowed(f.identifier()), id: f.extra_ty() }) + .collect(); + + Ok(ExtrinsicExtensionInfo { extension_ids }) + } +} +impl frame_decode::storage::StorageTypeInfo for Metadata { + type TypeId = u32; + + fn storage_info( + &self, + pallet_name: &str, + storage_entry: &str, + ) -> Result, StorageInfoError<'_>> { + let pallet = + self.pallet_by_name(pallet_name) + .ok_or_else(|| StorageInfoError::PalletNotFound { + pallet_name: pallet_name.to_string(), + })?; + let entry = pallet + .storage() + .and_then(|storage| storage.entry_by_name(storage_entry)) + .ok_or_else(|| StorageInfoError::StorageNotFound { + name: storage_entry.to_string(), + pallet_name: Cow::Borrowed(pallet.name()), + })?; + + let info = StorageInfo { + keys: Cow::Borrowed(&*entry.info.keys), + value_id: entry.info.value_id, + default_value: entry.info.default_value.as_ref().map(|def| Cow::Borrowed(&**def)), + }; + + Ok(info) + } +} +impl frame_decode::storage::StorageEntryInfo for Metadata { + fn storage_entries(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + let pallet_iter = core::iter::once(StorageEntry::In(pallet_name.into())); + let entries_iter = pallet.storage().into_iter().flat_map(|storage| { + storage.entries().iter().map(|entry| StorageEntry::Name(entry.name().into())) + }); + + pallet_iter.chain(entries_iter) + }) + } +} +impl frame_decode::runtime_apis::RuntimeApiTypeInfo for Metadata { + type TypeId = u32; + + fn runtime_api_info( + &self, + trait_name: &str, + method_name: &str, + ) -> Result, RuntimeApiInfoError<'_>> { + let api_trait = + self.apis + .get_by_key(trait_name) + .ok_or_else(|| RuntimeApiInfoError::TraitNotFound { + trait_name: trait_name.to_string(), + })?; + let api_method = api_trait.methods.get_by_key(method_name).ok_or_else(|| { + RuntimeApiInfoError::MethodNotFound { + trait_name: Cow::Borrowed(&api_trait.name), + method_name: method_name.to_string(), + } + })?; + + let info = 
RuntimeApiInfo { + inputs: Cow::Borrowed(&api_method.info.inputs), + output_id: api_method.info.output_id, + }; + + Ok(info) + } +} +impl frame_decode::runtime_apis::RuntimeApiEntryInfo for Metadata { + fn runtime_api_entries(&self) -> impl Iterator> { + self.runtime_api_traits().flat_map(|api_trait| { + let trait_name = api_trait.name(); + let trait_iter = core::iter::once(RuntimeApiEntry::In(trait_name.into())); + let method_iter = + api_trait.methods().map(|method| RuntimeApiEntry::Name(method.name().into())); + + trait_iter.chain(method_iter) + }) + } +} +impl frame_decode::view_functions::ViewFunctionTypeInfo for Metadata { + type TypeId = u32; + + fn view_function_info( + &self, + pallet_name: &str, + function_name: &str, + ) -> Result, ViewFunctionInfoError<'_>> { + let pallet = self.pallet_by_name(pallet_name).ok_or_else(|| { + ViewFunctionInfoError::PalletNotFound { pallet_name: pallet_name.to_string() } + })?; + let function = pallet.view_function_by_name(function_name).ok_or_else(|| { + ViewFunctionInfoError::FunctionNotFound { + pallet_name: Cow::Borrowed(pallet.name()), + function_name: function_name.to_string(), + } + })?; + + let info = ViewFunctionInfo { + inputs: Cow::Borrowed(&function.inner.info.inputs), + output_id: function.inner.info.output_id, + query_id: *function.query_id(), + }; + + Ok(info) + } +} +impl frame_decode::view_functions::ViewFunctionEntryInfo for Metadata { + fn view_function_entries(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + let pallet_iter = core::iter::once(ViewFunctionEntry::In(pallet_name.into())); + let fn_iter = pallet + .view_functions() + .map(|function| ViewFunctionEntry::Name(function.name().into())); + + pallet_iter.chain(fn_iter) + }) + } +} +impl frame_decode::constants::ConstantTypeInfo for Metadata { + type TypeId = u32; + + fn constant_info( + &self, + pallet_name: &str, + constant_name: &str, + ) -> Result, ConstantInfoError<'_>> { + let pallet = + 
self.pallet_by_name(pallet_name) + .ok_or_else(|| ConstantInfoError::PalletNotFound { + pallet_name: pallet_name.to_string(), + })?; + let constant = pallet.constant_by_name(constant_name).ok_or_else(|| { + ConstantInfoError::ConstantNotFound { + pallet_name: Cow::Borrowed(pallet.name()), + constant_name: constant_name.to_string(), + } + })?; + + let info = ConstantInfo { bytes: &constant.value, type_id: constant.ty }; + + Ok(info) + } +} +impl frame_decode::constants::ConstantEntryInfo for Metadata { + fn constant_entries(&self) -> impl Iterator> { + self.pallets().flat_map(|pallet| { + let pallet_name = pallet.name(); + let pallet_iter = core::iter::once(ConstantEntry::In(pallet_name.into())); + let constant_iter = + pallet.constants().map(|constant| ConstantEntry::Name(constant.name().into())); + + pallet_iter.chain(constant_iter) + }) + } +} +impl frame_decode::custom_values::CustomValueTypeInfo for Metadata { + type TypeId = u32; + + fn custom_value_info( + &self, + name: &str, + ) -> Result, CustomValueInfoError> { + let custom_value = self + .custom() + .get(name) + .ok_or_else(|| CustomValueInfoError { not_found: name.to_string() })?; + + let info = CustomValueInfo { bytes: custom_value.data, type_id: custom_value.type_id }; + + Ok(info) + } +} +impl frame_decode::custom_values::CustomValueEntryInfo for Metadata { + fn custom_values(&self) -> impl Iterator> { + self.custom.map.keys().map(|name| CustomValue { name: Cow::Borrowed(name) }) + } +} + +impl Metadata { + /// This is essentially an alias for `::decode(&mut bytes)` + pub fn decode_from(mut bytes: &[u8]) -> Result { + ::decode(&mut bytes) + } + + /// Convert V16 metadata into [`Metadata`]. + pub fn from_v16( + metadata: frame_metadata::v16::RuntimeMetadataV16, + ) -> Result { + metadata.try_into() + } + + /// Convert V15 metadata into [`Metadata`]. 
+ pub fn from_v15( + metadata: frame_metadata::v15::RuntimeMetadataV15, + ) -> Result { + metadata.try_into() + } + + /// Convert V14 metadata into [`Metadata`]. + pub fn from_v14( + metadata: frame_metadata::v14::RuntimeMetadataV14, + ) -> Result { + metadata.try_into() + } + + /// Convert V13 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v13( + metadata: &frame_metadata::v13::RuntimeMetadataV13, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v13(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V12 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v12( + metadata: &frame_metadata::v12::RuntimeMetadataV12, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v12(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V13 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v11( + metadata: &frame_metadata::v11::RuntimeMetadataV11, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v11(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V13 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v10( + metadata: &frame_metadata::v10::RuntimeMetadataV10, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v10(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V9 metadata into [`Metadata`], given the necessary extra type information. 
+ #[cfg(feature = "legacy")] + pub fn from_v9( + metadata: &frame_metadata::v9::RuntimeMetadataV9, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v9(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V8 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v8( + metadata: &frame_metadata::v8::RuntimeMetadataV8, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v8(metadata, types, from::legacy::Opts::compat()) + } + + /// Access the underlying type registry. + pub fn types(&self) -> &PortableRegistry { + &self.types + } + + /// Mutable access to the underlying type registry. + pub fn types_mut(&mut self) -> &mut PortableRegistry { + &mut self.types + } + + /// The type ID of the `DispatchError` type, if it exists. + pub fn dispatch_error_ty(&self) -> Option { + self.dispatch_error_ty + } + + /// Return details about the extrinsic format. + pub fn extrinsic(&self) -> &ExtrinsicMetadata { + &self.extrinsic + } + + /// Return details about the outer enums. + pub fn outer_enums(&self) -> OuterEnumsMetadata { + self.outer_enums + } + + /// An iterator over all of the available pallets. 
+ pub fn pallets(&self) -> impl ExactSizeIterator> { + self.pallets + .values() + .iter() + .map(|inner| PalletMetadata { inner, types: self.types() }) + } + + /// Access a pallet given some call/extrinsic pallet index byte + pub fn pallet_by_call_index(&self, variant_index: u8) -> Option> { + let inner = self + .pallets_by_call_index + .get(&variant_index) + .and_then(|i| self.pallets.get_by_index(*i))?; + + Some(PalletMetadata { inner, types: self.types() }) + } + + /// Access a pallet given some event pallet index byte + pub fn pallet_by_event_index(&self, variant_index: u8) -> Option> { + let inner = self + .pallets_by_event_index + .get(&variant_index) + .and_then(|i| self.pallets.get_by_index(*i))?; + + Some(PalletMetadata { inner, types: self.types() }) + } + + /// Access a pallet given some error pallet index byte + pub fn pallet_by_error_index(&self, variant_index: u8) -> Option> { + let inner = self + .pallets_by_error_index + .get(&variant_index) + .and_then(|i| self.pallets.get_by_index(*i))?; + + Some(PalletMetadata { inner, types: self.types() }) + } + + /// Access a pallet given its name. + pub fn pallet_by_name(&self, pallet_name: &str) -> Option> { + let inner = self.pallets.get_by_key(pallet_name)?; + + Some(PalletMetadata { inner, types: self.types() }) + } + + /// An iterator over all of the runtime APIs. + pub fn runtime_api_traits(&self) -> impl ExactSizeIterator> { + self.apis + .values() + .iter() + .map(|inner| RuntimeApiMetadata { inner, types: self.types() }) + } + + /// Access a runtime API trait given its name. + pub fn runtime_api_trait_by_name(&'_ self, name: &str) -> Option> { + let inner = self.apis.get_by_key(name)?; + Some(RuntimeApiMetadata { inner, types: self.types() }) + } + + /// Returns custom user defined types + pub fn custom(&self) -> CustomMetadata<'_> { + CustomMetadata { types: self.types(), inner: &self.custom } + } + + /// Obtain a unique hash representing this metadata or specific parts of it. 
+ pub fn hasher(&self) -> MetadataHasher<'_> { + MetadataHasher::new(self) + } + + /// Get type hash for a type in the registry + pub fn type_hash(&self, id: u32) -> Option<[u8; HASH_LEN]> { + self.types.resolve(id)?; + Some(crate::utils::validation::get_type_hash(&self.types, id)) + } +} + +/// Metadata for a specific pallet. +#[derive(Debug, Clone, Copy)] +pub struct PalletMetadata<'a> { + inner: &'a PalletMetadataInner, + types: &'a PortableRegistry, +} + +impl<'a> PalletMetadata<'a> { + /// The pallet name. + pub fn name(&self) -> &'a str { + &self.inner.name + } + + /// The index to use for calls in this pallet. + pub fn call_index(&self) -> u8 { + self.inner.call_index + } + + /// The index to use for events in this pallet. + pub fn event_index(&self) -> u8 { + self.inner.event_index + } + + /// The index to use for errors in this pallet. + pub fn error_index(&self) -> u8 { + self.inner.error_index + } + + /// The pallet docs. + pub fn docs(&self) -> &'a [String] { + &self.inner.docs + } + + /// Type ID for the pallet's Call type, if it exists. + pub fn call_ty_id(&self) -> Option { + self.inner.call_ty + } + + /// Type ID for the pallet's Event type, if it exists. + pub fn event_ty_id(&self) -> Option { + self.inner.event_ty + } + + /// Type ID for the pallet's Error type, if it exists. + pub fn error_ty_id(&self) -> Option { + self.inner.error_ty + } + + /// Return metadata about the pallet's storage entries. + pub fn storage(&self) -> Option<&'a StorageMetadata> { + self.inner.storage.as_ref() + } + + /// Return all of the event variants, if an event type exists. + pub fn event_variants(&self) -> Option<&'a [Variant]> { + VariantIndex::get(self.inner.event_ty, self.types) + } + + /// Return an event variant given it's encoded variant index. 
+ pub fn event_variant_by_index(&self, variant_index: u8) -> Option<&'a Variant> { + self.inner.event_variant_index.lookup_by_index( + variant_index, + self.inner.event_ty, + self.types, + ) + } + + /// Does this pallet have any view functions? + pub fn has_view_functions(&self) -> bool { + !self.inner.view_functions.is_empty() + } + + /// Return an iterator over the View Functions in this pallet, if any. + pub fn view_functions( + &self, + ) -> impl ExactSizeIterator> + use<'a> { + self.inner + .view_functions + .values() + .iter() + .map(|vf: &'a _| ViewFunctionMetadata { inner: vf, types: self.types }) + } + + /// Return the view function with a given name, if any + pub fn view_function_by_name(&self, name: &str) -> Option> { + self.inner + .view_functions + .get_by_key(name) + .map(|vf: &'a _| ViewFunctionMetadata { inner: vf, types: self.types }) + } + + /// Iterate (in no particular order) over the associated type names and type IDs for this + /// pallet. + pub fn associated_types(&self) -> impl ExactSizeIterator + use<'a> { + self.inner.associated_types.iter().map(|(name, ty)| (&**name, *ty)) + } + + /// Fetch an associated type ID given the associated type name. + pub fn associated_type_id(&self, name: &str) -> Option { + self.inner.associated_types.get(name).copied() + } + + /// Return all of the call variants, if a call type exists. + pub fn call_variants(&self) -> Option<&'a [Variant]> { + VariantIndex::get(self.inner.call_ty, self.types) + } + + /// Return a call variant given it's encoded variant index. + pub fn call_variant_by_index(&self, variant_index: u8) -> Option<&'a Variant> { + self.inner + .call_variant_index + .lookup_by_index(variant_index, self.inner.call_ty, self.types) + } + + /// Return a call variant given it's name. 
+ pub fn call_variant_by_name(&self, call_name: &str) -> Option<&'a Variant> { + self.inner + .call_variant_index + .lookup_by_name(call_name, self.inner.call_ty, self.types) + } + + /// Return all of the error variants, if an error type exists. + pub fn error_variants(&self) -> Option<&'a [Variant]> { + VariantIndex::get(self.inner.error_ty, self.types) + } + + /// Return an error variant given it's encoded variant index. + pub fn error_variant_by_index(&self, variant_index: u8) -> Option<&'a Variant> { + self.inner.error_variant_index.lookup_by_index( + variant_index, + self.inner.error_ty, + self.types, + ) + } + + /// Return constant details given the constant name. + pub fn constant_by_name(&self, name: &str) -> Option<&'a ConstantMetadata> { + self.inner.constants.get_by_key(name) + } + + /// An iterator over the constants in this pallet. + pub fn constants(&self) -> impl ExactSizeIterator + use<'a> { + self.inner.constants.values().iter() + } + + /// Return a hash for the storage entry, or None if it was not found. + pub fn storage_hash(&self, entry_name: &str) -> Option<[u8; HASH_LEN]> { + crate::utils::validation::get_storage_hash(self, entry_name) + } + + /// Return a hash for the constant, or None if it was not found. + pub fn constant_hash(&self, constant_name: &str) -> Option<[u8; HASH_LEN]> { + crate::utils::validation::get_constant_hash(self, constant_name) + } + + /// Return a hash for the call, or None if it was not found. + pub fn call_hash(&self, call_name: &str) -> Option<[u8; HASH_LEN]> { + crate::utils::validation::get_call_hash(self, call_name) + } + + /// Return a hash for the entire pallet. + pub fn hash(&self) -> [u8; HASH_LEN] { + crate::utils::validation::get_pallet_hash(*self) + } +} + +#[derive(Debug, Clone)] +struct PalletMetadataInner { + /// Pallet name. + name: String, + /// The index for calls in the pallet. + call_index: u8, + /// The index for events in the pallet. 
+ /// + /// This is the same as `call_index` for modern metadatas, + /// but can be different for older metadatas (pre-V12). + event_index: u8, + /// The index for errors in the pallet. + /// + /// This is the same as `call_index` for modern metadatas, + /// but can be different for older metadatas (pre-V12). + error_index: u8, + /// Pallet storage metadata. + storage: Option, + /// Type ID for the pallet Call enum. + call_ty: Option, + /// Call variants by name/u8. + call_variant_index: VariantIndex, + /// Type ID for the pallet Event enum. + event_ty: Option, + /// Event variants by name/u8. + event_variant_index: VariantIndex, + /// Type ID for the pallet Error enum. + error_ty: Option, + /// Error variants by name/u8. + error_variant_index: VariantIndex, + /// Map from constant name to constant details. + constants: OrderedMap, + /// Details about each of the pallet view functions. + view_functions: OrderedMap, + /// Mapping from associated type to type ID describing its shape. + associated_types: BTreeMap, + /// Pallet documentation. + docs: Vec, +} + +/// Metadata for the storage entries in a pallet. +#[derive(Debug, Clone)] +pub struct StorageMetadata { + /// The common prefix used by all storage entries. + prefix: String, + /// Map from storage entry name to details. + entries: OrderedMap, +} + +impl StorageMetadata { + /// The common prefix used by all storage entries. + pub fn prefix(&self) -> &str { + &self.prefix + } + + /// An iterator over the storage entries. + pub fn entries(&self) -> &[StorageEntryMetadata] { + self.entries.values() + } + + /// Return a specific storage entry given its name. + pub fn entry_by_name(&self, name: &str) -> Option<&StorageEntryMetadata> { + self.entries.get_by_key(name) + } +} + +/// Metadata for a single storage entry. +#[derive(Debug, Clone)] +pub struct StorageEntryMetadata { + /// Variable name of the storage entry. + name: String, + /// Information about the storage entry. 
+ info: StorageInfo<'static, u32>, + /// Storage entry documentation. + docs: Vec, +} + +impl StorageEntryMetadata { + /// Name of this entry. + pub fn name(&self) -> &str { + &self.name + } + /// Keys in this storage entry. + pub fn keys(&self) -> impl ExactSizeIterator> { + let keys = &*self.info.keys; + keys.iter() + } + /// Value type for this storage entry. + pub fn value_ty(&self) -> u32 { + self.info.value_id + } + /// The default value, if one exists, for this entry. + pub fn default_value(&self) -> Option<&[u8]> { + self.info.default_value.as_deref() + } + /// Storage entry documentation. + pub fn docs(&self) -> &[String] { + &self.docs + } +} + +/// Metadata for a single constant. +#[derive(Debug, Clone)] +pub struct ConstantMetadata { + /// Name of the pallet constant. + name: String, + /// Type of the pallet constant. + ty: u32, + /// Value stored in the constant (SCALE encoded). + value: Vec, + /// Constant documentation. + docs: Vec, +} + +impl ConstantMetadata { + /// Name of the pallet constant. + pub fn name(&self) -> &str { + &self.name + } + /// Type of the pallet constant. + pub fn ty(&self) -> u32 { + self.ty + } + /// Value stored in the constant (SCALE encoded). + pub fn value(&self) -> &[u8] { + &self.value + } + /// Constant documentation. + pub fn docs(&self) -> &[String] { + &self.docs + } +} + +/// Metadata for the extrinsic type. +#[derive(Debug, Clone)] +pub struct ExtrinsicMetadata { + /// The type of the address that signs the extrinsic. + /// Used to help decode tx signatures. + address_ty: u32, + /// The type of the extrinsic's signature. + /// Used to help decode tx signatures. + signature_ty: u32, + /// Which extrinsic versions are supported by this chain. + supported_versions: Vec, + /// The signed extensions in the order they appear in the extrinsic. + transaction_extensions: Vec, + /// Different versions of transaction extensions can exist. 
Each version + /// is a u8 which corresponds to the indexes of the transaction extensions + /// seen in the above Vec, in order, that exist at that version. + transaction_extensions_by_version: BTreeMap>, +} + +impl ExtrinsicMetadata { + /// Which extrinsic versions are supported. + pub fn supported_versions(&self) -> &[u8] { + &self.supported_versions + } + + /// The extra/additional information associated with the extrinsic. + pub fn transaction_extensions_by_version( + &self, + version: u8, + ) -> Option>> { + let extension_indexes = self.transaction_extensions_by_version.get(&version)?; + let iter = extension_indexes.iter().map(|index| { + let tx_metadata = self.transaction_extensions.get(*index as usize).expect( + "transaction extension should exist if index is in transaction_extensions_by_version", + ); + + TransactionExtensionMetadata { + identifier: &tx_metadata.identifier, + extra_ty: tx_metadata.extra_ty, + additional_ty: tx_metadata.additional_ty, + } + }); + + Some(iter) + } + + /// When constructing a v5 extrinsic, use this transaction extensions version. + pub fn transaction_extension_version_to_use_for_encoding(&self) -> u8 { + *self + .transaction_extensions_by_version + .keys() + .max() + .expect("At least one version of transaction extensions is expected") + } + + /// An iterator of the transaction extensions to use when encoding a transaction. Basically + /// equivalent to `self.transaction_extensions_by_version(self. + /// transaction_extension_version_to_use_for_encoding()).unwrap()` + pub fn transaction_extensions_to_use_for_encoding( + &self, + ) -> impl Iterator> { + let encoding_version = self.transaction_extension_version_to_use_for_encoding(); + self.transaction_extensions_by_version(encoding_version).unwrap() + } + + /// When presented with a v4 extrinsic that has no version, treat it as being this version. 
+ pub fn transaction_extension_version_to_use_for_decoding(&self) -> u8 { + *self + .transaction_extensions_by_version + .keys() + .max() + .expect("At least one version of transaction extensions is expected") + } +} + +/// Metadata for the signed extensions used by extrinsics. +#[derive(Debug, Clone)] +pub struct TransactionExtensionMetadata<'a> { + /// The unique transaction extension identifier, which may be different from the type name. + identifier: &'a str, + /// The type of the transaction extension, with the data to be included in the extrinsic. + extra_ty: u32, + /// The type of the additional signed data, with the data to be included in the signed payload. + additional_ty: u32, +} + +#[derive(Debug, Clone)] +struct TransactionExtensionMetadataInner { + identifier: String, + extra_ty: u32, + additional_ty: u32, +} + +impl<'a> TransactionExtensionMetadata<'a> { + /// The unique signed extension identifier, which may be different from the type name. + pub fn identifier(&self) -> &'a str { + self.identifier + } + /// The type of the signed extension, with the data to be included in the extrinsic. + pub fn extra_ty(&self) -> u32 { + self.extra_ty + } + /// The type of the additional signed data, with the data to be included in the signed payload + pub fn additional_ty(&self) -> u32 { + self.additional_ty + } +} + +/// Metadata for the outer enums. +#[derive(Debug, Clone, Copy)] +pub struct OuterEnumsMetadata { + /// The type of the outer call enum. + call_enum_ty: u32, + /// The type of the outer event enum. + event_enum_ty: u32, + /// The type of the outer error enum. + error_enum_ty: u32, +} + +impl OuterEnumsMetadata { + /// The type of the outer call enum. + pub fn call_enum_ty(&self) -> u32 { + self.call_enum_ty + } + + /// The type of the outer event enum. + pub fn event_enum_ty(&self) -> u32 { + self.event_enum_ty + } + + /// The type of the outer error enum. 
+ pub fn error_enum_ty(&self) -> u32 { + self.error_enum_ty + } +} + +/// Metadata for the available runtime APIs. +#[derive(Debug, Clone, Copy)] +pub struct RuntimeApiMetadata<'a> { + inner: &'a RuntimeApiMetadataInner, + types: &'a PortableRegistry, +} + +impl<'a> RuntimeApiMetadata<'a> { + /// Trait name. + pub fn name(&self) -> &'a str { + &self.inner.name + } + /// Trait documentation. + pub fn docs(&self) -> &[String] { + &self.inner.docs + } + /// An iterator over the trait methods. + pub fn methods(&self) -> impl ExactSizeIterator> + use<'a> { + self.inner.methods.values().iter().map(|item| RuntimeApiMethodMetadata { + trait_name: &self.inner.name, + inner: item, + types: self.types, + }) + } + /// Get a specific trait method given its name. + pub fn method_by_name(&self, name: &str) -> Option> { + self.inner.methods.get_by_key(name).map(|item| RuntimeApiMethodMetadata { + trait_name: &self.inner.name, + inner: item, + types: self.types, + }) + } + /// Return a hash for the runtime API trait. + pub fn hash(&self) -> [u8; HASH_LEN] { + crate::utils::validation::get_runtime_apis_hash(*self) + } +} + +#[derive(Debug, Clone)] +struct RuntimeApiMetadataInner { + /// Trait name. + name: String, + /// Trait methods. + methods: OrderedMap, + /// Trait documentation. + docs: Vec, +} + +/// Metadata for a single runtime API method. +#[derive(Debug, Clone)] +pub struct RuntimeApiMethodMetadata<'a> { + trait_name: &'a str, + inner: &'a RuntimeApiMethodMetadataInner, + types: &'a PortableRegistry, +} + +impl<'a> RuntimeApiMethodMetadata<'a> { + /// Method name. + pub fn name(&self) -> &'a str { + &self.inner.name + } + /// Method documentation. + pub fn docs(&self) -> &[String] { + &self.inner.docs + } + /// Method inputs. + pub fn inputs( + &self, + ) -> impl ExactSizeIterator> + use<'a> { + let inputs = &*self.inner.info.inputs; + inputs.iter() + } + /// Method return type. 
+ pub fn output_ty(&self) -> u32 { + self.inner.info.output_id + } + /// Return a hash for the method. + pub fn hash(&self) -> [u8; HASH_LEN] { + crate::utils::validation::get_runtime_api_hash(self) + } +} + +#[derive(Debug, Clone)] +struct RuntimeApiMethodMetadataInner { + /// Method name. + name: String, + /// Info. + info: RuntimeApiInfo<'static, u32>, + /// Method documentation. + docs: Vec, +} + +/// Metadata for the available View Functions. Currently these exist only +/// at the pallet level, but eventually they could exist at the runtime level too. +#[derive(Debug, Clone, Copy)] +pub struct ViewFunctionMetadata<'a> { + inner: &'a ViewFunctionMetadataInner, + types: &'a PortableRegistry, +} + +impl<'a> ViewFunctionMetadata<'a> { + /// Method name. + pub fn name(&self) -> &'a str { + &self.inner.name + } + /// Query ID. This is used to query the function. Roughly, it is constructed by doing + /// `twox_128(pallet_name) ++ twox_128("fn_name(fnarg_types) -> return_ty")` . + pub fn query_id(&self) -> &'a [u8; 32] { + &self.inner.info.query_id + } + /// Method documentation. + pub fn docs(&self) -> &'a [String] { + &self.inner.docs + } + /// Method inputs. + pub fn inputs( + &self, + ) -> impl ExactSizeIterator> + use<'a> { + let inputs = &*self.inner.info.inputs; + inputs.iter() + } + /// Method return type. + pub fn output_ty(&self) -> u32 { + self.inner.info.output_id + } + /// Return a hash for the method. The query ID of a view function validates it to some + /// degree, but only takes type _names_ into account. This hash takes into account the + /// actual _shape_ of each argument and the return type. + pub fn hash(&self) -> [u8; HASH_LEN] { + crate::utils::validation::get_view_function_hash(self) + } +} + +#[derive(Debug, Clone)] +struct ViewFunctionMetadataInner { + /// View function name. + name: String, + /// Info. + info: ViewFunctionInfo<'static, u32>, + /// Documentation. 
+ docs: Vec, +} + +/// Metadata for a single input parameter to a runtime API method / pallet view function. +#[derive(Debug, Clone)] +pub struct MethodParamMetadata { + /// Parameter name. + pub name: String, + /// Parameter type. + pub ty: u32, +} + +/// Metadata of custom types with custom values, basically the same as +/// `frame_metadata::v15::CustomMetadata>`. +#[derive(Debug, Clone)] +pub struct CustomMetadata<'a> { + types: &'a PortableRegistry, + inner: &'a CustomMetadataInner, +} + +impl<'a> CustomMetadata<'a> { + /// Get a certain [CustomValueMetadata] by its name. + pub fn get(&self, name: &str) -> Option> { + self.inner.map.get_key_value(name).map(|(name, e)| CustomValueMetadata { + types: self.types, + type_id: e.ty.id, + data: &e.value, + name, + }) + } + + /// Iterates over names (keys) and associated custom values + pub fn iter(&self) -> impl Iterator> + use<'a> { + self.inner.map.iter().map(|(name, e)| CustomValueMetadata { + types: self.types, + type_id: e.ty.id, + data: &e.value, + name: name.as_ref(), + }) + } + + /// Access the underlying type registry. + pub fn types(&self) -> &PortableRegistry { + self.types + } +} + +/// Basically the same as `frame_metadata::v15::CustomValueMetadata>`, but borrowed. +pub struct CustomValueMetadata<'a> { + types: &'a PortableRegistry, + type_id: u32, + data: &'a [u8], + name: &'a str, +} + +impl<'a> CustomValueMetadata<'a> { + /// Access the underlying type registry. + pub fn types(&self) -> &PortableRegistry { + self.types + } + + /// The scale encoded value + pub fn bytes(&self) -> &'a [u8] { + self.data + } + + /// The type id in the TypeRegistry + pub fn type_id(&self) -> u32 { + self.type_id + } + + /// The name under which the custom value is registered. + pub fn name(&self) -> &str { + self.name + } + + /// Calculates the hash for the CustomValueMetadata. + pub fn hash(&self) -> [u8; HASH_LEN] { + get_custom_value_hash(self) + } +} + +/// Decode SCALE encoded metadata. 
+/// +/// - The default assumption is that metadata is encoded as +/// [`frame_metadata::RuntimeMetadataPrefixed`]. This is the expected format that metadata is +/// encoded into. +/// - if this fails, we also try to decode as [`frame_metadata::RuntimeMetadata`]. +/// - If this all fails, we also try to decode as [`frame_metadata::OpaqueMetadata`]. +pub fn decode_runtime_metadata( + input: &[u8], +) -> Result { + use codec::Decode; + + let err = match frame_metadata::RuntimeMetadataPrefixed::decode(&mut &*input) { + Ok(md) => return Ok(md.1), + Err(e) => e, + }; + + if let Ok(md) = frame_metadata::RuntimeMetadata::decode(&mut &*input) { + return Ok(md); + } + + // frame_metadata::OpaqueMetadata is a vec of bytes. If we can decode the length, AND + // the length definitely corresponds to the number of remaining bytes, then we try to + // decode the inner bytes. + if let Ok(len) = codec::Compact::::decode(&mut &*input) { + if input.len() == len.0 as usize { + return decode_runtime_metadata(input); + } + } + + Err(err) +} + +// Support decoding metadata from the "wire" format directly into this. +// Errors may be lost in the case that the metadata content is somehow invalid. 
+impl codec::Decode for Metadata { + fn decode(input: &mut I) -> Result { + let metadata = frame_metadata::RuntimeMetadataPrefixed::decode(input)?; + let metadata = match metadata.1 { + frame_metadata::RuntimeMetadata::V14(md) => md.try_into(), + frame_metadata::RuntimeMetadata::V15(md) => md.try_into(), + frame_metadata::RuntimeMetadata::V16(md) => md.try_into(), + _ => { + return Err("Metadata::decode failed: Cannot try_into() to Metadata: unsupported metadata version".into()) + }, + }; + + metadata.map_err(|_| "Metadata::decode failed: Cannot try_into() to Metadata".into()) + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/utils/mod.rs b/vendor/pezkuwi-subxt/metadata/src/utils/mod.rs new file mode 100644 index 00000000..1aa04a2c --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/utils/mod.rs @@ -0,0 +1,7 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +pub mod ordered_map; +pub mod validation; +pub mod variant_index; diff --git a/vendor/pezkuwi-subxt/metadata/src/utils/ordered_map.rs b/vendor/pezkuwi-subxt/metadata/src/utils/ordered_map.rs new file mode 100644 index 00000000..634fa43e --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/utils/ordered_map.rs @@ -0,0 +1,81 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use alloc::vec::Vec; +use hashbrown::HashMap; + +/// A minimal ordered map to let one search for +/// things by key or get the values in insert order. +#[derive(Debug, Clone)] +pub struct OrderedMap { + values: Vec, + map: HashMap, +} + +impl Default for OrderedMap { + fn default() -> Self { + Self { values: Default::default(), map: Default::default() } + } +} + +impl OrderedMap +where + K: PartialEq + Eq + core::hash::Hash, +{ + /// Create a new, empty [`OrderedMap`]. 
+ pub fn new() -> Self { + Self::default() + } + + /// Number of entries in the map. + #[allow(dead_code)] + pub fn len(&self) -> usize { + self.values.len() + } + + /// Is the map empty. + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } + + /// Push/insert an item to the end of the map. + pub fn push_insert(&mut self, key: K, value: V) { + let idx = self.values.len(); + self.values.push(value); + self.map.insert(key, idx); + } + + /// Get an item by its key. + pub fn get_by_key(&self, key: &Q) -> Option<&V> + where + K: alloc::borrow::Borrow, + Q: core::hash::Hash + Eq + ?Sized, + { + self.map.get(key).and_then(|&v| self.values.get(v)) + } + + /// Get an item by its index. + pub fn get_by_index(&self, i: usize) -> Option<&V> { + self.values.get(i) + } + + /// Access the underlying values. + pub fn values(&self) -> &[V] { + &self.values + } +} + +impl FromIterator<(K, V)> for OrderedMap +where + K: PartialEq + Eq + core::hash::Hash, +{ + fn from_iter>(iter: T) -> Self { + let mut map = OrderedMap::new(); + for (k, v) in iter { + map.push_insert(k, v) + } + map + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/utils/validation.rs b/vendor/pezkuwi-subxt/metadata/src/utils/validation.rs new file mode 100644 index 00000000..dc5eb56a --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/utils/validation.rs @@ -0,0 +1,1110 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Utility functions for metadata validation. + +use crate::{ + CustomMetadata, CustomValueMetadata, ExtrinsicMetadata, Metadata, PalletMetadata, + RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata, ViewFunctionMetadata, +}; +use alloc::vec::Vec; +use hashbrown::HashMap; +use scale_info::{Field, PortableRegistry, TypeDef, TypeDefVariant, Variant, form::PortableForm}; + +// The number of bytes our `hash` function produces. 
+pub(crate) const HASH_LEN: usize = 32; +pub type Hash = [u8; HASH_LEN]; + +/// Internal byte representation for various metadata types utilized for +/// generating deterministic hashes between different rust versions. +#[repr(u8)] +enum TypeBeingHashed { + Composite, + Variant, + Sequence, + Array, + Tuple, + Primitive, + Compact, + BitSequence, +} + +/// Hashing function utilized internally. +fn hash(data: &[u8]) -> Hash { + pezsp_crypto_hashing::twox_256(data) +} + +/// XOR two hashes together. Only use this when you don't care about the order +/// of the things you're hashing together. +fn xor(a: Hash, b: Hash) -> Hash { + let mut out = [0u8; HASH_LEN]; + for (idx, (a, b)) in a.into_iter().zip(b).enumerate() { + out[idx] = a ^ b; + } + out +} + +// Combine some number of HASH_LEN byte hashes and output a single HASH_LEN +// byte hash to uniquely represent the inputs. +macro_rules! count_idents { + () => { 0 }; + ($n:ident $($rest:ident)*) => { 1 + count_idents!($($rest)*) } +} +macro_rules! concat_and_hash_n { + ($name:ident($($arg:ident)+)) => { + fn $name($($arg: &Hash),+) -> Hash { + let mut out = [0u8; HASH_LEN * count_idents!($($arg)+)]; + let mut start = 0; + $( + out[start..start+HASH_LEN].copy_from_slice(&$arg[..]); + #[allow(unused_assignments)] + { start += HASH_LEN; } + )+ + hash(&out) + } + } +} +concat_and_hash_n!(concat_and_hash2(a b)); +concat_and_hash_n!(concat_and_hash3(a b c)); +concat_and_hash_n!(concat_and_hash4(a b c d)); +concat_and_hash_n!(concat_and_hash5(a b c d e)); +concat_and_hash_n!(concat_and_hash6(a b c d e f)); + +/// Obtain the hash representation of a `scale_info::Field`. 
+fn get_field_hash( + registry: &PortableRegistry, + field: &Field, + cache: &mut HashMap, +) -> Hash { + let field_name_bytes = match &field.name { + Some(name) => hash(name.as_bytes()), + None => [0u8; HASH_LEN], + }; + + concat_and_hash2(&field_name_bytes, &get_type_hash_recurse(registry, field.ty.id, cache)) +} + +/// Obtain the hash representation of a `scale_info::Variant`. +fn get_variant_hash( + registry: &PortableRegistry, + var: &Variant, + cache: &mut HashMap, +) -> Hash { + let variant_name_bytes = hash(var.name.as_bytes()); + let variant_field_bytes = var.fields.iter().fold([0u8; HASH_LEN], |bytes, field| { + // EncodeAsType and DecodeAsType don't care about variant field ordering, + // so XOR the fields to ensure that it doesn't matter. + xor(bytes, get_field_hash(registry, field, cache)) + }); + + concat_and_hash2(&variant_name_bytes, &variant_field_bytes) +} + +fn get_type_def_variant_hash( + registry: &PortableRegistry, + variant: &TypeDefVariant, + only_these_variants: Option<&[&str]>, + cache: &mut HashMap, +) -> Hash { + let variant_id_bytes = [TypeBeingHashed::Variant as u8; HASH_LEN]; + let variant_field_bytes = variant.variants.iter().fold([0u8; HASH_LEN], |bytes, var| { + // With EncodeAsType and DecodeAsType we no longer care which order the variants are in, + // as long as all of the names+types are there. XOR to not care about ordering. + let should_hash = only_these_variants + .as_ref() + .map(|only_these_variants| only_these_variants.contains(&var.name.as_str())) + .unwrap_or(true); + if should_hash { xor(bytes, get_variant_hash(registry, var, cache)) } else { bytes } + }); + concat_and_hash2(&variant_id_bytes, &variant_field_bytes) +} + +/// Obtain the hash representation of a `scale_info::TypeDef`. 
+fn get_type_def_hash( + registry: &PortableRegistry, + ty_def: &TypeDef, + cache: &mut HashMap, +) -> Hash { + match ty_def { + TypeDef::Composite(composite) => { + let composite_id_bytes = [TypeBeingHashed::Composite as u8; HASH_LEN]; + let composite_field_bytes = + composite.fields.iter().fold([0u8; HASH_LEN], |bytes, field| { + // With EncodeAsType and DecodeAsType we no longer care which order the fields + // are in, as long as all of the names+types are there. XOR to not care + // about ordering. + xor(bytes, get_field_hash(registry, field, cache)) + }); + concat_and_hash2(&composite_id_bytes, &composite_field_bytes) + }, + TypeDef::Variant(variant) => get_type_def_variant_hash(registry, variant, None, cache), + TypeDef::Sequence(sequence) => concat_and_hash2( + &[TypeBeingHashed::Sequence as u8; HASH_LEN], + &get_type_hash_recurse(registry, sequence.type_param.id, cache), + ), + TypeDef::Array(array) => { + // Take length into account too; different length must lead to different hash. + let array_id_bytes = { + let mut a = [0u8; HASH_LEN]; + a[0] = TypeBeingHashed::Array as u8; + a[1..5].copy_from_slice(&array.len.to_be_bytes()); + a + }; + concat_and_hash2( + &array_id_bytes, + &get_type_hash_recurse(registry, array.type_param.id, cache), + ) + }, + TypeDef::Tuple(tuple) => { + let mut bytes = hash(&[TypeBeingHashed::Tuple as u8]); + for field in &tuple.fields { + bytes = concat_and_hash2(&bytes, &get_type_hash_recurse(registry, field.id, cache)); + } + bytes + }, + TypeDef::Primitive(primitive) => { + // Cloning the 'primitive' type should essentially be a copy. 
+ hash(&[TypeBeingHashed::Primitive as u8, primitive.clone() as u8]) + }, + TypeDef::Compact(compact) => concat_and_hash2( + &[TypeBeingHashed::Compact as u8; HASH_LEN], + &get_type_hash_recurse(registry, compact.type_param.id, cache), + ), + TypeDef::BitSequence(bitseq) => concat_and_hash3( + &[TypeBeingHashed::BitSequence as u8; HASH_LEN], + &get_type_hash_recurse(registry, bitseq.bit_order_type.id, cache), + &get_type_hash_recurse(registry, bitseq.bit_store_type.id, cache), + ), + } +} + +/// indicates whether a hash has been fully computed for a type or not +#[derive(Clone, Debug)] +pub enum CachedHash { + /// hash not known yet, but computation has already started + Recursive, + /// hash of the type, computation was finished + Hash(Hash), +} + +impl CachedHash { + fn hash(&self) -> Hash { + match &self { + CachedHash::Hash(hash) => *hash, + CachedHash::Recursive => [123; HASH_LEN], // some magical value + } + } +} + +/// Obtain the hash representation of a `scale_info::Type` identified by id. +/// +/// Hashes of the outer enums (call, event, error) should be computed prior to this +/// and passed in as the `outer_enum_hashes` argument. Whenever a type is encountered that +/// is one of the outer enums, the procomputed hash is used instead of computing a new one. +/// +/// The reason for this unintuitive behavior is that we sometimes want to trim the outer enum types +/// beforehand to only include certain pallets, which affects their hash values. +pub fn get_type_hash(registry: &PortableRegistry, id: u32) -> Hash { + get_type_hash_recurse(registry, id, &mut HashMap::new()) +} + +/// Obtain the hash representation of a `scale_info::Type` identified by id. +fn get_type_hash_recurse( + registry: &PortableRegistry, + id: u32, + cache: &mut HashMap, +) -> Hash { + // Guard against recursive types, with a 2 step caching approach: + // if the cache has an entry for the id, just return a hash derived from it. 
+ // if the type has not been seen yet, mark it with `CachedHash::Recursive` in the cache and + // proceed to `get_type_def_hash()`. -> During the execution of get_type_def_hash() we + // might get into get_type_hash(id) again for the original id -> in this case the + // `CachedHash::Recursive` provokes an early return. -> Once we return from + // `get_type_def_hash()` we need to update the cache entry: -> We set the cache + // value to `CachedHash::Hash(type_hash)`, where `type_hash` was returned from + // `get_type_def_hash()` -> It makes sure, that different types end up with + // different cache values. + // + // Values in the cache can be thought of as a mapping like this: + // type_id -> not contained = We haven't seen the type yet. + // -> `CachedHash::Recursive` = We have seen the type but hash calculation for it + // hasn't finished yet. -> `CachedHash::Hash(hash)` = Hash calculation for the type was + // completed. + if let Some(cached_hash) = cache.get(&id) { + return cached_hash.hash(); + } + cache.insert(id, CachedHash::Recursive); + let ty = registry + .resolve(id) + .expect("Type ID provided by the metadata is registered; qed"); + let type_hash = get_type_def_hash(registry, &ty.type_def, cache); + cache.insert(id, CachedHash::Hash(type_hash)); + type_hash +} + +/// Obtain the hash representation of a `frame_metadata::v15::ExtrinsicMetadata`. +fn get_extrinsic_hash(registry: &PortableRegistry, extrinsic: &ExtrinsicMetadata) -> Hash { + // Get the hashes of the extrinsic type. + let address_hash = get_type_hash(registry, extrinsic.address_ty); + // The `RuntimeCall` type is intentionally omitted and hashed by the outer enums instead. + let signature_hash = get_type_hash(registry, extrinsic.signature_ty); + + // Supported versions are just u8s and we will likely never have more than 32 of these, so put + // them into an array of u8s and panic if more than 32. 
+ if extrinsic.supported_versions.len() > 32 { + panic!("The metadata validation logic does not support more than 32 extrinsic versions."); + } + let supported_extrinsic_versions = { + let mut a = [0u8; 32]; + a[0..extrinsic.supported_versions.len()].copy_from_slice(&extrinsic.supported_versions); + a + }; + + let mut bytes = concat_and_hash3(&address_hash, &signature_hash, &supported_extrinsic_versions); + + for signed_extension in extrinsic.transaction_extensions.iter() { + bytes = concat_and_hash4( + &bytes, + &hash(signed_extension.identifier.as_bytes()), + &get_type_hash(registry, signed_extension.extra_ty), + &get_type_hash(registry, signed_extension.additional_ty), + ) + } + + bytes +} + +/// Get the hash corresponding to a single storage entry. +fn get_storage_entry_hash(registry: &PortableRegistry, entry: &StorageEntryMetadata) -> Hash { + let mut bytes = concat_and_hash3( + &hash(entry.name.as_bytes()), + &get_type_hash(registry, entry.info.value_id), + &hash(entry.info.default_value.as_deref().unwrap_or_default()), + ); + + for key in &*entry.info.keys { + bytes = concat_and_hash3( + &bytes, + &[key.hasher as u8; HASH_LEN], + &get_type_hash(registry, key.key_id), + ) + } + + bytes +} + +fn get_custom_metadata_hash(custom_metadata: &CustomMetadata) -> Hash { + custom_metadata.iter().fold([0u8; HASH_LEN], |bytes, custom_value| { + xor(bytes, get_custom_value_hash(&custom_value)) + }) +} + +/// Obtain the hash of some custom value in the metadata including it's name/key. +/// +/// If the `custom_value` has a type id that is not present in the metadata, +/// only the name and bytes are used for hashing. 
+pub fn get_custom_value_hash(custom_value: &CustomValueMetadata) -> Hash { + let name_hash = hash(custom_value.name.as_bytes()); + if custom_value.types.resolve(custom_value.type_id()).is_none() { + hash(&name_hash) + } else { + concat_and_hash2(&name_hash, &get_type_hash(custom_value.types, custom_value.type_id())) + } +} + +/// Obtain the hash for a specific storage item, or an error if it's not found. +pub fn get_storage_hash(pallet: &PalletMetadata, entry_name: &str) -> Option { + let storage = pallet.storage()?; + let entry = storage.entry_by_name(entry_name)?; + let hash = get_storage_entry_hash(pallet.types, entry); + Some(hash) +} + +/// Obtain the hash for a specific constant, or an error if it's not found. +pub fn get_constant_hash(pallet: &PalletMetadata, constant_name: &str) -> Option { + let constant = pallet.constant_by_name(constant_name)?; + + // We only need to check that the type of the constant asked for matches. + let bytes = get_type_hash(pallet.types, constant.ty); + Some(bytes) +} + +/// Obtain the hash for a specific call, or an error if it's not found. +pub fn get_call_hash(pallet: &PalletMetadata, call_name: &str) -> Option { + let call_variant = pallet.call_variant_by_name(call_name)?; + + // hash the specific variant representing the call we are interested in. + let hash = get_variant_hash(pallet.types, call_variant, &mut HashMap::new()); + Some(hash) +} + +/// Obtain the hash of a specific runtime API method, or an error if it's not found. +pub fn get_runtime_api_hash(runtime_api: &RuntimeApiMethodMetadata) -> Hash { + let registry = runtime_api.types; + + // The trait name is part of the runtime API call that is being + // generated for this method. Therefore the trait name is strongly + // connected to the method in the same way as a parameter is + // to the method. 
+ let mut bytes = concat_and_hash2( + &hash(runtime_api.trait_name.as_bytes()), + &hash(runtime_api.name().as_bytes()), + ); + + for input in runtime_api.inputs() { + bytes = concat_and_hash3( + &bytes, + &hash(input.name.as_bytes()), + &get_type_hash(registry, input.id), + ); + } + + bytes = concat_and_hash2(&bytes, &get_type_hash(registry, runtime_api.output_ty())); + + bytes +} + +/// Obtain the hash of all of a runtime API trait, including all of its methods. +pub fn get_runtime_apis_hash(trait_metadata: RuntimeApiMetadata) -> Hash { + // Each API is already hashed considering the trait name, so we don't need + // to consider the trait name again here. + trait_metadata.methods().fold([0u8; HASH_LEN], |bytes, method_metadata| { + // We don't care what order the trait methods exist in, and want the hash to + // be identical regardless. For this, we can just XOR the hashes for each method + // together; we'll get the same output whichever order they are XOR'd together in, + // so long as each individual method is the same. + xor(bytes, get_runtime_api_hash(&method_metadata)) + }) +} + +/// Obtain the hash of a specific view function, or an error if it's not found. +pub fn get_view_function_hash(view_function: &ViewFunctionMetadata) -> Hash { + let registry = view_function.types; + + // The Query ID is `twox_128(pallet_name) ++ twox_128("fn_name(fnarg_types) -> return_ty")`. + let mut bytes = *view_function.query_id(); + + // This only takes type _names_ into account, so we beef this up by combining with actual + // type hashes, in a similar approach to runtime APIs.. + for input in view_function.inputs() { + bytes = concat_and_hash3( + &bytes, + &hash(input.name.as_bytes()), + &get_type_hash(registry, input.id), + ); + } + + bytes = concat_and_hash2(&bytes, &get_type_hash(registry, view_function.output_ty())); + + bytes +} + +/// Obtain the hash of all of the view functions in a pallet, including all of its methods. 
+fn get_pallet_view_functions_hash(pallet_metadata: &PalletMetadata) -> Hash { + // Each API is already hashed considering the trait name, so we don't need + // to consider the trait name again here. + pallet_metadata + .view_functions() + .fold([0u8; HASH_LEN], |bytes, method_metadata| { + // We don't care what order the view functions are declared in, and want the hash to + // be identical regardless. For this, we can just XOR the hashes for each method + // together; we'll get the same output whichever order they are XOR'd together in, + // so long as each individual method is the same. + xor(bytes, get_view_function_hash(&method_metadata)) + }) +} + +/// Obtain the hash representation of a `frame_metadata::v15::PalletMetadata`. +pub fn get_pallet_hash(pallet: PalletMetadata) -> Hash { + let registry = pallet.types; + + let call_bytes = match pallet.call_ty_id() { + Some(calls) => get_type_hash(registry, calls), + None => [0u8; HASH_LEN], + }; + let event_bytes = match pallet.event_ty_id() { + Some(event) => get_type_hash(registry, event), + None => [0u8; HASH_LEN], + }; + let error_bytes = match pallet.error_ty_id() { + Some(error) => get_type_hash(registry, error), + None => [0u8; HASH_LEN], + }; + let constant_bytes = pallet.constants().fold([0u8; HASH_LEN], |bytes, constant| { + // We don't care what order the constants occur in, so XOR together the combinations + // of (constantName, constantType) to make the order we see them irrelevant. + let constant_hash = concat_and_hash2( + &hash(constant.name.as_bytes()), + &get_type_hash(registry, constant.ty()), + ); + xor(bytes, constant_hash) + }); + let storage_bytes = match pallet.storage() { + Some(storage) => { + let prefix_hash = hash(storage.prefix().as_bytes()); + let entries_hash = storage.entries().iter().fold([0u8; HASH_LEN], |bytes, entry| { + // We don't care what order the storage entries occur in, so XOR them together + // to make the order irrelevant. 
+ xor(bytes, get_storage_entry_hash(registry, entry)) + }); + concat_and_hash2(&prefix_hash, &entries_hash) + }, + None => [0u8; HASH_LEN], + }; + let view_functions_bytes = get_pallet_view_functions_hash(&pallet); + + // Hash all of the above together: + concat_and_hash6( + &call_bytes, + &event_bytes, + &error_bytes, + &constant_bytes, + &storage_bytes, + &view_functions_bytes, + ) +} + +/// Obtain a hash representation of our metadata or some part of it. +/// This is obtained by calling [`crate::Metadata::hasher()`]. +pub struct MetadataHasher<'a> { + metadata: &'a Metadata, + specific_pallets: Option>, + specific_runtime_apis: Option>, + include_custom_values: bool, +} + +impl<'a> MetadataHasher<'a> { + /// Create a new [`MetadataHasher`] + pub(crate) fn new(metadata: &'a Metadata) -> Self { + Self { + metadata, + specific_pallets: None, + specific_runtime_apis: None, + include_custom_values: true, + } + } + + /// Only hash the provided pallets instead of hashing every pallet. + pub fn only_these_pallets>(&mut self, specific_pallets: &'a [S]) -> &mut Self { + self.specific_pallets = Some(specific_pallets.iter().map(|n| n.as_ref()).collect()); + self + } + + /// Only hash the provided runtime APIs instead of hashing every runtime API + pub fn only_these_runtime_apis>( + &mut self, + specific_runtime_apis: &'a [S], + ) -> &mut Self { + self.specific_runtime_apis = + Some(specific_runtime_apis.iter().map(|n| n.as_ref()).collect()); + self + } + + /// Do not hash the custom values + pub fn ignore_custom_values(&mut self) -> &mut Self { + self.include_custom_values = false; + self + } + + /// Hash the given metadata. + pub fn hash(&self) -> Hash { + let metadata = self.metadata; + + let pallet_hash = metadata.pallets().fold([0u8; HASH_LEN], |bytes, pallet| { + // If specific pallets are given, only include this pallet if it is in the specific + // pallets. 
+ let should_hash = self + .specific_pallets + .as_ref() + .map(|specific_pallets| specific_pallets.contains(&pallet.name())) + .unwrap_or(true); + // We don't care what order the pallets are seen in, so XOR their + // hashes together to be order independent. + if should_hash { xor(bytes, get_pallet_hash(pallet)) } else { bytes } + }); + + let apis_hash = metadata.runtime_api_traits().fold([0u8; HASH_LEN], |bytes, api| { + // If specific runtime APIs are given, only include this pallet if it is in the specific + // runtime APIs. + let should_hash = self + .specific_runtime_apis + .as_ref() + .map(|specific_runtime_apis| specific_runtime_apis.contains(&api.name())) + .unwrap_or(true); + // We don't care what order the runtime APIs are seen in, so XOR their + // hashes together to be order independent. + if should_hash { xor(bytes, get_runtime_apis_hash(api)) } else { bytes } + }); + + let outer_enums_hash = concat_and_hash3( + &get_type_hash(&metadata.types, metadata.outer_enums.call_enum_ty), + &get_type_hash(&metadata.types, metadata.outer_enums.event_enum_ty), + &get_type_hash(&metadata.types, metadata.outer_enums.error_enum_ty), + ); + + let extrinsic_hash = get_extrinsic_hash(&metadata.types, &metadata.extrinsic); + + let custom_values_hash = if self.include_custom_values { + get_custom_metadata_hash(&metadata.custom()) + } else { + Default::default() + }; + + concat_and_hash5( + &pallet_hash, + &apis_hash, + &outer_enums_hash, + &extrinsic_hash, + &custom_values_hash, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bitvec::{order::Lsb0, vec::BitVec}; + use frame_metadata::v15; + use scale_info::{Registry, meta_type}; + + // Define recursive types. + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct A { + pub b: Box, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct B { + pub a: Box, + } + + // Define TypeDef supported types. 
+ #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // TypeDef::Composite with TypeDef::Array with Typedef::Primitive. + struct AccountId32(Hash); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // TypeDef::Variant. + enum DigestItem { + PreRuntime( + // TypeDef::Array with primitive. + [::core::primitive::u8; 4usize], + // TypeDef::Sequence. + ::std::vec::Vec<::core::primitive::u8>, + ), + Other(::std::vec::Vec<::core::primitive::u8>), + // Nested TypeDef::Tuple. + RuntimeEnvironmentUpdated(((i8, i16), (u32, u64))), + // TypeDef::Compact. + Index(#[codec(compact)] ::core::primitive::u8), + // TypeDef::BitSequence. + BitSeq(BitVec), + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // Ensure recursive types and TypeDef variants are captured. + struct MetadataTestType { + recursive: A, + composite: AccountId32, + type_def: DigestItem, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + // Simulate a PalletCallMetadata. + enum Call { + #[codec(index = 0)] + FillBlock { ratio: AccountId32 }, + #[codec(index = 1)] + Remark { remark: DigestItem }, + } + + fn build_default_extrinsic() -> v15::ExtrinsicMetadata { + v15::ExtrinsicMetadata { + version: 0, + signed_extensions: vec![], + address_ty: meta_type::<()>(), + call_ty: meta_type::<()>(), + signature_ty: meta_type::<()>(), + extra_ty: meta_type::<()>(), + } + } + + fn default_pallet() -> v15::PalletMetadata { + v15::PalletMetadata { + name: "Test", + storage: None, + calls: None, + event: None, + constants: vec![], + error: None, + index: 0, + docs: vec![], + } + } + + fn build_default_pallets() -> Vec { + vec![ + v15::PalletMetadata { + name: "First", + calls: Some(v15::PalletCallMetadata { ty: meta_type::() }), + ..default_pallet() + }, + v15::PalletMetadata { + name: "Second", + index: 1, + calls: Some(v15::PalletCallMetadata { + ty: meta_type::<(DigestItem, AccountId32, A)>(), + }), + ..default_pallet() + }, + ] + } + + fn pallets_to_metadata(pallets: Vec) -> Metadata { 
+ v15::RuntimeMetadataV15::new( + pallets, + build_default_extrinsic(), + meta_type::<()>(), + vec![], + v15::OuterEnums { + call_enum_ty: meta_type::<()>(), + event_enum_ty: meta_type::<()>(), + error_enum_ty: meta_type::<()>(), + }, + v15::CustomMetadata { map: Default::default() }, + ) + .try_into() + .expect("can build valid metadata") + } + + #[test] + fn different_pallet_index() { + let pallets = build_default_pallets(); + let mut pallets_swap = pallets.clone(); + + let metadata = pallets_to_metadata(pallets); + + // Change the order in which pallets are registered. + pallets_swap.swap(0, 1); + pallets_swap[0].index = 0; + pallets_swap[1].index = 1; + let metadata_swap = pallets_to_metadata(pallets_swap); + + let hash = MetadataHasher::new(&metadata).hash(); + let hash_swap = MetadataHasher::new(&metadata_swap).hash(); + + // Changing pallet order must still result in a deterministic unique hash. + assert_eq!(hash, hash_swap); + } + + #[test] + fn recursive_type() { + let mut pallet = default_pallet(); + pallet.calls = Some(v15::PalletCallMetadata { ty: meta_type::() }); + let metadata = pallets_to_metadata(vec![pallet]); + + // Check hashing algorithm finishes on a recursive type. + MetadataHasher::new(&metadata).hash(); + } + + #[test] + /// Ensure correctness of hashing when parsing the `metadata.types`. + /// + /// Having a recursive structure `A: { B }` and `B: { A }` registered in different order + /// `types: { { id: 0, A }, { id: 1, B } }` and `types: { { id: 0, B }, { id: 1, A } }` + /// must produce the same deterministic hashing value. 
+ fn recursive_types_different_order() { + let mut pallets = build_default_pallets(); + pallets[0].calls = Some(v15::PalletCallMetadata { ty: meta_type::() }); + pallets[1].calls = Some(v15::PalletCallMetadata { ty: meta_type::() }); + pallets[1].index = 1; + let mut pallets_swap = pallets.clone(); + let metadata = pallets_to_metadata(pallets); + + pallets_swap.swap(0, 1); + pallets_swap[0].index = 0; + pallets_swap[1].index = 1; + let metadata_swap = pallets_to_metadata(pallets_swap); + + let hash = MetadataHasher::new(&metadata).hash(); + let hash_swap = MetadataHasher::new(&metadata_swap).hash(); + + // Changing pallet order must still result in a deterministic unique hash. + assert_eq!(hash, hash_swap); + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct Aba { + ab: (A, B), + other: A, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct Abb { + ab: (A, B), + other: B, + } + + #[test] + /// Ensure ABB and ABA have a different structure: + fn do_not_reuse_visited_type_ids() { + let metadata_hash_with_type = |ty| { + let mut pallets = build_default_pallets(); + pallets[0].calls = Some(v15::PalletCallMetadata { ty }); + let metadata = pallets_to_metadata(pallets); + MetadataHasher::new(&metadata).hash() + }; + + let aba_hash = metadata_hash_with_type(meta_type::()); + let abb_hash = metadata_hash_with_type(meta_type::()); + + assert_ne!(aba_hash, abb_hash); + } + + #[test] + fn hash_cache_gets_filled_with_correct_hashes() { + let mut registry = Registry::new(); + let a_type_id = registry.register_type(&meta_type::()).id; + let b_type_id = registry.register_type(&meta_type::()).id; + let registry: PortableRegistry = registry.into(); + + let mut cache = HashMap::new(); + + let a_hash = get_type_hash_recurse(®istry, a_type_id, &mut cache); + let a_hash2 = get_type_hash_recurse(®istry, a_type_id, &mut cache); + let b_hash = get_type_hash_recurse(®istry, b_type_id, &mut cache); + + let CachedHash::Hash(a_cache_hash) = 
cache[&a_type_id] else { panic!() }; + let CachedHash::Hash(b_cache_hash) = cache[&b_type_id] else { panic!() }; + + assert_eq!(a_hash, a_cache_hash); + assert_eq!(b_hash, b_cache_hash); + + assert_eq!(a_hash, a_hash2); + assert_ne!(a_hash, b_hash); + } + + #[test] + // Redundant clone clippy warning is a lie; https://github.com/rust-lang/rust-clippy/issues/10870 + #[allow(clippy::redundant_clone)] + fn pallet_hash_correctness() { + let compare_pallets_hash = |lhs: &v15::PalletMetadata, rhs: &v15::PalletMetadata| { + let metadata = pallets_to_metadata(vec![lhs.clone()]); + let hash = MetadataHasher::new(&metadata).hash(); + + let metadata = pallets_to_metadata(vec![rhs.clone()]); + let new_hash = MetadataHasher::new(&metadata).hash(); + + assert_ne!(hash, new_hash); + }; + + // Build metadata progressively from an empty pallet to a fully populated pallet. + let mut pallet = default_pallet(); + let pallet_lhs = pallet.clone(); + pallet.storage = Some(v15::PalletStorageMetadata { + prefix: "Storage", + entries: vec![v15::StorageEntryMetadata { + name: "BlockWeight", + modifier: v15::StorageEntryModifier::Default, + ty: v15::StorageEntryType::Plain(meta_type::()), + default: vec![], + docs: vec![], + }], + }); + compare_pallets_hash(&pallet_lhs, &pallet); + + let pallet_lhs = pallet.clone(); + // Calls are similar to: + // + // ``` + // pub enum Call { + // call_name_01 { arg01: type }, + // call_name_02 { arg01: type, arg02: type } + // } + // ``` + pallet.calls = Some(v15::PalletCallMetadata { ty: meta_type::() }); + compare_pallets_hash(&pallet_lhs, &pallet); + + let pallet_lhs = pallet.clone(); + // Events are similar to Calls. 
+ pallet.event = Some(v15::PalletEventMetadata { ty: meta_type::() }); + compare_pallets_hash(&pallet_lhs, &pallet); + + let pallet_lhs = pallet.clone(); + pallet.constants = vec![v15::PalletConstantMetadata { + name: "BlockHashCount", + ty: meta_type::(), + value: vec![96u8, 0, 0, 0], + docs: vec![], + }]; + compare_pallets_hash(&pallet_lhs, &pallet); + + let pallet_lhs = pallet.clone(); + pallet.error = Some(v15::PalletErrorMetadata { ty: meta_type::() }); + compare_pallets_hash(&pallet_lhs, &pallet); + } + + #[test] + fn metadata_per_pallet_hash_correctness() { + let pallets = build_default_pallets(); + + // Build metadata with just the first pallet. + let metadata_one = pallets_to_metadata(vec![pallets[0].clone()]); + // Build metadata with both pallets. + let metadata_both = pallets_to_metadata(pallets); + + // Hashing will ignore any non-existent pallet and return the same result. + let hash = MetadataHasher::new(&metadata_one) + .only_these_pallets(&["First", "Second"]) + .hash(); + let hash_rhs = MetadataHasher::new(&metadata_one).only_these_pallets(&["First"]).hash(); + assert_eq!(hash, hash_rhs, "hashing should ignore non-existent pallets"); + + // Hashing one pallet from metadata with 2 pallets inserted will ignore the second pallet. + let hash_second = MetadataHasher::new(&metadata_both).only_these_pallets(&["First"]).hash(); + assert_eq!(hash_second, hash, "hashing one pallet should ignore the others"); + + // Check hashing with all pallets. + let hash_second = MetadataHasher::new(&metadata_both) + .only_these_pallets(&["First", "Second"]) + .hash(); + assert_ne!( + hash_second, hash, + "hashing both pallets should produce a different result from hashing just one pallet" + ); + } + + #[test] + fn field_semantic_changes() { + // Get a hash representation of the provided meta type, + // inserted in the context of pallet metadata call. 
+ let to_hash = |meta_ty| { + let pallet = v15::PalletMetadata { + calls: Some(v15::PalletCallMetadata { ty: meta_ty }), + ..default_pallet() + }; + let metadata = pallets_to_metadata(vec![pallet]); + MetadataHasher::new(&metadata).hash() + }; + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumA1 { + First { hi: u8, bye: String }, + Second(u32), + Third, + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumA2 { + Second(u32), + Third, + First { bye: String, hi: u8 }, + } + + // EncodeAsType and DecodeAsType only care about enum variant names + // and not indexes or field ordering or the enum name itself.. + assert_eq!(to_hash(meta_type::()), to_hash(meta_type::())); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructB1 { + hello: bool, + another: [u8; 32], + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructB2 { + another: [u8; 32], + hello: bool, + } + + // As with enums, struct names and field orders are irrelevant as long as + // the field names and types are the same. + assert_eq!(to_hash(meta_type::()), to_hash(meta_type::())); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumC1 { + First(u8), + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumC2 { + Second(u8), + } + + // The enums are binary compatible, but the variants have different names, so + // semantically they are different and should not be equal. + assert_ne!(to_hash(meta_type::()), to_hash(meta_type::())); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumD1 { + First { a: u8 }, + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum EnumD2 { + First { b: u8 }, + } + + // Named fields contain a different semantic meaning ('a' and 'b') despite + // being binary compatible, so hashes should be different. 
+ assert_ne!(to_hash(meta_type::()), to_hash(meta_type::())); + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructE1 { + a: u32, + } + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct StructE2 { + b: u32, + } + + // Similar to enums, struct fields that contain a different semantic meaning + // ('a' and 'b') despite being binary compatible will have different hashes. + assert_ne!(to_hash(meta_type::()), to_hash(meta_type::())); + } + + use frame_metadata::v15::{ + PalletEventMetadata, PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier, + }; + + fn metadata_with_pallet_events() -> v15::RuntimeMetadataV15 { + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct FirstEvent { + s: String, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + struct SecondEvent { + n: u8, + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum Events { + First(FirstEvent), + Second(SecondEvent), + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum Errors { + First(DispatchError), + Second(DispatchError), + } + + #[allow(dead_code)] + #[derive(scale_info::TypeInfo)] + enum Calls { + First(u8), + Second(u8), + } + + #[allow(dead_code)] + enum DispatchError { + A, + B, + C, + } + + impl scale_info::TypeInfo for DispatchError { + type Identity = DispatchError; + + fn type_info() -> scale_info::Type { + scale_info::Type { + path: scale_info::Path { segments: vec!["sp_runtime", "DispatchError"] }, + type_params: vec![], + type_def: TypeDef::Variant(TypeDefVariant { variants: vec![] }), + docs: vec![], + } + } + } + + let pallets = vec![ + v15::PalletMetadata { + name: "First", + index: 0, + calls: Some(v15::PalletCallMetadata { ty: meta_type::() }), + storage: Some(PalletStorageMetadata { + prefix: "___", + entries: vec![StorageEntryMetadata { + name: "Hello", + modifier: StorageEntryModifier::Optional, + // Note: This is the important part here: + // The Events type will be trimmed down and this 
trimming needs to be + // reflected when the hash of this storage item is computed. + ty: frame_metadata::v14::StorageEntryType::Plain(meta_type::>()), + default: vec![], + docs: vec![], + }], + }), + event: Some(PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + docs: vec![], + }, + v15::PalletMetadata { + name: "Second", + index: 1, + calls: Some(v15::PalletCallMetadata { ty: meta_type::() }), + storage: None, + event: Some(PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + docs: vec![], + }, + ]; + + v15::RuntimeMetadataV15::new( + pallets, + build_default_extrinsic(), + meta_type::<()>(), + vec![], + v15::OuterEnums { + call_enum_ty: meta_type::(), + event_enum_ty: meta_type::(), + error_enum_ty: meta_type::(), + }, + v15::CustomMetadata { map: Default::default() }, + ) + } + + #[test] + fn hash_comparison_trimmed_metadata() { + use pezkuwi_subxt_utils_stripmetadata::StripMetadata; + + // trim the metadata: + let metadata = metadata_with_pallet_events(); + let trimmed_metadata = { + let mut m = metadata.clone(); + m.strip_metadata(|e| e == "First", |_| true); + m + }; + + // Now convert it into our inner repr: + let metadata = Metadata::try_from(metadata).unwrap(); + let trimmed_metadata = Metadata::try_from(trimmed_metadata).unwrap(); + + // test that the hashes are the same: + let hash = MetadataHasher::new(&metadata).only_these_pallets(&["First"]).hash(); + let hash_trimmed = MetadataHasher::new(&trimmed_metadata).hash(); + + assert_eq!(hash, hash_trimmed); + } +} diff --git a/vendor/pezkuwi-subxt/metadata/src/utils/variant_index.rs b/vendor/pezkuwi-subxt/metadata/src/utils/variant_index.rs new file mode 100644 index 00000000..c7aab5de --- /dev/null +++ b/vendor/pezkuwi-subxt/metadata/src/utils/variant_index.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use alloc::{borrow::ToOwned, string::String}; +use hashbrown::HashMap; +use scale_info::{PortableRegistry, TypeDef, Variant, form::PortableForm}; + +/// Given some type ID and type registry, build a couple of +/// indexes to look up variants by index or name. If the ID provided +/// is not a variant, the index will be empty. +/// +/// API optimized for dealing with the `Option` variant type IDs +/// that we get in metadata pallets. +#[derive(Debug, Clone)] +pub struct VariantIndex { + by_name: HashMap, + by_index: HashMap, +} + +impl VariantIndex { + /// Build indexes from the optional variant ID. + pub fn build(variant_id: Option, types: &PortableRegistry) -> Self { + let Some(variants) = Self::get(variant_id, types) else { + return Self::empty(); + }; + + let mut by_name = HashMap::new(); + let mut by_index = HashMap::new(); + for (pos, variant) in variants.iter().enumerate() { + by_name.insert(variant.name.to_owned(), pos); + by_index.insert(variant.index, pos); + } + + Self { by_name, by_index } + } + + /// Build an empty index. + pub fn empty() -> Self { + Self { by_name: Default::default(), by_index: Default::default() } + } + + /// Get the variants we're pointing at; None if this isn't possible. + pub fn get( + variant_id: Option, + types: &PortableRegistry, + ) -> Option<&[Variant]> { + let variant_id = variant_id?; + let TypeDef::Variant(v) = &types.resolve(variant_id)?.type_def else { + return None; + }; + Some(&v.variants) + } + + /// Lookup a variant by name; `None` if the type is not a variant or name isn't found. + pub fn lookup_by_name<'a, K>( + &self, + name: &K, + variant_id: Option, + types: &'a PortableRegistry, + ) -> Option<&'a Variant> + where + String: alloc::borrow::Borrow, + K: core::hash::Hash + Eq + ?Sized, + { + let pos = *self.by_name.get(name)?; + let variants = Self::get(variant_id, types)?; + variants.get(pos) + } + + /// Lookup a variant by index; `None` if the type is not a variant or index isn't found. 
+ pub fn lookup_by_index<'a>( + &self, + index: u8, + variant_id: Option, + types: &'a PortableRegistry, + ) -> Option<&'a Variant> { + let pos = *self.by_index.get(&index)?; + let variants = Self::get(variant_id, types)?; + variants.get(pos) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/Cargo.toml b/vendor/pezkuwi-subxt/rpcs/Cargo.toml new file mode 100644 index 00000000..7bc27259 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/Cargo.toml @@ -0,0 +1,101 @@ +[package] +name = "pezkuwi-subxt-rpcs" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Make RPC calls to Bizinikiwi based nodes" +keywords = ["parity", "rpcs", "subxt"] + +[features] +default = ["jsonrpsee", "native"] + +subxt = ["dep:pezkuwi-subxt-core"] +jsonrpsee = ["dep:jsonrpsee", "dep:tokio-util"] + +unstable-light-client = ["dep:pezkuwi-subxt-lightclient"] + +reconnecting-rpc-client = [ + "dep:finito", + "dep:tokio", + "jsonrpsee", + "tokio/sync", +] + +mock-rpc-client = [ + "dep:tokio", + "tokio/sync", +] + +# Enable this for native (ie non web/wasm builds). +# Exactly 1 of "web" and "native" is expected. +native = [ + "jsonrpsee?/async-client", + "jsonrpsee?/client-ws-transport-tls", + "jsonrpsee?/ws-client", + "pezkuwi-subxt-lightclient?/native", +] + +# Enable this for web/wasm builds. +# Exactly 1 of "web" and "native" is expected. 
+web = [ + "dep:wasm-bindgen-futures", + "finito?/wasm-bindgen", + "getrandom/js", + "jsonrpsee?/async-wasm-client", + "jsonrpsee?/client-web-transport", + "jsonrpsee?/wasm-client", + "pezkuwi-subxt-lightclient?/web", +] + +[dependencies] +codec = { workspace = true } +derive-where = { workspace = true } +frame-metadata = { workspace = true, features = ["decode"] } +futures = { workspace = true } +getrandom = { workspace = true, optional = true } +hex = { workspace = true } +impl-serde = { workspace = true } +primitive-types = { workspace = true, features = ["serde"] } +serde = { workspace = true } +serde_json = { workspace = true, features = ["default", "raw_value"] } +thiserror = { workspace = true } +tracing = { workspace = true } +url = { workspace = true } + +# Included with the jsonrpsee feature +jsonrpsee = { workspace = true, optional = true } +tokio-util = { workspace = true, features = ["compat"], optional = true } + +# Included with the reconnecting-rpc-client feature +finito = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } + +# Included with the unstable-light-client feature +pezkuwi-subxt-lightclient = { workspace = true, optional = true, default-features = false } + +# Included with the pezkuwi-subxt-core feature to impl Config for RpcConfig +pezkuwi-subxt-core = { workspace = true, optional = true } + +# Included with WASM feature +wasm-bindgen-futures = { workspace = true, optional = true } + +[dev-dependencies] +http-body = { workspace = true } +hyper = { workspace = true } +jsonrpsee = { workspace = true, features = ["server"] } +tower = { workspace = true } + +[package.metadata.docs.rs] +default-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/rpcs/README.md b/vendor/pezkuwi-subxt/rpcs/README.md new file mode 100644 index 00000000..ca9caea8 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/README.md @@ -0,0 +1,18 @@ +# subxt-rpcs + +This crate 
provides an interface for interacting with Bizinikiwi nodes via the available RPC methods. + +```rust +use subxt_rpcs::{RpcClient, ChainHeadRpcMethods}; + +// Connect to a local node: +let client = RpcClient::from_url("ws://127.0.0.1:9944").await?; +// Use a set of methods, here the V2 "chainHead" ones: +let methods = ChainHeadRpcMethods::new(client); + +// Call some RPC methods (in this case a subscription): +let mut follow_subscription = methods.chainhead_v1_follow(false).await.unwrap(); +while let Some(follow_event) = follow_subscription.next().await { + // do something with events.. +} +``` \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/jsonrpsee_impl.rs b/vendor/pezkuwi-subxt/rpcs/src/client/jsonrpsee_impl.rs new file mode 100644 index 00000000..e887675a --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/jsonrpsee_impl.rs @@ -0,0 +1,137 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{RawRpcFuture, RawRpcSubscription, RpcClientT}; +use crate::Error; +use futures::stream::{StreamExt, TryStreamExt}; +use jsonrpsee::{ + core::{ + client::{Error as JsonrpseeError, Client, ClientT, SubscriptionClientT, SubscriptionKind}, + traits::ToRpcParams, + }, + types::SubscriptionId, +}; +use serde_json::value::RawValue; + +/// Construct a `jsonrpsee` RPC client with some sane defaults. 
+pub async fn client(url: &str) -> Result { + jsonrpsee_helpers::client(url).await.map_err(|e| Error::Client(Box::new(e))) +} + +struct Params(Option>); + +impl ToRpcParams for Params { + fn to_rpc_params(self) -> Result>, serde_json::Error> { + Ok(self.0) + } +} + +impl RpcClientT for Client { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + Box::pin(async move { + let res = ClientT::request(self, method, Params(params)).await?; + Ok(res) + }) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + Box::pin(async move { + let stream = SubscriptionClientT::subscribe::, _>( + self, + sub, + Params(params), + unsub, + ).await?; + + let id = match stream.kind() { + SubscriptionKind::Subscription(SubscriptionId::Str(id)) => { + Some(id.clone().into_owned()) + } + _ => None, + }; + + let stream = stream + .map_err(|e| Error::Client(Box::new(e))) + .boxed(); + Ok(RawRpcSubscription { stream, id }) + }) + } +} + +// Convert a JsonrpseeError into the RPC error in this crate. +// The main reason for this is to capture user errors so that +// they can be represented/handled without casting. +impl From for Error { + fn from(error: JsonrpseeError) -> Self { + match error { + JsonrpseeError::Call(e) => { + Error::User(crate::UserError { + code: e.code(), + message: e.message().to_owned(), + data: e.data().map(|d| d.to_owned()) + }) + }, + e => { + Error::Client(Box::new(e)) + } + } + } +} + +// helpers for a jsonrpsee specific RPC client. 
+#[cfg(all(feature = "jsonrpsee", feature = "native"))] +mod jsonrpsee_helpers { + pub use jsonrpsee::{ + client_transport::ws::{self, EitherStream, Url, WsTransportClientBuilder}, + core::client::{Client, Error}, + }; + use tokio_util::compat::Compat; + + pub type Sender = ws::Sender>; + pub type Receiver = ws::Receiver>; + + /// Build WS RPC client from URL + pub async fn client(url: &str) -> Result { + let (sender, receiver) = ws_transport(url).await?; + Ok(Client::builder() + .max_buffer_capacity_per_subscription(4096) + .build_with_tokio(sender, receiver)) + } + + async fn ws_transport(url: &str) -> Result<(Sender, Receiver), Error> { + let url = Url::parse(url).map_err(|e| Error::Transport(e.into()))?; + WsTransportClientBuilder::default() + .build(url) + .await + .map_err(|e| Error::Transport(e.into())) + } +} + +// helpers for a jsonrpsee specific RPC client. +#[cfg(all(feature = "jsonrpsee", feature = "web", target_arch = "wasm32"))] +mod jsonrpsee_helpers { + pub use jsonrpsee::{ + client_transport::web, + core::client::{Client, ClientBuilder, Error}, + }; + + /// Build web RPC client from URL + pub async fn client(url: &str) -> Result { + let (sender, receiver) = web::connect(url) + .await + .map_err(|e| Error::Transport(e.into()))?; + Ok(ClientBuilder::default() + .max_buffer_capacity_per_subscription(4096) + .build_with_wasm(sender, receiver)) + } +} \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/lightclient_impl.rs b/vendor/pezkuwi-subxt/rpcs/src/client/lightclient_impl.rs new file mode 100644 index 00000000..84862e73 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/lightclient_impl.rs @@ -0,0 +1,62 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use super::{RawRpcFuture, RawRpcSubscription, RpcClientT}; +use crate::Error; +use futures::stream::{StreamExt, TryStreamExt}; +use serde_json::value::RawValue; +use pezkuwi_subxt_lightclient::{LightClientRpc, LightClientRpcError}; + +impl RpcClientT for LightClientRpc { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + Box::pin(async move { + let res = self.request(method.to_owned(), params) + .await?; + + Ok(res) + }) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + Box::pin(async move { + let sub = self.subscribe(sub.to_owned(), params, unsub.to_owned()) + .await?; + + let id = Some(sub.id().to_owned()); + let stream = sub + .map_err(|e| Error::Client(Box::new(e))) + .boxed(); + + Ok(RawRpcSubscription { id, stream }) + }) + } +} + +impl From for Error { + fn from(err: LightClientRpcError) -> Error { + match err { + LightClientRpcError::JsonRpcError(e) => { + // If the error is a typical user error, report it as such, else + // just wrap the error into a ClientError. + let Ok(user_error) = e.try_deserialize() else { + return Error::Client(Box::::from(e)) + }; + Error::User(user_error) + }, + LightClientRpcError::SmoldotError(e) => Error::Client(Box::::from(e)), + LightClientRpcError::BackgroundTaskDropped => Error::Client(Box::::from("Smoldot background task was dropped")), + } + } +} + +type CoreError = dyn core::error::Error + Send + Sync + 'static; \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/mock_rpc_client.rs b/vendor/pezkuwi-subxt/rpcs/src/client/mock_rpc_client.rs new file mode 100644 index 00000000..0c0223f8 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/mock_rpc_client.rs @@ -0,0 +1,632 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! 
This module exposes a [`MockRpcClient`], which is useful for testing. +//! +//! # Example +//! +//! ```rust +//! use pezkuwi_subxt_rpcs::client::{ RpcClient, MockRpcClient }; +//! use pezkuwi_subxt_rpcs::client::mock_rpc_client::Json; +//! +//! let mut state = vec![ +//! Json(1u8), +//! Json(2u8), +//! Json(3u8), +//! ]; +//! +//! // Define a mock client by providing some functions which intercept +//! // method and subscription calls and return some response. +//! let mock_client = MockRpcClient::builder() +//! .method_handler_once("foo", async move |params| { +//! // Return each item from our state, and then null afterwards. +//! state.pop() +//! }) +//! .subscription_handler("bar", async move |params, unsub| { +//! // Arrays, vecs or an RpcSubscription can be returned here to +//! // signal the set of values to be handed back on a subscription. +//! vec![Json(1), Json(2), Json(3)] +//! }) +//! .build(); +//! +//! // Build an RPC Client that can be used in Subxt or in conjunction with +//! // the RPC methods provided in this crate. +//! let rpc_client = RpcClient::new(mock_client); +//! ``` + +use super::{RpcClientT, RawRpcFuture, RawRpcSubscription}; +use crate::{Error, UserError}; +use core::future::Future; +use futures::StreamExt; +use serde_json::value::RawValue; +use std::sync::{Arc, Mutex}; +use std::collections::{HashMap, VecDeque}; + +type MethodHandlerFnOnce = Box>) -> RawRpcFuture<'static, Box> + Send + Sync + 'static>; +type SubscriptionHandlerFnOnce = Box>, &str) -> RawRpcFuture<'static, RawRpcSubscription> + Send + Sync + 'static>; + +type MethodHandlerFn = Box>) -> RawRpcFuture<'static, Box> + Send + Sync + 'static>; +type SubscriptionHandlerFn = Box>, &str) -> RawRpcFuture<'static, RawRpcSubscription> + Send + Sync + 'static>; + +/// A builder to configure and build a new [`MockRpcClient`]. 
+#[derive(Default)] +pub struct MockRpcClientBuilder { + method_handlers_once: HashMap>, + method_handlers: HashMap, + method_fallback: Option, + subscription_handlers_once: HashMap>, + subscription_handlers: HashMap, + subscription_fallback: Option +} + +impl MockRpcClientBuilder { + /// Add a handler for a specific RPC method. This is called exactly once, and multiple such calls for the same method can be + /// added. Only when any calls registered with this have been used up is the method set by [`Self::method_handler`] called. + pub fn method_handler_once(mut self, name: impl Into, f: MethodHandler) -> Self + where + MethodHandler: FnOnce(Option>) -> MFut + Send + Sync + 'static, + MFut: Future + Send + 'static, + MRes: IntoHandlerResponse, + { + let handler: MethodHandlerFnOnce = Box::new(move |_method: &str, params: Option>| { + let fut = f(params); + Box::pin(async move { fut.await.into_handler_response() }) + }); + self.method_handlers_once.entry(name.into()).or_default().push_back(handler); + self + } + + /// Add a handler for a specific RPC method. + pub fn method_handler(mut self, name: impl Into, mut f: MethodHandler) -> Self + where + MethodHandler: FnMut(Option>) -> MFut + Send + Sync + 'static, + MFut: Future + Send + 'static, + MRes: IntoHandlerResponse, + { + let handler: MethodHandlerFn = Box::new(move |_method: &str, params: Option>| { + let fut = f(params); + Box::pin(async move { fut.await.into_handler_response() }) + }); + self.method_handlers.insert(name.into(), handler); + self + } + + /// Add a fallback handler to handle any methods not handled by a specific handler. 
+ pub fn method_fallback(mut self, mut f: MethodHandler) -> Self + where + MethodHandler: FnMut(String, Option>) -> MFut + Send + Sync + 'static, + MFut: Future + Send + 'static, + MRes: IntoHandlerResponse, + { + let handler: MethodHandlerFn = Box::new(move |method: &str, params: Option>| { + let fut = f(method.to_owned(), params); + Box::pin(async move { fut.await.into_handler_response() }) + }); + self.method_fallback = Some(handler); + self + } + + /// Add a handler for a specific RPC subscription. + pub fn subscription_handler_once(mut self, name: impl Into, f: SubscriptionHandler) -> Self + where + SubscriptionHandler: FnOnce(Option>, String) -> SFut + Send + Sync + 'static, + SFut: Future + Send + 'static, + SRes: IntoSubscriptionResponse, + { + let handler: SubscriptionHandlerFnOnce = Box::new(move |_sub: &str, params: Option>, unsub: &str| { + let fut = f(params, unsub.to_owned()); + Box::pin(async move { fut.await.into_subscription_response() }) + }); + self.subscription_handlers_once.entry(name.into()).or_default().push_back(handler); + self + } + + /// Add a handler for a specific RPC subscription. + pub fn subscription_handler(mut self, name: impl Into, mut f: SubscriptionHandler) -> Self + where + SubscriptionHandler: FnMut(Option>, String) -> SFut + Send + Sync + 'static, + SFut: Future + Send + 'static, + SRes: IntoSubscriptionResponse, + { + let handler: SubscriptionHandlerFn = Box::new(move |_sub: &str, params: Option>, unsub: &str| { + let fut = f(params, unsub.to_owned()); + Box::pin(async move { fut.await.into_subscription_response() }) + }); + self.subscription_handlers.insert(name.into(), handler); + self + } + + /// Add a fallback handler to handle any subscriptions not handled by a specific handler. 
+ pub fn subscription_fallback(mut self, mut f: SubscriptionHandler) -> Self + where + SubscriptionHandler: FnMut(String, Option>, String) -> SFut + Send + Sync + 'static, + SFut: Future + Send + 'static, + SRes: IntoSubscriptionResponse, + { + let handler: SubscriptionHandlerFn = Box::new(move |sub: &str, params: Option>, unsub: &str| { + let fut = f(sub.to_owned(), params, unsub.to_owned()); + Box::pin(async move { fut.await.into_subscription_response() }) + }); + self.subscription_fallback = Some(handler); + self + } + + /// Construct a [`MockRpcClient`] given some state which will be mutably available to each of the handlers. + pub fn build(self) -> MockRpcClient { + MockRpcClient { + method_handlers_once: Arc::new(Mutex::new(self.method_handlers_once)), + method_handlers: Arc::new(Mutex::new(self.method_handlers)), + method_fallback: self.method_fallback.map(|f| Arc::new(Mutex::new(f))), + subscription_handlers_once: Arc::new(Mutex::new(self.subscription_handlers_once)), + subscription_handlers: Arc::new(Mutex::new(self.subscription_handlers)), + subscription_fallback: self.subscription_fallback.map(|f| Arc::new(Mutex::new(f))), + } + } +} + +/// A mock RPC client that responds programmatically to requests. +/// Useful for testing. +#[derive(Clone)] +pub struct MockRpcClient { + // These are all accessed for just long enough to call the method. The method + // returns a future, but the method call itself isn't held for long. + method_handlers_once: Arc>>>, + method_handlers: Arc>>, + method_fallback: Option>>, + subscription_handlers_once: Arc>>>, + subscription_handlers: Arc>>, + subscription_fallback: Option>>, +} + +impl MockRpcClient { + /// Construct a new [`MockRpcClient`] + pub fn builder() -> MockRpcClientBuilder { + MockRpcClientBuilder::default() + } +} + +impl RpcClientT for MockRpcClient { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + // Remove and call a one-time handler if any exist. 
+ let mut handlers_once = self.method_handlers_once.lock().unwrap(); + if let Some(handlers) = handlers_once.get_mut(method) { + if let Some(handler) = handlers.pop_front() { + return handler(method, params) + } + } + drop(handlers_once); + + // Call a specific handler for the method if one is found. + let mut handlers = self.method_handlers.lock().unwrap(); + if let Some(handler) = handlers.get_mut(method) { + return handler(method, params) + } + drop(handlers); + + // Call a fallback handler if one exists + if let Some(handler) = &self.method_fallback { + let mut handler = handler.lock().unwrap(); + return handler(method, params) + } + + // Else, method not found. + Box::pin(async move { Err(UserError::method_not_found().into()) }) + } + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + // Remove and call a one-time handler if any exist. + let mut handlers_once = self.subscription_handlers_once.lock().unwrap(); + if let Some(handlers) = handlers_once.get_mut(sub) { + if let Some(handler) = handlers.pop_front() { + return handler(sub, params, unsub) + } + } + drop(handlers_once); + + // Call a specific handler for the subscriptions if one is found. + let mut handlers = self.subscription_handlers.lock().unwrap(); + if let Some(handler) = handlers.get_mut(sub) { + return handler(sub, params, unsub) + } + drop(handlers); + + // Call a fallback handler if one exists + if let Some(handler) = &self.subscription_fallback { + let mut handler = handler.lock().unwrap(); + return handler(sub, params, unsub) + } + + // Else, method not found. + Box::pin(async move { Err(UserError::method_not_found().into()) }) + } +} + +/// Return responses wrapped in this to have them serialized to JSON. +pub struct Json(pub T); + +impl Json { + /// Create a [`Json`] from some serializable value. + /// Useful when value types are heterogeneous. 
+ pub fn value_of(item: T) -> Self { + Json(serde_json::to_value(item).expect("item cannot be converted to a serde_json::Value")) + } +} + +/// Anything that can be converted into a valid handler response implements this. +pub trait IntoHandlerResponse { + /// Convert self into a handler response. + fn into_handler_response(self) -> Result, Error>; +} + +impl IntoHandlerResponse for Result { + fn into_handler_response(self) -> Result, Error> { + self.and_then(|val| val.into_handler_response()) + } +} + +impl IntoHandlerResponse for Option { + fn into_handler_response(self) -> Result, Error> { + self.ok_or_else(|| UserError::method_not_found().into()) + .and_then(|val| val.into_handler_response()) + } +} + +impl IntoHandlerResponse for Box { + fn into_handler_response(self) -> Result, Error> { + Ok(self) + } +} + +impl IntoHandlerResponse for serde_json::Value { + fn into_handler_response(self) -> Result, Error> { + serialize_to_raw_value(&self) + } +} + +impl IntoHandlerResponse for Json { + fn into_handler_response(self) -> Result, Error> { + serialize_to_raw_value(&self.0) + } +} + +impl IntoHandlerResponse for core::convert::Infallible { + fn into_handler_response(self) -> Result, Error> { + match self {} + } +} + +fn serialize_to_raw_value(val: &T) -> Result, Error> { + let res = serde_json::to_string(val).map_err(Error::Deserialization)?; + let raw_value = RawValue::from_string(res).map_err(Error::Deserialization)?; + Ok(raw_value) +} + +/// Anything that can be a response to a subscription handler implements this. +pub trait IntoSubscriptionResponse { + /// Convert self into a handler response. + fn into_subscription_response(self) -> Result; +} + +// A tuple of a subscription plus some string is treated as a subscription with that string ID. 
+impl > IntoSubscriptionResponse for (T, S) { + fn into_subscription_response(self) -> Result { + self.0 + .into_subscription_response() + .map(|mut r| { + r.id = Some(self.1.into()); + r + }) + } +} + +impl IntoSubscriptionResponse for tokio::sync::mpsc::Receiver { + fn into_subscription_response(self) -> Result { + struct IntoStream(tokio::sync::mpsc::Receiver); + impl futures::Stream for IntoStream { + type Item = T; + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll> { + self.0.poll_recv(cx) + } + } + + Ok(RawRpcSubscription { + stream: Box::pin(IntoStream(self).map(|item| item.into_handler_response())), + id: None, + }) + } +} +impl IntoSubscriptionResponse for tokio::sync::mpsc::UnboundedReceiver { + fn into_subscription_response(self) -> Result { + struct IntoStream(tokio::sync::mpsc::UnboundedReceiver); + impl futures::Stream for IntoStream { + type Item = T; + fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll> { + self.0.poll_recv(cx) + } + } + + Ok(RawRpcSubscription { + stream: Box::pin(IntoStream(self).map(|item| item.into_handler_response())), + id: None, + }) + } +} + +impl IntoSubscriptionResponse for RawRpcSubscription { + fn into_subscription_response(self) -> Result { + Ok(self) + } +} + +impl IntoSubscriptionResponse for Result { + fn into_subscription_response(self) -> Result { + self.and_then(|res| res.into_subscription_response()) + } +} + +impl IntoSubscriptionResponse for Vec { + fn into_subscription_response(self) -> Result { + let iter = self.into_iter().map(|item| item.into_handler_response()); + Ok(RawRpcSubscription { + stream: Box::pin(futures::stream::iter(iter)), + id: None, + }) + } +} + +impl IntoSubscriptionResponse for Option { + fn into_subscription_response(self) -> Result { + match self { + Some(sub) => { + sub.into_subscription_response() + }, + None => { + Ok(RawRpcSubscription { + stream: 
Box::pin(futures::stream::empty()), + id: None, + }) + } + } + } +} + +impl IntoSubscriptionResponse for [T; N] { + fn into_subscription_response(self) -> Result { + let iter = self.into_iter().map(|item| item.into_handler_response()); + Ok(RawRpcSubscription { + stream: Box::pin(futures::stream::iter(iter)), + id: None, + }) + } +} + +impl IntoSubscriptionResponse for core::convert::Infallible { + fn into_subscription_response(self) -> Result { + match self {} + } +} + +/// Send the first items and then the second items back on a subscription; +/// If any one of the responses is an error, we'll return the error. +/// If one response has an ID and the other doesn't, we'll use that ID. +pub struct AndThen(pub A, pub B); + +impl IntoSubscriptionResponse for AndThen { + fn into_subscription_response(self) -> Result { + let a_responses = self.0.into_subscription_response(); + let b_responses = self.1.into_subscription_response(); + + match (a_responses, b_responses) { + (Err(a), _) => { + Err(a) + }, + (_, Err(b)) => { + Err(b) + }, + (Ok(mut a), Ok(b)) => { + a.stream = Box::pin(a.stream.chain(b.stream)); + a.id = a.id.or(b.id); + Ok(a) + } + } + } +} + +/// Send back either one response or the other. +pub enum Either { + /// The first possibility. + A(A), + /// The second possibility. 
+ B(B) +} + +impl IntoHandlerResponse for Either { + fn into_handler_response(self) -> Result, Error> { + match self { + Either::A(a) => a.into_handler_response(), + Either::B(b) => b.into_handler_response(), + } + } +} + +impl IntoSubscriptionResponse for Either { + fn into_subscription_response(self) -> Result { + match self { + Either::A(a) => a.into_subscription_response(), + Either::B(b) => b.into_subscription_response(), + } + } +} + + +#[cfg(test)] +mod test { + use crate::{RpcClient, rpc_params}; + use super::*; + + #[tokio::test] + async fn test_method_params() { + let rpc_client = MockRpcClient::builder() + .method_handler("foo", async |params| { + Json(params) + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // We get back whatever params we give + let res: (i32,i32,i32) = rpc_client.request("foo", rpc_params![1, 2, 3]).await.unwrap(); + assert_eq!(res, (1,2,3)); + + let res: (String,) = rpc_client.request("foo", rpc_params!["hello"]).await.unwrap(); + assert_eq!(res, ("hello".to_owned(),)); + } + + #[tokio::test] + async fn test_method_handler_then_fallback() { + let rpc_client = MockRpcClient::builder() + .method_handler("foo", async |_params| { + Json(1) + }) + .method_fallback(async |name, _params| { + Json(name) + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // Whenever we call "foo", we get 1 back. 
+ for i in [1,1,1,1] { + let res: i32 = rpc_client.request("foo", rpc_params![]).await.unwrap(); + assert_eq!(res, i); + } + + // Whenever we call anything else, we get the name of the method back + for name in ["bar", "wibble", "steve"] { + let res: String = rpc_client.request(name, rpc_params![]).await.unwrap(); + assert_eq!(res, name); + } + } + + #[tokio::test] + async fn test_method_once_then_handler() { + let rpc_client = MockRpcClient::builder() + .method_handler_once("foo", async |_params| { + Json(1) + }) + .method_handler("foo", async |_params| { + Json(2) + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // Check that we call the "once" one time and then the second after that. + for i in [1,2,2,2,2] { + let res: i32 = rpc_client.request("foo", rpc_params![]).await.unwrap(); + assert_eq!(res, i); + } + } + + #[tokio::test] + async fn test_method_once() { + let rpc_client = MockRpcClient::builder() + .method_handler_once("foo", async |_params| { + Json(1) + }) + .method_handler_once("foo", async |_params| { + Json(2) + }) + .method_handler_once("foo", async |_params| { + Json(3) + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // Check that each method is only called once, in the right order. + for i in [1,2,3] { + let res: i32 = rpc_client.request("foo", rpc_params![]).await.unwrap(); + assert_eq!(res, i); + } + + // Check that we get a "method not found" error afterwards. 
+ let err = rpc_client.request::("foo", rpc_params![]).await.unwrap_err(); + let not_found_code = UserError::method_not_found().code; + assert!(matches!(err, Error::User(u) if u.code == not_found_code)); + } + + #[tokio::test] + async fn test_subscription_once_then_handler_then_fallback() { + let rpc_client = MockRpcClient::builder() + .subscription_handler_once("foo", async |_params, _unsub| { + vec![Json(0), Json(0)] + }) + .subscription_handler("foo", async |_params, _unsub| { + vec![Json(1), Json(2), Json(3)] + }) + .subscription_fallback(async |_name, _params, _unsub| { + vec![Json(4)] + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // "foo" returns 0,0 the first time it's subscribed to + let sub = rpc_client.subscribe::("foo", rpc_params![], "unsub").await.unwrap(); + let res: Vec = sub.map(|i| i.unwrap()).collect().await; + assert_eq!(res, vec![0,0]); + + // then, "foo" returns 1,2,3 in subscription every other time + for _ in 1..5 { + let sub = rpc_client.subscribe::("foo", rpc_params![], "unsub").await.unwrap(); + let res: Vec = sub.map(|i| i.unwrap()).collect().await; + assert_eq!(res, vec![1,2,3]); + } + + // anything else returns 4 + let sub = rpc_client.subscribe::("bar", rpc_params![], "unsub").await.unwrap(); + let res: Vec = sub.map(|i| i.unwrap()).collect().await; + assert_eq!(res, vec![4]); + } + + #[tokio::test] + async fn test_subscription_and_then_with_channel() { + let (tx, rx) = tokio::sync::mpsc::channel(10); + + let rpc_client = MockRpcClient::builder() + .subscription_handler_once("foo", async move |_params, _unsub| { + AndThen( + // These should be sent first.. + vec![Json(1), Json(2), Json(3)], + // .. and then anything the channel is handing back. 
+ rx + ) + }) + .build(); + + let rpc_client = RpcClient::new(rpc_client); + + // Send a few values down the channel to be handed back in "foo" subscription: + tokio::spawn(async move { + for i in 4..=6 { + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tx.send(Json(i)).await.unwrap(); + } + }); + + // Expect all values back: + let sub = rpc_client.subscribe::("foo", rpc_params![], "unsub").await.unwrap(); + let res: Vec = sub.map(|i| i.unwrap()).collect().await; + assert_eq!(res, vec![1,2,3,4,5,6]); + } +} \ No newline at end of file diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/mod.rs b/vendor/pezkuwi-subxt/rpcs/src/client/mod.rs new file mode 100644 index 00000000..89ffd6c7 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/mod.rs @@ -0,0 +1,55 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! RPC types and client for interacting with a bizinikiwi node. +//! +//! An RPC client is instantiated and then used to create some methods, for instance +//! [`crate::methods::ChainHeadRpcMethods`], which defines the calls that can be made with it. +//! The core RPC client bits are: +//! +//! - [`RpcClientT`] is the underlying dynamic RPC implementation. This provides the low level +//! [`RpcClientT::request_raw`] and [`RpcClientT::subscribe_raw`] methods. +//! - [`RpcClient`] is the higher level wrapper around this, offering the [`RpcClient::request`] and +//! [`RpcClient::subscribe`] methods. +//! +//! We then expose implementations here (depending on which features are enabled) +//! which implement [`RpcClientT`] and can therefore be used to construct [`RpcClient`]s. +//! +//! - **jsonrpsee**: Enable an RPC client based on `jsonrpsee`. +//! - **unstable-light-client**: Enable an RPC client which uses the Smoldot light client under the +//! hood to communicate with the network of choice. +//! 
- **reconnecting-rpc-client**: Enable an RPC client based on `jsonrpsee` which handles +//! reconnecting automatically in the event of network issues. +//! - **mock-rpc-client**: Enable a mock RPC client that can be used in tests. + +crate::macros::cfg_jsonrpsee! { + mod jsonrpsee_impl; + pub use jsonrpsee::core::client::Client as JsonrpseeRpcClient; + pub use jsonrpsee_impl::client as jsonrpsee_client; +} + +crate::macros::cfg_unstable_light_client! { + mod lightclient_impl; + pub use pezkuwi_subxt_lightclient::LightClientRpc as LightClientRpcClient; + pub use pezkuwi_subxt_lightclient::LightClient; +} + +crate::macros::cfg_reconnecting_rpc_client! { + pub mod reconnecting_rpc_client; + pub use reconnecting_rpc_client::RpcClient as ReconnectingRpcClient; +} + +crate::macros::cfg_mock_rpc_client! { + pub mod mock_rpc_client; + pub use mock_rpc_client::MockRpcClient; +} + +pub mod round_robin_rpc_client; +pub use round_robin_rpc_client::RoundRobinRpcClient; + +mod rpc_client; +mod rpc_client_t; + +pub use rpc_client::{RpcClient, RpcParams, RpcSubscription, rpc_params}; +pub use rpc_client_t::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClientT}; diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/mod.rs b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/mod.rs new file mode 100644 index 00000000..c8d0518d --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/mod.rs @@ -0,0 +1,632 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # reconnecting-jsonrpsee-ws-client +//! +//! A simple reconnecting JSON-RPC WebSocket client for subxt which +//! automatically reconnects when the connection is lost but +//! it doesn't retain subscriptions and pending method calls when it reconnects. +//! +//! The logic which action to take for individual calls and subscriptions are +//! 
handled by the subxt backend implementations. +//! + +mod platform; +#[cfg(test)] +mod tests; +mod utils; + +use std::{ + pin::Pin, + sync::Arc, + task::{self, Poll}, + time::Duration, +}; + +use super::{RawRpcFuture, RawRpcSubscription, RpcClientT}; +use crate::Error as SubxtRpcError; + +use finito::Retry; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; +use jsonrpsee::core::{ + client::{ + Client as WsClient, ClientT, Subscription as RpcSubscription, SubscriptionClientT, + SubscriptionKind, + }, + traits::ToRpcParams, +}; +use platform::spawn; +use serde_json::value::RawValue; +use tokio::sync::{ + mpsc::{self, UnboundedReceiver, UnboundedSender}, + oneshot, Notify, +}; +use url::Url; +use utils::display_close_reason; + +// re-exports +pub use finito::{ExponentialBackoff, FibonacciBackoff, FixedInterval}; +pub use jsonrpsee::core::client::IdKind; +pub use jsonrpsee::{core::client::error::Error as RpcError, rpc_params, types::SubscriptionId}; + +#[cfg(feature = "native")] +pub use jsonrpsee::ws_client::{HeaderMap, PingConfig}; + +const LOG_TARGET: &str = "subxt-reconnecting-rpc-client"; + +/// Method result. +pub type MethodResult = Result, Error>; +/// Subscription result. +pub type SubscriptionResult = Result, DisconnectedWillReconnect>; + +/// The connection was closed, reconnect initiated and the subscription was dropped. +#[derive(Debug, thiserror::Error)] +#[error("The connection was closed because of `{0:?}` and reconnect initiated")] +pub struct DisconnectedWillReconnect(String); + +/// New-type pattern which implements [`ToRpcParams`] that is required by jsonrpsee. 
+#[derive(Debug, Clone)] +struct RpcParams(Option>); + +impl ToRpcParams for RpcParams { + fn to_rpc_params(self) -> Result>, serde_json::Error> { + Ok(self.0) + } +} + +#[derive(Debug)] +enum Op { + Call { + method: String, + params: RpcParams, + send_back: oneshot::Sender, + }, + Subscription { + subscribe_method: String, + params: RpcParams, + unsubscribe_method: String, + send_back: oneshot::Sender>, + }, +} + +/// Error that can occur when for a RPC call or subscription. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// The client was dropped by the user. + #[error("The client was dropped")] + Dropped, + /// The connection was closed and reconnect initiated. + #[error(transparent)] + DisconnectedWillReconnect(#[from] DisconnectedWillReconnect), + /// Other rpc error. + #[error(transparent)] + RpcError(RpcError), +} + +/// Represent a single subscription. +pub struct Subscription { + id: SubscriptionId<'static>, + stream: mpsc::UnboundedReceiver, +} + +impl Subscription { + /// Returns the next notification from the stream. + /// This may return `None` if the subscription has been terminated, + /// which may happen if the channel becomes full or is dropped. + /// + /// **Note:** This has an identical signature to the [`StreamExt::next`] + /// method (and delegates to that). Import [`StreamExt`] if you'd like + /// access to other stream combinator methods. + #[allow(clippy::should_implement_trait)] + pub async fn next(&mut self) -> Option { + StreamExt::next(self).await + } + + /// Get the subscription ID. 
+ pub fn id(&self) -> SubscriptionId<'static> { + self.id.clone() + } +} + +impl Stream for Subscription { + type Item = SubscriptionResult; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> task::Poll> { + match self.stream.poll_recv(cx) { + Poll::Ready(Some(msg)) => Poll::Ready(Some(msg)), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } +} + +impl std::fmt::Debug for Subscription { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Subscription") + .field("id", &self.id) + .finish() + } +} + +/// JSON-RPC client that reconnects automatically and may loose +/// subscription notifications when it reconnects. +#[derive(Clone, Debug)] +pub struct RpcClient { + tx: mpsc::UnboundedSender, +} + +/// Builder for [`Client`]. +#[derive(Clone, Debug)] +pub struct RpcClientBuilder

{ + max_request_size: u32, + max_response_size: u32, + retry_policy: P, + #[cfg(feature = "native")] + ping_config: Option, + #[cfg(feature = "native")] + // web doesn't support custom headers + // https://stackoverflow.com/a/4361358/6394734 + headers: HeaderMap, + max_redirections: u32, + id_kind: IdKind, + max_log_len: u32, + max_concurrent_requests: u32, + request_timeout: Duration, + connection_timeout: Duration, +} + +impl Default for RpcClientBuilder { + fn default() -> Self { + Self { + max_request_size: 50 * 1024 * 1024, + max_response_size: 50 * 1024 * 1024, + retry_policy: ExponentialBackoff::from_millis(10).max_delay(Duration::from_secs(60)), + #[cfg(feature = "native")] + ping_config: Some(PingConfig::new()), + #[cfg(feature = "native")] + headers: HeaderMap::new(), + max_redirections: 5, + id_kind: IdKind::Number, + max_log_len: 1024, + max_concurrent_requests: 1024, + request_timeout: Duration::from_secs(60), + connection_timeout: Duration::from_secs(10), + } + } +} + +impl RpcClientBuilder { + /// Create a new builder. + pub fn new() -> Self { + Self::default() + } +} + +impl

RpcClientBuilder

+where + P: Iterator + Send + Sync + 'static + Clone, +{ + /// Configure the max request size a for websocket message. + /// + /// Default: 50MB + pub fn max_request_size(mut self, max: u32) -> Self { + self.max_request_size = max; + self + } + + /// Configure the max response size a for websocket message. + /// + /// Default: 50MB + pub fn max_response_size(mut self, max: u32) -> Self { + self.max_response_size = max; + self + } + + /// Set the max number of redirections to perform until a connection is regarded as failed. + /// + /// Default: 5 + pub fn max_redirections(mut self, redirect: u32) -> Self { + self.max_redirections = redirect; + self + } + + /// Configure how many concurrent method calls are allowed. + /// + /// Default: 1024 + pub fn max_concurrent_requests(mut self, max: u32) -> Self { + self.max_concurrent_requests = max; + self + } + + /// Configure how long until a method call is regarded as failed. + /// + /// Default: 1 minute + pub fn request_timeout(mut self, timeout: Duration) -> Self { + self.request_timeout = timeout; + self + } + + /// Set connection timeout for the WebSocket handshake + /// + /// Default: 10 seconds + pub fn connection_timeout(mut self, timeout: Duration) -> Self { + self.connection_timeout = timeout; + self + } + + /// Configure the data type of the request object ID + /// + /// Default: number + pub fn id_format(mut self, kind: IdKind) -> Self { + self.id_kind = kind; + self + } + + /// Set maximum length for logging calls and responses. + /// Logs bigger than this limit will be truncated. + /// + /// Default: 1024 + pub fn set_max_logging_length(mut self, max: u32) -> Self { + self.max_log_len = max; + self + } + + #[cfg(feature = "native")] + #[cfg_attr(docsrs, doc(cfg(feature = "native")))] + /// Configure custom headers to use in the WebSocket handshake. 
+ pub fn set_headers(mut self, headers: HeaderMap) -> Self { + self.headers = headers; + self + } + + /// Configure which retry policy to use when a connection is lost. + /// + /// Default: Exponential backoff 10ms + pub fn retry_policy(self, retry_policy: T) -> RpcClientBuilder { + RpcClientBuilder { + max_request_size: self.max_request_size, + max_response_size: self.max_response_size, + retry_policy, + #[cfg(feature = "native")] + ping_config: self.ping_config, + #[cfg(feature = "native")] + headers: self.headers, + max_redirections: self.max_redirections, + max_log_len: self.max_log_len, + id_kind: self.id_kind, + max_concurrent_requests: self.max_concurrent_requests, + request_timeout: self.request_timeout, + connection_timeout: self.connection_timeout, + } + } + + #[cfg(feature = "native")] + #[cfg_attr(docsrs, doc(cfg(feature = "native")))] + /// Configure the WebSocket ping/pong interval. + /// + /// Default: 30 seconds. + pub fn enable_ws_ping(mut self, ping_config: PingConfig) -> Self { + self.ping_config = Some(ping_config); + self + } + + #[cfg(feature = "native")] + #[cfg_attr(docsrs, doc(cfg(feature = "native")))] + /// Disable WebSocket ping/pongs. + /// + /// Default: 30 seconds. + pub fn disable_ws_ping(mut self) -> Self { + self.ping_config = None; + self + } + + /// Build and connect to the target. + pub async fn build(self, url: impl AsRef) -> Result { + let url = Url::parse(url.as_ref()).map_err(|e| RpcError::Transport(Box::new(e)))?; + let (tx, rx) = mpsc::unbounded_channel(); + let client = Retry::new(self.retry_policy.clone(), || { + platform::ws_client(&url, &self) + }) + .await?; + + platform::spawn(background_task(client, rx, url, self)); + + Ok(RpcClient { tx }) + } +} + +impl RpcClient { + /// Create a builder. + pub fn builder() -> RpcClientBuilder { + RpcClientBuilder::new() + } + + /// Perform a JSON-RPC method call. 
+ pub async fn request( + &self, + method: String, + params: Option>, + ) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + self.tx + .send(Op::Call { + method, + params: RpcParams(params), + send_back: tx, + }) + .map_err(|_| Error::Dropped)?; + + rx.await.map_err(|_| Error::Dropped)? + } + + /// Perform a JSON-RPC subscription. + pub async fn subscribe( + &self, + subscribe_method: String, + params: Option>, + unsubscribe_method: String, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(Op::Subscription { + subscribe_method, + params: RpcParams(params), + unsubscribe_method, + send_back: tx, + }) + .map_err(|_| Error::Dropped)?; + rx.await.map_err(|_| Error::Dropped)? + } +} + +impl RpcClientT for RpcClient { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + async { + self.request(method.to_string(), params) + .await + .map_err(error_to_rpc_error) + } + .boxed() + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + async { + let sub = self + .subscribe(sub.to_string(), params, unsub.to_string()) + .await + .map_err(error_to_rpc_error)?; + + let id = match sub.id() { + SubscriptionId::Num(n) => n.to_string(), + SubscriptionId::Str(s) => s.to_string(), + }; + let stream = sub + // NOTE: The stream emits only one error `DisconnectWillReconnect if the connection was lost + // and safe to wrap it in a `SubxtRpcError::DisconnectWillReconnect` here + .map_err(|e: DisconnectedWillReconnect| { + SubxtRpcError::DisconnectedWillReconnect(e.to_string()) + }) + .boxed(); + + Ok(RawRpcSubscription { + stream, + id: Some(id), + }) + } + .boxed() + } +} + +/// Convert a reconnecting client Error into the RPC error in this crate. +/// The main reason for this is to capture user errors so that +/// they can be represented/handled without casting. 
+fn error_to_rpc_error(error: Error) -> SubxtRpcError { + match error { + Error::DisconnectedWillReconnect(reason) => { + SubxtRpcError::DisconnectedWillReconnect(reason.to_string()) + }, + Error::RpcError(RpcError::Call(e)) => { + SubxtRpcError::User(crate::UserError { + code: e.code(), + message: e.message().to_owned(), + data: e.data().map(|d| d.to_owned()) + }) + }, + e => { + SubxtRpcError::Client(Box::new(e)) + } + } +} + +async fn background_task

( + mut client: Arc, + mut rx: UnboundedReceiver, + url: Url, + client_builder: RpcClientBuilder

, +) where + P: Iterator + Send + 'static + Clone, +{ + let disconnect = Arc::new(tokio::sync::Notify::new()); + + loop { + tokio::select! { + // An incoming JSON-RPC call to dispatch. + next_message = rx.recv() => { + match next_message { + None => break, + Some(op) => { + spawn(dispatch_call(client.clone(), op, disconnect.clone())); + } + }; + } + // The connection was terminated and try to reconnect. + _ = client.on_disconnect() => { + let params = ReconnectParams { + url: &url, + client_builder: &client_builder, + close_reason: client.disconnect_reason().await, + }; + + client = match reconnect(params).await { + Ok(client) => client, + Err(e) => { + tracing::debug!(target: LOG_TARGET, "Failed to reconnect: {e}; terminating the connection"); + break; + } + }; + } + } + } + + disconnect.notify_waiters(); +} + +async fn dispatch_call(client: Arc, op: Op, on_disconnect: Arc) { + match op { + Op::Call { + method, + params, + send_back, + } => { + match client.request::, _>(&method, params).await { + Ok(rp) => { + // Fails only if the request is dropped by the client. + let _ = send_back.send(Ok(rp)); + } + Err(RpcError::RestartNeeded(e)) => { + // Fails only if the request is dropped by the client. + let _ = send_back.send(Err(DisconnectedWillReconnect(e.to_string()).into())); + } + Err(e) => { + // Fails only if the request is dropped by the client. 
+ let _ = send_back.send(Err(Error::RpcError(e))); + } + } + } + Op::Subscription { + subscribe_method, + params, + unsubscribe_method, + send_back, + } => { + match client + .subscribe::, _>( + &subscribe_method, + params.clone(), + &unsubscribe_method, + ) + .await + { + Ok(sub) => { + let (tx, rx) = mpsc::unbounded_channel(); + let sub_id = match sub.kind() { + SubscriptionKind::Subscription(id) => id.clone().into_owned(), + _ => unreachable!("No method subscriptions possible in this crate; qed"), + }; + + platform::spawn(subscription_handler( + tx.clone(), + sub, + on_disconnect.clone(), + client.clone(), + )); + + let stream = Subscription { + id: sub_id, + stream: rx, + }; + + // Fails only if the request is dropped by the client. + let _ = send_back.send(Ok(stream)); + } + Err(RpcError::RestartNeeded(e)) => { + // Fails only if the request is dropped by the client. + let _ = send_back.send(Err(DisconnectedWillReconnect(e.to_string()).into())); + } + Err(e) => { + // Fails only if the request is dropped. + let _ = send_back.send(Err(Error::RpcError(e))); + } + } + } + } +} + +/// Handler for each individual subscription. +async fn subscription_handler( + sub_tx: UnboundedSender, + mut rpc_sub: RpcSubscription>, + client_closed: Arc, + client: Arc, +) { + loop { + tokio::select! { + next_msg = rpc_sub.next() => { + let Some(notif) = next_msg else { + let close = client.disconnect_reason().await; + _ = sub_tx.send(Err(DisconnectedWillReconnect(close.to_string()))); + break; + }; + + let msg = notif.expect("RawValue is valid JSON; qed"); + + // Fails only if subscription was closed by the user. + if sub_tx.send(Ok(msg)).is_err() { + break; + } + } + // This channel indicates whether the subscription was closed by the user. + _ = sub_tx.closed() => { + break; + } + // This channel indicates whether the main task has been closed. + // at this point no further messages are processed. 
+ _ = client_closed.notified() => { + break; + } + } + } +} + +struct ReconnectParams<'a, P> { + url: &'a Url, + client_builder: &'a RpcClientBuilder

, + close_reason: RpcError, +} + +async fn reconnect

(params: ReconnectParams<'_, P>) -> Result, RpcError> +where + P: Iterator + Send + 'static + Clone, +{ + let ReconnectParams { + url, + client_builder, + close_reason, + } = params; + + let retry_policy = client_builder.retry_policy.clone(); + + tracing::debug!(target: LOG_TARGET, "Connection to {url} was closed: `{}`; starting to reconnect", display_close_reason(&close_reason)); + + let client = Retry::new(retry_policy.clone(), || { + platform::ws_client(url, client_builder) + }) + .await?; + + tracing::debug!(target: LOG_TARGET, "Connection to {url} was successfully re-established"); + + Ok(client) +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/platform.rs b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/platform.rs new file mode 100644 index 00000000..2fc9965b --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/platform.rs @@ -0,0 +1,84 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{RpcClientBuilder, RpcError}; +use jsonrpsee::core::client::Client; +use std::sync::Arc; +use url::Url; + +#[cfg(feature = "native")] +pub use tokio::spawn; + +#[cfg(feature = "web")] +pub use wasm_bindgen_futures::spawn_local as spawn; + +#[cfg(feature = "native")] +pub async fn ws_client

( + url: &Url, + builder: &RpcClientBuilder

, +) -> Result, RpcError> { + use jsonrpsee::ws_client::WsClientBuilder; + + let RpcClientBuilder { + max_request_size, + max_response_size, + ping_config, + headers, + max_redirections, + id_kind, + max_concurrent_requests, + max_log_len, + request_timeout, + connection_timeout, + .. + } = builder; + + let mut ws_client_builder = WsClientBuilder::new() + .max_request_size(*max_request_size) + .max_response_size(*max_response_size) + .set_headers(headers.clone()) + .max_redirections(*max_redirections as usize) + .max_buffer_capacity_per_subscription(tokio::sync::Semaphore::MAX_PERMITS) + .max_concurrent_requests(*max_concurrent_requests as usize) + .set_max_logging_length(*max_log_len) + .set_tcp_no_delay(true) + .request_timeout(*request_timeout) + .connection_timeout(*connection_timeout) + .id_format(*id_kind); + + if let Some(ping) = ping_config { + ws_client_builder = ws_client_builder.enable_ws_ping(*ping); + } + + let client = ws_client_builder.build(url.as_str()).await?; + + Ok(Arc::new(client)) +} + +#[cfg(feature = "web")] +pub async fn ws_client

( + url: &Url, + builder: &RpcClientBuilder

(), value: vec![] }, + )]), + }; + + let metadata = v15::RuntimeMetadataV15::new( + pallets, + extrinsic, + meta_type::(), + runtime_apis, + outer_enums, + custom_values, + ); + + assert_is_in_types!(A B C D E F G H I J K L M N O P => metadata.types); + + let only_first_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "First", |_| true); + md + }; + + assert_is_in_types!(A B E F G H I J K L M N O P => only_first_pallet.types); + assert_not_in_types!(C D => only_first_pallet.types); + assert_eq!(only_first_pallet.pallets.len(), 1); + assert_eq!(&only_first_pallet.pallets[0].name, "First"); + + let only_second_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "Second", |_| true); + md + }; + + assert_is_in_types!(C D E F G H I J K L M N O P => only_second_pallet.types); + assert_not_in_types!(A B => only_second_pallet.types); + assert_eq!(only_second_pallet.pallets.len(), 1); + assert_eq!(&only_second_pallet.pallets[0].name, "Second"); + + let no_pallets = { + let mut md = metadata.clone(); + md.strip_metadata(|_| false, |_| true); + md + }; + + assert_is_in_types!(E F G H I J K L M N O P => no_pallets.types); + assert_not_in_types!(A B C D => no_pallets.types); + assert_eq!(no_pallets.pallets.len(), 0); + + let only_second_runtime_api = { + let mut md = metadata.clone(); + md.strip_metadata(|_| true, |api| api == "AnotherApi"); + md + }; + + assert_is_in_types!(A B C D E F G H K L M N O P => only_second_runtime_api.types); + assert_not_in_types!(I J => only_second_runtime_api.types); + assert_eq!(only_second_runtime_api.pallets.len(), 2); + assert_eq!(only_second_runtime_api.apis.len(), 1); + } + + #[test] + fn v16_stripping_works() { + make_types!(A B C D E F G H I J K L M N O P); + + let pallets = vec![ + v16::PalletMetadata { + name: "First", + index: 0, + calls: None, + storage: Some(v16::PalletStorageMetadata { + prefix: "___", + entries: vec![v16::StorageEntryMetadata { + name: "Hello", + modifier: 
v16::StorageEntryModifier::Optional, + ty: frame_metadata::v16::StorageEntryType::Plain(meta_type::()), + default: vec![], + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }], + }), + event: Some(v16::PalletEventMetadata { + ty: meta_type::(), + deprecation_info: v16::EnumDeprecationInfo::nothing_deprecated(), + }), + constants: vec![], + associated_types: vec![], + view_functions: vec![], + error: None, + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }, + v16::PalletMetadata { + name: "Second", + index: 1, + calls: Some(v16::PalletCallMetadata { + ty: meta_type::(), + deprecation_info: v16::EnumDeprecationInfo::nothing_deprecated(), + }), + storage: None, + event: None, + constants: vec![v16::PalletConstantMetadata { + name: "SomeConstant", + ty: meta_type::(), + value: vec![], + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }], + associated_types: vec![v16::PalletAssociatedTypeMetadata { + name: "Hasher", + ty: meta_type::(), + docs: vec![], + }], + view_functions: vec![v16::PalletViewFunctionMetadata { + name: "some_view_function", + id: [0; 32], + inputs: vec![v16::FunctionParamMetadata { + name: "input1", + ty: meta_type::(), + }], + output: meta_type::(), + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }], + error: None, + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }, + ]; + + let extrinsic = v16::ExtrinsicMetadata { + call_ty: meta_type::(), // same as outer_enums.call_enum_ty + versions: vec![0], + transaction_extensions_by_version: BTreeMap::new(), + transaction_extensions: vec![], + address_ty: meta_type::(), + signature_ty: meta_type::(), + }; + + let runtime_apis = vec![ + v16::RuntimeApiMetadata { + name: "SomeApi", + version: Compact(2), + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + methods: vec![v16::RuntimeApiMethodMetadata { + name: "some_method", + inputs: 
vec![v16::FunctionParamMetadata { + name: "input1", + ty: meta_type::(), + }], + output: meta_type::(), + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }], + }, + v16::RuntimeApiMetadata { + name: "AnotherApi", + version: Compact(1), + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + methods: vec![v16::RuntimeApiMethodMetadata { + name: "another_method", + inputs: vec![v16::FunctionParamMetadata { + name: "input1", + ty: meta_type::(), + }], + output: meta_type::(), + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }], + }, + ]; + + let outer_enums = v16::OuterEnums { + call_enum_ty: meta_type::(), + error_enum_ty: meta_type::(), + event_enum_ty: meta_type::

(), + }; + + let custom_values = v16::CustomMetadata { + map: BTreeMap::from_iter(vec![( + "Item", + v16::CustomValueMetadata { ty: meta_type::(), value: vec![] }, + )]), + }; + + let metadata = v16::RuntimeMetadataV16::new( + pallets, + extrinsic, + runtime_apis, + outer_enums, + custom_values, + ); + + assert_is_in_types!(A B C D E F G H I J K L M N O P => metadata.types); + + let only_first_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "First", |_| true); + md + }; + + assert_is_in_types!(A B H I J K L M N O P => only_first_pallet.types); + assert_not_in_types!(C D E F G => only_first_pallet.types); + assert_eq!(only_first_pallet.pallets.len(), 1); + assert_eq!(&only_first_pallet.pallets[0].name, "First"); + + let only_second_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "Second", |_| true); + md + }; + + assert_is_in_types!(C D E F G H I J K L M N O P => only_second_pallet.types); + assert_not_in_types!(A B => only_second_pallet.types); + assert_eq!(only_second_pallet.pallets.len(), 1); + assert_eq!(&only_second_pallet.pallets[0].name, "Second"); + + let no_pallets = { + let mut md = metadata.clone(); + md.strip_metadata(|_| false, |_| true); + md + }; + + assert_is_in_types!(H I J K L M N O P => no_pallets.types); + assert_not_in_types!(A B C D E F G => no_pallets.types); + assert_eq!(no_pallets.pallets.len(), 0); + + let only_second_runtime_api = { + let mut md = metadata.clone(); + md.strip_metadata(|_| true, |api| api == "AnotherApi"); + md + }; + + assert_is_in_types!(A B C D E F G H I L M N O P => only_second_runtime_api.types); + assert_not_in_types!(J K => only_second_runtime_api.types); + assert_eq!(only_second_runtime_api.pallets.len(), 2); + assert_eq!(only_second_runtime_api.apis.len(), 1); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/.config/lingua.dic b/vendor/pezkuwi-zombienet-sdk/.config/lingua.dic new file mode 100644 index 00000000..d35a7a11 --- /dev/null +++ 
b/vendor/pezkuwi-zombienet-sdk/.config/lingua.dic @@ -0,0 +1,50 @@ +90 + += +CLI +Deserialization +Deserialized +IFF +IPv4 +JSON +NetworkNode +Ok +P2P +PjsResult +PoS +RPC +RUN_IN_CI +SDK +WASM +arg +args +chain_spec_command +cmd +declaratively +deserialize +deserialized +dir +env +fs +invulnerables +ip +js +k8s +msg +multiaddress +natively +ns +p2p +parachaing +pjs_rs +polkadot +polkadot_ +rococo_local_testnet +rpc +serde_json +tgz +tmp +u128 +u64 +validator +ws diff --git a/vendor/pezkuwi-zombienet-sdk/.config/spellcheck.toml b/vendor/pezkuwi-zombienet-sdk/.config/spellcheck.toml new file mode 100644 index 00000000..e061c29a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.config/spellcheck.toml @@ -0,0 +1,13 @@ +[hunspell] +lang = "en_US" +search_dirs = ["."] +extra_dictionaries = ["lingua.dic"] +skip_os_lookups = true +use_builtin = true + +[hunspell.quirks] +# `Type`'s +# 5x +transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?x$", "^'s$", "^\\+$", "[><+-]"] +allow_concatenation = true +allow_dashes = true diff --git a/vendor/pezkuwi-zombienet-sdk/.dockerignore b/vendor/pezkuwi-zombienet-sdk/.dockerignore new file mode 100644 index 00000000..d8db2f75 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.dockerignore @@ -0,0 +1,5 @@ +target +Dockerfile +.dockerignore +.git +.gitignore \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/.github/CODEOWNERS b/vendor/pezkuwi-zombienet-sdk/.github/CODEOWNERS new file mode 100644 index 00000000..352ab850 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/CODEOWNERS @@ -0,0 +1,2 @@ +@pepoviola +@l0r1s \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/bug_report.yaml b/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 00000000..7efba5af --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,124 @@ +name: Bug Report +description: File a bug report +labels: 
["triage-needed"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + **NOTE** A number of issues reported against Zombienet are often found to already be fixed in more current versions of the project. + Before reporting an issue, please verify the version you are running with `zombienet version` and compare it to the latest release. + If they differ, please update your version of Zombienet to the latest possible and retry your command before creating an issue. + + + - type: textarea + id: description + attributes: + label: Issue Description + description: Please explain your issue + value: "Describe your issue" + validations: + required: true + + - type: textarea + id: reproducer + attributes: + label: Steps to reproduce the issue + description: Please explain the steps to reproduce the issue, including configuration files needed. + value: "Steps to reproduce the issue\n1.\n2.\n3.\n" + validations: + required: true + + - type: textarea + id: received_results + attributes: + label: Describe the results you received + description: Please explain the results you are noticing, including stacktrace and error logs. + value: "Describe the results you received" + validations: + required: true + + - type: textarea + id: expected_results + attributes: + label: Describe the results you expected + description: Please explain the results you are expecting + value: "Describe the results you expected" + validations: + required: true + + - type: input + id: zombienet_version + attributes: + label: Zombienet version + description: Which zombienet version are you using ? + validations: + required: true + + - type: dropdown + id: provider + attributes: + label: Provider + description: Which provider are you using ? 
+ options: + - Native + - Kubernetes + - Podman + validations: + required: true + + - type: textarea + id: provider_version + attributes: + label: Provider version + description: Which provider version / binaries versions are you using ? + value: | + ## For binaries + polkadot 0.9.40-a2b62fb872b + polkadot-parachain 0.9.380-fe24f39507f + + ## For Kubernetes/Podman + podman version 4.4.1 + + OR + + kubectl version v0.26.3 + cluster version 1.25.2 + render: yaml + validations: + required: true + + - type: dropdown + id: upstream_latest + attributes: + label: Upstream Latest Release + description: Have you tried running the [latest upstream release](https://github.com/paritytech/zombienet/releases/latest) + options: + - 'Yes' + - 'No' + validations: + required: true + + - type: textarea + id: additional_environment + attributes: + label: Additional environment details + description: Please describe any additional environment details like (Cloud, Local, OS, Provider versions...) + value: "Additional environment details" + + - type: textarea + id: additional_info + attributes: + label: Additional information + description: Please explain the additional information you deem important + value: "Additional information like issue happens only occasionally or issue happens with a particular architecture or on a particular setting" + validations: + required: false + + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: Provide us with screenshots if needed to have a better understanding of the issue + validations: + required: false \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/feature_report.yaml b/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/feature_report.yaml new file mode 100644 index 00000000..7f214b1e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/ISSUE_TEMPLATE/feature_report.yaml @@ -0,0 +1,43 @@ +name: Feature request +description: File a feature request +labels: 
["triage-needed"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this feature report! + Please make sure to describe your feature and the problem it would solve. + + + - type: textarea + id: description + attributes: + label: Is your feature request related to a problem? Please describe. + description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + value: "Describe the feature" + validations: + required: true + + - type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen.. + validations: + required: true + + - type: textarea + id: alt_solution + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: false + + - type: textarea + id: additional_context + attributes: + label: Additional context + description: Add any other context or screenshots about the feature request here. + validations: + required: false \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci.yml new file mode 100644 index 00000000..dcb1c06a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci.yml @@ -0,0 +1,97 @@ +name: Cargo Build & Test + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + CARGO_TERM_COLOR: always + RUSTFLAGS: "-Dwarnings" + +jobs: + build: + name: Zombienet SDK - latest + runs-on: ubuntu-latest + strategy: + matrix: + toolchain: + - stable + # TODO 24-02-08: Disable nightly due to tkaitchuck/aHash#200. 
+ #- nightly + steps: + # https://github.com/jlumbroso/free-disk-space + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: false + + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: install_deps + run: sudo apt-get update && sudo apt-get install protobuf-compiler + + - name: Init nigthly install for fmt + run: rustup update nightly && rustup default nightly && rustup component add rustfmt + + - name: Check format + run: cargo +nightly fmt --check --all + + - name: Init install + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} && rustup component add clippy + + - name: Fetch cache + uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + with: + shared-key: "zombie-cache" + + - name: Clippy + # disable needless_lifetimes until we align the version with polakdot-sdk + run: cargo clippy --all-targets --all-features -- -A clippy::needless_lifetimes + + - name: Build + run: cargo build + + - name: Tests + run: cargo test --workspace -- --skip ci_k8s + + # TODO: fix and re-enable + # coverage: + # name: Zombienet SDK - coverage + # needs: build + # runs-on: ubuntu-20.04 + # if: github.event_name == 'pull_request' + + # permissions: + # issues: write + # pull-requests: write + + # steps: + # - uses: actions/checkout@v3 + + # # https://github.com/jlumbroso/free-disk-space + # - name: Free Disk Space (Ubuntu) + # uses: jlumbroso/free-disk-space@main + # with: + # tool-cache: false + + # - name: Fetch cache + # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + # with: + # shared-key: "zombie-cache" + + # - name: Install latest nextest release + # uses: taiki-e/install-action@nextest + + # - name: Install cargo-llvm-cov + # uses: taiki-e/install-action@cargo-llvm-cov + + # - name: Collect coverage data + # run: cargo llvm-cov nextest --workspace --exclude zombienet-sdk --test-threads 1 --lcov --output-path 
lcov.info + + # - name: Report code coverage + # uses: Nef10/lcov-reporter-action@v0.4.0 + # with: + # lcov-file: lcov.info + # pr-number: ${{ github.event.pull_request.number }} diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci_integration.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci_integration.yml new file mode 100644 index 00000000..7899c695 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/ci_integration.yml @@ -0,0 +1,206 @@ +name: Integration test + +on: + push: + branches: [main] + pull_request: + branches: [main] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + RUN_IN_CONTAINER: 1 + FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 + GHA_CLUSTER_SERVER_ADDR: "https://kubernetes.default:443" + CARGO_TERM_COLOR: always + RUSTFLAGS: "-Dwarnings" + BASE_IMAGE: docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202506301118 + RUN_IN_CI: "1" + RUST_LOG: "zombienet_orchestrator=debug,zombienet_provider=debug" + CARGO_TARGET_DIR: /tmp/target + +jobs: + build-tests: + runs-on: ubuntu-latest + timeout-minutes: 60 + container: + image: docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202506301118 + steps: + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + with: + cache-on-failure: true + + - name: Build tests + run: | + cargo build --tests --keep-going --locked + mkdir -p artifacts + cd artifacts + find /tmp/target/debug/deps/ -maxdepth 1 -name "smoke-*" ! -name "*.d" -exec mv {} $(pwd)/smoke \; + find /tmp/target/debug/deps/ -maxdepth 1 -name "smoke_native-*" ! -name "*.d" -exec mv {} $(pwd)/smoke_native \; + cd .. 
+ tar cvfz artifacts.tar.gz artifacts + + - name: Upload artifacts + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: zombienet-tests-${{ github.sha }} + path: artifacts.tar.gz + + k8s-integration-test-smoke: + runs-on: ubuntu-latest + needs: build-tests + timeout-minutes: 60 + container: + image: docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202506301118 + steps: + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + name: zombienet-tests-${{ github.sha }} + path: /tmp + + - name: script + timeout-minutes: 45 + run: | + export ZOMBIE_K8S_CI_NAMESPACE=$(cat /data/namespace) + export ZOMBIE_PROVIDER="k8s" + cd /tmp + ls -la + tar xvfz artifacts.tar.gz + ./artifacts/smoke --nocapture + + - name: dump logs + if: always() + run: | + export ZOMBIE_K8S_CI_NAMESPACE=$(cat /data/namespace) + mkdir -p /tmp/zombie-1/logs + + # Install kubectl if not available + if ! command -v kubectl &> /dev/null; then + echo "Installing kubectl..." + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + fi + + echo "Listing pods in namespace $ZOMBIE_K8S_CI_NAMESPACE..." + kubectl get pods -n "$ZOMBIE_K8S_CI_NAMESPACE" -o wide || true + for pod in $(kubectl get pods -n "$ZOMBIE_K8S_CI_NAMESPACE" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || true); do + echo "Dumping logs for pod: $pod" + kubectl logs -n "$ZOMBIE_K8S_CI_NAMESPACE" "$pod" --all-containers=true > "/tmp/zombie-1/logs/${pod}.log" 2>&1 || true + done + find /tmp/zombie* -name "*.log" -type f ! 
-regex '.*/[0-9]+\.log' -exec cp {} /tmp/zombie-1/logs/ \; 2>/dev/null || true + echo "Collected logs:" + ls -la /tmp/zombie-1/logs/ || true + + - name: upload logs + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie-1/logs/* + + docker-integration-test-smoke: + runs-on: ubuntu-latest + needs: build-tests + timeout-minutes: 60 + steps: + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + name: zombienet-tests-${{ github.sha }} + path: /tmp + + - name: Install dependencies + run: | + sudo apt-get -y update + sudo apt-get -y install wget + # Manually download and install the OpenSSL 1.1 library + wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb + sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb + + - name: script + timeout-minutes: 45 + run: | + export ZOMBIE_PROVIDER="docker" + cd /tmp + ls -la + tar xvfz artifacts.tar.gz + ./artifacts/smoke --nocapture + + - name: dump logs + if: always() + run: | + mkdir -p /tmp/zombie-1/logs + for container in $(docker ps -a --filter "name=zombie" --format "{{.Names}}" 2>/dev/null || true); do + echo "Dumping logs for container: $container" + docker logs "$container" > "/tmp/zombie-1/logs/${container}.log" 2>&1 || true + done + find /tmp/zombie* -name "*.log" -type f ! 
-regex '.*/[0-9]+\.log' -exec cp {} /tmp/zombie-1/logs/ \; 2>/dev/null || true + ls -la /tmp/zombie-1/logs/ || true + + - name: upload logs + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie-1/logs/* + + native-integration-test-smoke: + runs-on: ubuntu-latest + needs: build-tests + timeout-minutes: 60 + container: + image: docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202506301118 + steps: + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + name: zombienet-tests-${{ github.sha }} + path: /tmp + + - name: Download bins + shell: bash + run: | + for bin in polkadot polkadot-execute-worker polkadot-prepare-worker polkadot-omni-node polkadot-parachain; do + echo "downloading $bin"; + curl -L -o /tmp/$bin https://github.com/paritytech/polkadot-sdk/releases/download/polkadot-stable2503-1/$bin; + chmod 755 /tmp/$bin; + done + ls -ltr /tmp + export PATH=/tmp:$PATH + echo $PATH + + - name: script + run: | + export PATH=/tmp:$PATH + echo $PATH + # mv artifacts.tar.gz /tmp + cd /tmp + ls -la + tar xvfz artifacts.tar.gz + export ZOMBIE_PROVIDER="native" + ./artifacts/smoke_native --nocapture + # cargo test --test smoke-native -- --nocapture + + - name: collect logs + if: always() + run: | + mkdir -p /tmp/zombie-1/logs + find /tmp/zombie* -name "*.log" -type f ! 
-regex '.*/[0-9]+\.log' -exec cp {} /tmp/zombie-1/logs/ \; 2>/dev/null || true + ls -la /tmp/zombie-1/logs/ || true + + - name: upload logs + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie-1/logs/* diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/documentation.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/documentation.yml new file mode 100644 index 00000000..a08556e1 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/documentation.yml @@ -0,0 +1,62 @@ +name: Cargo Create Docs + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + CARGO_TERM_COLOR: always + RUSTFLAGS: "-Dwarnings" + +jobs: + build-rust-doc: + name: Zombienet SDK - Rust Docs + runs-on: ubuntu-latest + strategy: + matrix: + toolchain: + # TODO 24-02-08: Disable nightly due to tkaitchuck/aHash#200. + #- nightly + - stable + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Init nightly install for fmt + run: rustup update nightly && rustup default nightly && rustup component add rustfmt + + - name: Check format + run: cargo +nightly fmt --check --all + + - name: Init install + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} && rustup component add clippy + + - name: install_deps + run: sudo apt-get update && sudo apt-get install protobuf-compiler + + - name: Create docs + run: | + cargo doc --no-deps + echo "" > target/doc/index.html + + + + - name: Move docs + run: | + mkdir -p ./doc + mv ./target/doc/* ./doc + git config user.email "github-action@users.noreply.github.com" + git config user.name "GitHub Action" + git config user.password "${{ secrets.GH_PAGES_TOKEN }}" + git checkout --orphan gh-pages + mkdir to_delete + shopt -s extglob + mv !(to_delete) ./to_delete + mv ./to_delete/doc/* . 
+ rm -rf ./to_delete + git add --all + git commit -m "Documentation" + shell: bash # Necessary for `shopt` to work + - run: git push -f origin gh-pages:gh-pages + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/fileserver.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/fileserver.yml new file mode 100644 index 00000000..8b61ea9c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/fileserver.yml @@ -0,0 +1,52 @@ +name: File server build & image publish +run-name: Deploy file server ${{ github.ref }} + +on: + push: + branches: + - main + paths: + - "Cargo.toml" + - "crates/file-server/**" + workflow_dispatch: {} + +env: + PROJECT_ID: "parity-zombienet" + GCR_REGISTRY: "europe-west3-docker.pkg.dev" + GCR_REPOSITORY: "zombienet-public-images" + +jobs: + build_and_push: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Setup gcloud CLI + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3.0.1 + with: + service_account_key: ${{ secrets.GCP_SA_KEY }} + project_id: ${{ env.PROJECT_ID }} + export_default_credentials: true + + - name: Login to GCP + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3.0.0 + with: + credentials_json: ${{ secrets.GCP_SA_KEY }} + + - name: Artifact registry authentication + run: | + gcloud auth configure-docker ${{ env.GCR_REGISTRY }} + + - name: Build, tag, and push image to GCP Artifact registry + id: build-image + env: + IMAGE: "${{ env.GCR_REGISTRY }}/${{ env.PROJECT_ID }}/${{ env.GCR_REPOSITORY }}/zombienet-file-server" + + run: | + docker build -t $IMAGE:${{ github.sha }} -f ./crates/file-server/Dockerfile . 
+ docker tag $IMAGE:${{ github.sha }} $IMAGE:latest + docker push --all-tags $IMAGE + echo "image=$IMAGE:${{ github.sha }}" >> $GITHUB_OUTPUT + echo "image=$IMAGE:latest" >> $GITHUB_OUTPUT \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/publish.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/publish.yml new file mode 100644 index 00000000..862cdc0e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/publish.yml @@ -0,0 +1,26 @@ +name: Publish to crates.io + +on: + release: + types: [published] + +jobs: + check-publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + with: + save-if: ${{ github.ref == 'refs/heads/main' }} + + - name: install parity-publish + run: cargo install parity-publish@0.10.6 --locked -q + + - name: parity-publish check + run: parity-publish --color always check --allow-unpublished + + # TODO: remove dry-run once we confirm everything works as expected + - name: parity-publish dry-run + run: parity-publish --color always apply --dry-run diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/release.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/release.yml new file mode 100644 index 00000000..11dc8f6d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/release.yml @@ -0,0 +1,67 @@ +name: Release bin for zombie-cli + +on: + push: + tags: + - 'v*.*.*' + +jobs: + build: + strategy: + matrix: + include: + - os: ubuntu-latest + target: x86_64-unknown-linux-gnu + - os: macos-latest + target: aarch64-apple-darwin + + runs-on: ${{ matrix.os }} + + steps: + - name: Checkout repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Build release binary + run: cargo 
build --release --bin zombie-cli --target ${{ matrix.target }} + + - name: Package binary + run: | + mkdir -p dist + cp target/${{ matrix.target }}/release/zombie-cli dist/zombie-cli-${{ matrix.target }} + + - name: Upload artifact + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: zombie-cli-${{ matrix.os }}-${{ matrix.target }}-${{ github.ref_name }} + path: dist/* + + release: + runs-on: ubuntu-latest + needs: build + + steps: + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + path: artifacts + + - name: Generate checksums + run: | + cd artifacts + sha256sum * > checksums.txt + + - name: Create Release + uses: softprops/action-gh-release@6cbd405e2c4e67a21c47fa9e383d020e4e28b836 # v2.3.3 + with: + files: | + artifacts/**/* + artifacts/checksums.txt + generate_release_notes: true + draft: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/vendor/pezkuwi-zombienet-sdk/.github/workflows/spellcheck.yml b/vendor/pezkuwi-zombienet-sdk/.github/workflows/spellcheck.yml new file mode 100644 index 00000000..ef2feb8d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.github/workflows/spellcheck.yml @@ -0,0 +1,31 @@ +name: Spellcheck + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + spellcheck: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + + - name: Install Rust + uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # v1.0.0 + with: + toolchain: stable + + - name: Install cargo-spellcheck + run: | + sudo apt-get install libclang-dev + export LIBCLANG_PATH=/usr/lib/llvm-18/lib/ + cargo install cargo-spellcheck + + - name: Run cargo-spellcheck + run: cargo spellcheck diff --git a/vendor/pezkuwi-zombienet-sdk/.gitignore b/vendor/pezkuwi-zombienet-sdk/.gitignore new file mode 100644 index 
00000000..2693fd20 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/.gitignore @@ -0,0 +1,26 @@ +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb + +node_modules +dist +log.md +.env +bins +.DS_Store +**/target/ +*.swp +.vscode + +# nix +result + +# docs +docs diff --git a/vendor/pezkuwi-zombienet-sdk/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/Cargo.toml new file mode 100644 index 00000000..9144c66b --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/Cargo.toml @@ -0,0 +1,5 @@ +# This is a virtual manifest for the vendored pezkuwi-zombienet-sdk crates +# Individual crates are managed by the main pezkuwi-sdk workspace + +[workspace] +# Empty workspace - crates are part of parent workspace diff --git a/vendor/pezkuwi-zombienet-sdk/LICENSE b/vendor/pezkuwi-zombienet-sdk/LICENSE new file mode 100644 index 00000000..f288702d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/vendor/pezkuwi-zombienet-sdk/README.md b/vendor/pezkuwi-zombienet-sdk/README.md new file mode 100644 index 00000000..f8fff798 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/README.md @@ -0,0 +1,84 @@ +# 🚧⚠️ [WIP] ZombieNet SDK ⚠️🚧 + + +[Rust Docs](https://paritytech.github.io/zombienet-sdk) + +# The Vision + +This issue will track the progress of the new ZombieNet SDK. + +We want to create a new SDK for `ZombieNet` that allows users to build more complex use cases and interact with the network in a more flexible and programmatic way. +The SDK will provide a set of `building blocks` that users can combine in order to spawn and interact (test/query/etc) with the network providing a *fluent* api to craft different topologies and assertions to the running network. The new `SDK` will support the same range of `providers` and configurations that can be created in the current version (v1). + +We also want to continue supporting the `CLI` interface *but* it should be updated to use the `SDK` under the hood. + +# The Plan + +We plan to divide the work in phases to ensure we cover all the requirements, and inside each phase in small tasks, covering one of the building blocks and the interaction between them. 
 + +## Prototype building blocks + +Prototype each building block with a clear interface and how to interact with it +- [Building block Network #2](https://github.com/paritytech/zombienet-sdk/issues/2) +- [Building block Node #3](https://github.com/paritytech/zombienet-sdk/issues/3) +- [Building block NodeGroup #4](https://github.com/paritytech/zombienet-sdk/issues/4) +- [Building block Parachain #5](https://github.com/paritytech/zombienet-sdk/issues/5) +- [Building block Collator #6](https://github.com/paritytech/zombienet-sdk/issues/6) +- [Building block CollatorGroup #7](https://github.com/paritytech/zombienet-sdk/issues/7) +- [Building block Assertion #8](https://github.com/paritytech/zombienet-sdk/issues/8) + +## Integrate, test interactions and document + +We want to integrate the interactions for all building blocks and document the way that they work together. + +- [Spawning Integration #9](https://github.com/paritytech/zombienet-sdk/issues/9) +- [Assertion Integration #10](https://github.com/paritytech/zombienet-sdk/issues/10) +- [Documentation #11](https://github.com/paritytech/zombienet-sdk/issues/11) + +## Refactor `CLI` and ensure backwards compatibility + +Refactor the `CLI` module to use the new `SDK` under the hood. + +- [Refactor CLI #12](https://github.com/paritytech/zombienet-sdk/issues/12) +- [Ensure that spawning from toml works #13](https://github.com/paritytech/zombienet-sdk/issues/13) +- [Ensure that test-runner from DSL works #14](https://github.com/paritytech/zombienet-sdk/issues/14) + +## ROADMAP + +## Infra +- Chaos testing, add examples and explore possibilities in `native` and `podman` provider +- Add `docker` provider +- Add `nomad` provider +- Create [helm chart](https://helm.sh/docs/topics/charts/) to allow others to use zombienet in k8s +- Auth system to not use k8s users +- Create GitHub Action and publish in NPM marketplace (Completed) +- Rename `@paritytech/zombienet` npm package to `zombienet`. 
Keep all zombienet modules under `@zombienet/*` org (Completed) + +## Internal teams +- Add more teams (wip) + +## Registry +- Create decorators registry and allow override by paras (wip) +- Explore how to get info from paras. + +## Functional tasks +- Add subxt integration, allow to compile/run on the fly +- Move parser to pest (wip) +- Detach phases and use JSON to communicate instead of `paths` +- Add relative values assertions (for metrics/scripts) +- Allow to define nodes that are not started in the launching phase and can be started by the test-runner +- Allow to define `race` assertions +- Rust integration -> Create multiple libs (crates) +- Explore backchannel use case +- Add support to run tests against a running network (wip) +- Add more CLI subcommands +- Add js/subxt snippets ready to use in assertions (e.g transfers) +- Add XCM support in built-in assertions +- Add `ink! smart contract` support +- Add support to start from a live network (fork-off) [check subalfred] +- Create "default configuration" - (if `zombieconfig.json` exists in same dir with zombienet then the config applied in it will override the default configuration of zombienet. E.G if user wants to have as default `native` instead of `k8s` he can add to + +## UI +- Create UI to create `.zndls` and `network` files. +- Improve VSCode extension (grammar/snippets/syntax highlighting/file validations) ([repo](https://github.com/paritytech/zombienet-vscode-extension)) +- Create UI app (desktop) to run zombienet without the need of terminal. 
diff --git a/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/001-node-to-rust-foreign-function-interface.md b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/001-node-to-rust-foreign-function-interface.md new file mode 100644 index 00000000..1ce8035f --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/001-node-to-rust-foreign-function-interface.md @@ -0,0 +1,112 @@ +# Mechanism to call Rust code from Javascript/Typescript + +### Status: proposed | rejected | **accepted** | deprecated + +### Deciders: [@pepoviola](https://github.com/pepoviola) [@wirednkod](https://github.com/wirednkod) [@l0r1s](https://github.com/l0r1s) + +### Creation date: 18/05/2023 + +### Update date: - + +--- + +## Context and Problem Statement + +The `zombienet-sdk` will be developed in Rust. Our objective is to make it easily integrable into existing Typescript/Javascript projects. To achieve this goal, we need to find a way to call the Rust code from a Javascript/Typescript program. + +Many mechanisms exist for this purpose, like Wasm or N(ode)-API, but some may or may not fit our use case, for example, executing async code. + +--- + +## Decision drivers + +- We can use the standard library (for filesystem or networking in providers). + +- We can execute asynchronous code: our goal is not to make the program fully sequential as many operations (e.g: bootstrapping the relaychain nodes) can be done concurrently. 
 + +- Easy to package and deploy + +--- + +## Considered Options + +- #### WASM + + - [wasm-pack](https://github.com/rustwasm/wasm-pack) + +- #### Native node modules (Node-API / V8 / libuv) + - [napi-rs](https://github.com/napi-rs/napi-rs) + +--- + +## Prototyping + +To demonstrate and learn which options fit the best for our use case, we will create a small test program which will have the following functionalities: + +- Has a function taking an arbitrary object and a callback as parameters in the Typescript code, calling the callback with the function result on Rust side. +- Has a function taking an arbitrary object as parameter and returning a promise in Typescript, signaling an asynchronous operation on Rust side. +- Make an HTTP request asynchronously in the Rust code, using a dependency using the standard library. + +The prototype assumes versions of `rustc` and `cargo` to be `1.69.0`, use of `stable` channel and `Linux` on `amd64` architecture. + + +- ### [Boilerplate app to execute prototype](boilerplate-app-prototype.md) + +- ### [Wasm-pack prototype](wasm-prototype.md) + +- ### [Napi-rs prototype](napi-prototype.md) + +--- + +## Pros and cons of each option + +- ### Napi-rs + - Pros 👍 + - Support many types correctly including typed callback, typed array, class and all JS primitives types (Null, Undefined, Numbers, String, BigInt, ...) + + - Support top level async function because it detects if it needs to be run inside an async runtime (tokio by default) + + - Standard library can be used without limitations, including threading, networking, etc... + + - Extremely well documented with examples + + - Provide full Github action pipeline template to compile on all architecture easily + + - Support complex use cases + + - Used by many big names (Prisma, Parcel, Tailwind, Next.js, Bitwarden) + + - Cons 👎 + - Node-API is not simple for complex use case + + - Bound to NodeJS, if we want to expose the same logic to other languages (Go, C++, Python, ...) 
we need to wrap the Rust code inside a dynamic library and adapt to others languages primitives by creating a small adapter over the library + + - Not universally compiled + + +- ### Wasm-pack + - Pros 👍 + - Rich ecosystem and developing fast + + - Used in many places across web, backend (Docker supports WASM) + + - Easy to use and distribute + + - Universally compiled and used across languages (if they support WASM execution) + + - Good for simple use case where you do pure function (taking input, returning output, without side effects like writing to filesystem or making networking calls) + + - Cons 👎 + - Limited in the use of the standard library, can't access networking/filesystem primitives without having to use WASI which is inconsistent across languages/runtimes + + - Only support 32 bits + + - No support for concurrent programming (async/threads), even if we can returns Promise from WASM exposed functions but could see the light in few months (maybe?) + + - wasm-bindgen types are too generic, for example, we return a JsValue but we would like to be more specific for the type + +## Decision outcome + +- ### **Napi-rs** for crates dependant on async, filesystem or networking: *support*, *orchestrator*, *test-runner*, *providers* from [schema](https://github.com/paritytech/zombienet-sdk/issues/22) + +- ### **Wasm-pack** for the rest of the crates: *configuration* from [schema](https://github.com/paritytech/zombienet-sdk/issues/22) \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/boilerplate-app-prototype.md b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/boilerplate-app-prototype.md new file mode 100644 index 00000000..167606fc --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/boilerplate-app-prototype.md @@ -0,0 +1,32 @@ +## [Back](001-node-to-rust-foreign-function-interface.md) + +## Boilerplate app to execute 
prototypes + +1. Create the new node app : + +```bash +$ mkdir -p ffi-prototype/app && cd ffi-prototype/app && npm init -y +``` + +2. Install required packages : + +```bash +[ffi-prototype/app]$ npm i -D @tsconfig/recommended ts-node typescript +``` + +3. Add a new script : + +```json +{ + "scripts": { + "build+exec": "tsc && node ./index.js" + } +} +``` + +4. Add tsconfig.json +```json +{ + "extends": "@tsconfig/recommended/tsconfig.json" +} +``` \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/napi-prototype.md b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/napi-prototype.md new file mode 100644 index 00000000..aff0f12c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/napi-prototype.md @@ -0,0 +1,142 @@ +## [Back](001-node-to-rust-foreign-function-interface.md) + +## Napi-rs prototype +___ + +1. Install the napi CLI + +```bash +[ffi-prototype]$ npm install -g @napi-rs/cli +``` + +2. Create a new napi project + +```bash +[ffi-prototype]$ napi new napi-prototype +``` + +3. Install cargo dependencies + +```bash +[ffi-prototype/napi-prototype]$ cargo add tokio --features full +[ffi-prototype/napi-prototype]$ cargo add reqwest --features blocking +[ffi-prototype/napi-prototype]$ cargo add napi --no-default-features --features napi4,async +``` + +4. 
Copy the following code to `napi-prototype/src/lib.rs` + +```rust +#![deny(clippy::all)] + +use std::thread; + +use napi::{ + bindgen_prelude::*, + threadsafe_function::{ + ErrorStrategy, ThreadSafeCallContext, ThreadsafeFunction, ThreadsafeFunctionCallMode, + }, +}; +use reqwest; + +#[macro_use] +extern crate napi_derive; + +// native async with tokio is supported without annotating a main function +#[napi] +pub async fn fetch_promise() -> Result { + let body = reqwest::get("https://paritytech.github.io/zombienet/") + .await + .map_err(|_| napi::Error::from_reason("Error while fetching page"))? + .text() + .await + .map_err(|_| napi::Error::from_reason("Error while extracting body"))?; + + Ok(body) +} + +#[napi] +pub fn fetch_callback(callback: JsFunction) -> Result<()> { + // createa thread safe callback from the JsFunction + let thread_safe_callback: ThreadsafeFunction = callback + .create_threadsafe_function(0, |ctx: ThreadSafeCallContext| { + ctx.env.create_string(&ctx.value).map(|s| vec![s]) + })?; + + // spawn a thread to execute our logic + thread::spawn(move || { + let response = reqwest::blocking::get("https://paritytech.github.io/zombienet/"); + + if response.is_err() { + let response = response + .map(|_| "".into()) + .map_err(|_| napi::Error::from_reason("Error while fetching page")); + + // error are returned by calling the callback with an empty response and the error mapped + return thread_safe_callback.call(response, ThreadsafeFunctionCallMode::Blocking); + } + + let body = response.unwrap().text(); + + if body.is_err() { + let body = body + .map(|_| "".into()) + .map_err(|_| napi::Error::from_reason("Error while extracting body")); + + return thread_safe_callback.call(body, ThreadsafeFunctionCallMode::Blocking); + } + + // result is returned as a string + thread_safe_callback.call(Ok(body.unwrap()), ThreadsafeFunctionCallMode::Blocking) + }); + + Ok(()) +} +``` + +5. 
Build the project : +```bash +[ffi-prototype/napi-prototype]$ npm run build +``` + +6. Copy artifacts : +```bash +[ffi-prorotype/napi-prototype]$ mv napi-prototype.linux-x64-gnu.node index.d.ts index.js npm/linux-x64-gnu +``` + +7. Install package in ```ffi-prototype/app``` : +```bash +[ffi-prototype/app]$ npm i ../napi-prototype/npm/linux-x64-gnu/ +``` + +8. Copy the following code to the ```ffi-prototype/app/index.ts``` file : + +```ts +import { fetchCallback, fetchPromise } from "napi-prototype-linux-x64-gnu"; + +(async () => { + fetchCallback((_err: any, result: string) => { + console.log(`HTTP request through FFI with callback: ${result.length}`); + }); + + console.log( + `HTTP request through FFI with promise ${(await fetchPromise()).length}` + ); +})(); +``` + +9. Build and execute the app : + +```bash +[ffi-prototype/app]$ npm run build+exec +``` + +Expected output: +```tty +> app@1.0.0 build+exec +> tsc && node ./index.js + +HTTP request through FFI with promise 12057 +HTTP request through FFI with callback: 12057 +``` + +That's it ! \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/wasm-prototype.md b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/wasm-prototype.md new file mode 100644 index 00000000..e26203d3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/adrs/001-node-rust-foreign-function-interface/wasm-prototype.md @@ -0,0 +1,153 @@ +## [Back](001-node-to-rust-foreign-function-interface.md) + +## Wasm-pack prototype +___ + +1. Install the wasm-pack CLI + +```bash +curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh +``` + +2. Create a new wasm-pack project + +```bash +[ffi-prototype]$ wasm-pack new wasm-prototype +``` + +3. 
Install cargo dependencies +```bash +[ffi-prototype/wasm-prototype]$ cargo add tokio --features full +[ffi-prototype/wasm-prototype]$ cargo add reqwest --features blocking +[ffi-prototype/wasm-prototype]$ cargo add wasm-bindgen-futures +cargo add js-sys +``` + +4. Copy the following code to `wasm-prototype/src/lib.rs` +```rust +mod utils; + +use wasm_bindgen::prelude::*; + +// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global +// allocator. +#[cfg(feature = "wee_alloc")] +#[global_allocator] +static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; + +#[wasm_bindgen] +pub async fn fetch_promise() -> Result { + let body = reqwest::get("https://paritytech.github.io/zombienet/") + .await + .map_err(|_| JsError::new("Error while fetching page"))? + .text() + .await + .map_err(|_| JsError::new("Error while extracting body"))?; + + Ok(body) +} + +#[wasm_bindgen] +pub fn fetch_callback(callback: &js_sys::Function) -> Result { + let this = JsValue::null(); + + let response = reqwest::blocking::get("https://paritytech.github.io/zombienet/"); + + if response.is_err() { + return callback.call2( + &this, + &JsError::new("Error while fetching page").into(), + &JsValue::null(), + ); + } + + let body = response.unwrap().text(); + + if body.is_err() { + return callback.call2( + &this, + &JsError::new("Error while extracting body").into(), + &JsValue::null(), + ); + } + + Ok(body.unwrap().into()) +} +``` + +5. Build the project : +```bash +[ffi-prototype/wasm-prototype]$ wasm-pack build -t nodejs +``` + +Error are shown, this is expected because WASM doesn't support networking primitives, +as you can see, we removed the thread call from the fetch_callback function because ```JsValue``` +is using *const u8 under the hood and it's not ```Send``` so can't be passed safely across thread: + +```bash +[INFO]: 🎯 Checking for the Wasm target... +[INFO]: 🌀 Compiling to Wasm... 
+ Compiling mio v0.8.6 + Compiling parking_lot v0.12.1 + Compiling serde_json v1.0.96 + Compiling url v2.3.1 +error[E0432]: unresolved import `crate::sys::IoSourceState` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/io_source.rs:12:5 + | +12 | use crate::sys::IoSourceState; + | ^^^^^^^^^^^^^^^^^^^^^^^^^ no `IoSourceState` in `sys` + +error[E0432]: unresolved import `crate::sys::tcp` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/net/tcp/listener.rs:15:17 + | +15 | use crate::sys::tcp::{bind, listen, new_for_addr}; + | ^^^ could not find `tcp` in `sys` + +error[E0432]: unresolved import `crate::sys::tcp` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/net/tcp/stream.rs:13:17 + | +13 | use crate::sys::tcp::{connect, new_for_addr}; + | ^^^ could not find `tcp` in `sys` + +error[E0433]: failed to resolve: could not find `Selector` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/poll.rs:301:18 + | +301 | sys::Selector::new().map(|selector| Poll { + | ^^^^^^^^ could not find `Selector` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:24:14 + | +24 | sys::event::token(&self.inner) + | ^^^^^ could not find `event` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:38:14 + | +38 | sys::event::is_readable(&self.inner) + | ^^^^^ could not find `event` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:43:14 + | +43 | sys::event::is_writable(&self.inner) + | ^^^^^ could not find `event` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> 
/home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:68:14 + | +68 | sys::event::is_error(&self.inner) + | ^^^^^ could not find `event` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:99:14 + | +99 | sys::event::is_read_closed(&self.inner) + | ^^^^^ could not find `event` in `sys` + +error[E0433]: failed to resolve: could not find `event` in `sys` + --> /home/user/.cargo/registry/src/github.com-1ecc6299db9ec823/mio-0.8.6/src/event/event.rs:129:14 + | +129 | sys::event::is_write_closed(&self.inner) + | ^^^^^ could not find `event` in `sys` +``` \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/.gitignore b/vendor/pezkuwi-zombienet-sdk/crates/configuration/.gitignore new file mode 100644 index 00000000..4fffb2f8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/Cargo.toml new file mode 100644 index 00000000..23829779 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "zombienet-configuration" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Zombienet sdk config builder, allow to build a network configuration" +keywords = ["zombienet", "configuration", "sdk"] + +[dependencies] +regex = { workspace = true } +lazy_static = { workspace = true } +multiaddr = { workspace = true } +url = { workspace = true, features = ["serde"] } +thiserror = { workspace = true } +anyhow = { workspace = true } +serde = { workspace = true, features = ["derive"] } +toml = { workspace = true } 
+serde_json = { workspace = true } +reqwest = { workspace = true } +tokio = { workspace = true, features = ["fs"] } +tracing = { workspace = true } + +# zombienet deps +support = { workspace = true } + diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/global_settings.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/global_settings.rs new file mode 100644 index 00000000..21b57190 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/global_settings.rs @@ -0,0 +1,383 @@ +use std::{ + error::Error, + fmt::Display, + net::IpAddr, + path::{Path, PathBuf}, + str::FromStr, +}; + +use multiaddr::Multiaddr; +use serde::{Deserialize, Serialize}; + +use crate::{ + shared::{ + errors::{ConfigError, FieldError}, + helpers::{merge_errors, merge_errors_vecs}, + types::Duration, + }, + utils::{default_as_true, default_node_spawn_timeout, default_timeout}, +}; + +/// Global settings applied to an entire network. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GlobalSettings { + /// Global bootnodes to use (we will then add more) + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + bootnodes_addresses: Vec, + // TODO: parse both case in zombienet node version to avoid renamed ? + /// Global spawn timeout + #[serde(rename = "timeout", default = "default_timeout")] + network_spawn_timeout: Duration, + // TODO: not used yet + /// Node spawn timeout + #[serde(default = "default_node_spawn_timeout")] + node_spawn_timeout: Duration, + // TODO: not used yet + /// Local ip to use for construct the direct links + local_ip: Option, + /// Directory to use as base dir + /// Used to reuse the same files (database) from a previous run, + /// also note that we will override the content of some of those files. + base_dir: Option, + /// Number of concurrent spawning process to launch, None means try to spawn all at the same time. 
+ spawn_concurrency: Option, + /// If enabled, will launch a task to monitor nodes' liveness and tear down the network if there are any. + #[serde(default = "default_as_true")] + tear_down_on_failure: bool, +} + +impl GlobalSettings { + /// External bootnode address. + pub fn bootnodes_addresses(&self) -> Vec<&Multiaddr> { + self.bootnodes_addresses.iter().collect() + } + + /// Global spawn timeout in seconds. + pub fn network_spawn_timeout(&self) -> Duration { + self.network_spawn_timeout + } + + /// Individual node spawn timeout in seconds. + pub fn node_spawn_timeout(&self) -> Duration { + self.node_spawn_timeout + } + + /// Local IP used to expose local services (including RPC, metrics and monitoring). + pub fn local_ip(&self) -> Option<&IpAddr> { + self.local_ip.as_ref() + } + + /// Base directory to use (instead a random tmp one) + /// All the artifacts will be created in this directory. + pub fn base_dir(&self) -> Option<&Path> { + self.base_dir.as_deref() + } + + /// Number of concurrent spawning process to launch + pub fn spawn_concurrency(&self) -> Option { + self.spawn_concurrency + } + + /// A flag to tear down the network if there are any unresponsive nodes detected. + pub fn tear_down_on_failure(&self) -> bool { + self.tear_down_on_failure + } +} + +impl Default for GlobalSettings { + fn default() -> Self { + Self { + bootnodes_addresses: Default::default(), + network_spawn_timeout: default_timeout(), + node_spawn_timeout: default_node_spawn_timeout(), + local_ip: Default::default(), + base_dir: Default::default(), + spawn_concurrency: Default::default(), + tear_down_on_failure: true, + } + } +} + +/// A global settings builder, used to build [`GlobalSettings`] declaratively with fields validation. +#[derive(Default)] +pub struct GlobalSettingsBuilder { + config: GlobalSettings, + errors: Vec, +} + +impl GlobalSettingsBuilder { + pub fn new() -> Self { + Self::default() + } + + // Transition to the next state of the builder. 
+ fn transition(config: GlobalSettings, errors: Vec) -> Self { + Self { config, errors } + } + + /// Set the external bootnode address. + /// + /// Note: Bootnode address replacements are NOT supported here. + /// Only arguments (`args`) support dynamic replacements. Bootnode addresses must be a valid address. + pub fn with_raw_bootnodes_addresses(self, bootnodes_addresses: Vec) -> Self + where + T: TryInto + Display + Copy, + T::Error: Error + Send + Sync + 'static, + { + let mut addrs = vec![]; + let mut errors = vec![]; + + for (index, addr) in bootnodes_addresses.into_iter().enumerate() { + match addr.try_into() { + Ok(addr) => addrs.push(addr), + Err(error) => errors.push( + FieldError::BootnodesAddress(index, addr.to_string(), error.into()).into(), + ), + } + } + + Self::transition( + GlobalSettings { + bootnodes_addresses: addrs, + ..self.config + }, + merge_errors_vecs(self.errors, errors), + ) + } + + /// Set global spawn timeout in seconds. + pub fn with_network_spawn_timeout(self, timeout: Duration) -> Self { + Self::transition( + GlobalSettings { + network_spawn_timeout: timeout, + ..self.config + }, + self.errors, + ) + } + + /// Set individual node spawn timeout in seconds. + pub fn with_node_spawn_timeout(self, timeout: Duration) -> Self { + Self::transition( + GlobalSettings { + node_spawn_timeout: timeout, + ..self.config + }, + self.errors, + ) + } + + /// Set local IP used to expose local services (including RPC, metrics and monitoring). + pub fn with_local_ip(self, local_ip: &str) -> Self { + match IpAddr::from_str(local_ip) { + Ok(local_ip) => Self::transition( + GlobalSettings { + local_ip: Some(local_ip), + ..self.config + }, + self.errors, + ), + Err(error) => Self::transition( + self.config, + merge_errors(self.errors, FieldError::LocalIp(error.into()).into()), + ), + } + } + + /// Set the directory to use as base (instead of a random tmp one). 
+ pub fn with_base_dir(self, base_dir: impl Into) -> Self { + Self::transition( + GlobalSettings { + base_dir: Some(base_dir.into()), + ..self.config + }, + self.errors, + ) + } + + /// Set the spawn concurrency + pub fn with_spawn_concurrency(self, spawn_concurrency: usize) -> Self { + Self::transition( + GlobalSettings { + spawn_concurrency: Some(spawn_concurrency), + ..self.config + }, + self.errors, + ) + } + + /// Set the `tear_down_on_failure` flag + pub fn with_tear_down_on_failure(self, tear_down_on_failure: bool) -> Self { + Self::transition( + GlobalSettings { + tear_down_on_failure, + ..self.config + }, + self.errors, + ) + } + + /// Seals the builder and returns a [`GlobalSettings`] if there are no validation errors, else returns errors. + pub fn build(self) -> Result> { + if !self.errors.is_empty() { + return Err(self + .errors + .into_iter() + .map(|error| ConfigError::GlobalSettings(error).into()) + .collect::>()); + } + + Ok(self.config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn global_settings_config_builder_should_succeeds_and_returns_a_global_settings_config() { + let global_settings_config = GlobalSettingsBuilder::new() + .with_raw_bootnodes_addresses(vec![ + "/ip4/10.41.122.55/tcp/45421", + "/ip4/51.144.222.10/tcp/2333", + ]) + .with_network_spawn_timeout(600) + .with_node_spawn_timeout(120) + .with_local_ip("10.0.0.1") + .with_base_dir("/home/nonroot/mynetwork") + .with_spawn_concurrency(5) + .with_tear_down_on_failure(true) + .build() + .unwrap(); + + let bootnodes_addresses: Vec = vec![ + "/ip4/10.41.122.55/tcp/45421".try_into().unwrap(), + "/ip4/51.144.222.10/tcp/2333".try_into().unwrap(), + ]; + assert_eq!( + global_settings_config.bootnodes_addresses(), + bootnodes_addresses.iter().collect::>() + ); + assert_eq!(global_settings_config.network_spawn_timeout(), 600); + assert_eq!(global_settings_config.node_spawn_timeout(), 120); + assert_eq!( + global_settings_config + .local_ip() + .unwrap() + .to_string() + 
.as_str(), + "10.0.0.1" + ); + assert_eq!( + global_settings_config.base_dir().unwrap(), + Path::new("/home/nonroot/mynetwork") + ); + assert_eq!(global_settings_config.spawn_concurrency().unwrap(), 5); + assert!(global_settings_config.tear_down_on_failure()); + } + + #[test] + fn global_settings_config_builder_should_succeeds_when_node_spawn_timeout_is_missing() { + let global_settings_config = GlobalSettingsBuilder::new() + .with_raw_bootnodes_addresses(vec![ + "/ip4/10.41.122.55/tcp/45421", + "/ip4/51.144.222.10/tcp/2333", + ]) + .with_network_spawn_timeout(600) + .with_local_ip("10.0.0.1") + .build() + .unwrap(); + + let bootnodes_addresses: Vec = vec![ + "/ip4/10.41.122.55/tcp/45421".try_into().unwrap(), + "/ip4/51.144.222.10/tcp/2333".try_into().unwrap(), + ]; + assert_eq!( + global_settings_config.bootnodes_addresses(), + bootnodes_addresses.iter().collect::>() + ); + assert_eq!(global_settings_config.network_spawn_timeout(), 600); + assert_eq!(global_settings_config.node_spawn_timeout(), 600); + assert_eq!( + global_settings_config + .local_ip() + .unwrap() + .to_string() + .as_str(), + "10.0.0.1" + ); + } + + #[test] + fn global_settings_builder_should_fails_and_returns_an_error_if_one_bootnode_address_is_invalid( + ) { + let errors = GlobalSettingsBuilder::new() + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421"]) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + } + + #[test] + fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_bootnodes_addresses_are_invalid( + ) { + let errors = GlobalSettingsBuilder::new() + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"]) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "global_settings.bootnodes_addresses[0]: 
'/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "global_settings.bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: " + ); + } + + #[test] + fn global_settings_builder_should_fails_and_returns_an_error_if_local_ip_is_invalid() { + let errors = GlobalSettingsBuilder::new() + .with_local_ip("invalid") + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "global_settings.local_ip: invalid IP address syntax" + ); + } + + #[test] + fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_fields_are_invalid( + ) { + let errors = GlobalSettingsBuilder::new() + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"]) + .with_local_ip("invalid") + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 3); + assert_eq!( + errors.first().unwrap().to_string(), + "global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "global_settings.bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: " + ); + assert_eq!( + errors.get(2).unwrap().to_string(), + "global_settings.local_ip: invalid IP address syntax" + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/hrmp_channel.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/hrmp_channel.rs new file mode 100644 index 00000000..228ff2ef --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/hrmp_channel.rs @@ -0,0 +1,137 @@ +use std::marker::PhantomData; + +use serde::{Deserialize, Serialize}; + +use crate::shared::{macros::states, types::ParaId}; + +/// HRMP channel configuration, with fine-grained configuration options. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct HrmpChannelConfig { + sender: ParaId, + recipient: ParaId, + max_capacity: u32, + max_message_size: u32, +} + +impl HrmpChannelConfig { + /// The sending parachain ID. + pub fn sender(&self) -> ParaId { + self.sender + } + + /// The receiving parachain ID. + pub fn recipient(&self) -> ParaId { + self.recipient + } + + /// The maximum capacity of messages in the channel. + pub fn max_capacity(&self) -> u32 { + self.max_capacity + } + + /// The maximum size of a message in the channel. + pub fn max_message_size(&self) -> u32 { + self.max_message_size + } +} + +states! { + Initial, + WithSender, + WithRecipient +} + +/// HRMP channel configuration builder, used to build an [`HrmpChannelConfig`] declaratively with fields validation. +pub struct HrmpChannelConfigBuilder { + config: HrmpChannelConfig, + _state: PhantomData, +} + +impl Default for HrmpChannelConfigBuilder { + fn default() -> Self { + Self { + config: HrmpChannelConfig { + sender: 0, + recipient: 0, + max_capacity: 8, + max_message_size: 512, + }, + _state: PhantomData, + } + } +} + +impl HrmpChannelConfigBuilder { + fn transition(&self, config: HrmpChannelConfig) -> HrmpChannelConfigBuilder { + HrmpChannelConfigBuilder { + config, + _state: PhantomData, + } + } +} + +impl HrmpChannelConfigBuilder { + pub fn new() -> Self { + Self::default() + } + + /// Set the sending parachain ID. + pub fn with_sender(self, sender: ParaId) -> HrmpChannelConfigBuilder { + self.transition(HrmpChannelConfig { + sender, + ..self.config + }) + } +} + +impl HrmpChannelConfigBuilder { + /// Set the receiving parachain ID. + pub fn with_recipient(self, recipient: ParaId) -> HrmpChannelConfigBuilder { + self.transition(HrmpChannelConfig { + recipient, + ..self.config + }) + } +} + +impl HrmpChannelConfigBuilder { + /// Set the max capacity of messages in the channel. 
+ pub fn with_max_capacity(self, max_capacity: u32) -> Self { + self.transition(HrmpChannelConfig { + max_capacity, + ..self.config + }) + } + + /// Set the maximum size of a message in the channel. + pub fn with_max_message_size(self, max_message_size: u32) -> Self { + self.transition(HrmpChannelConfig { + max_message_size, + ..self.config + }) + } + + pub fn build(self) -> HrmpChannelConfig { + self.config + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn hrmp_channel_config_builder_should_build_a_new_hrmp_channel_config_correctly() { + let hrmp_channel_config = HrmpChannelConfigBuilder::new() + .with_sender(1000) + .with_recipient(2000) + .with_max_capacity(50) + .with_max_message_size(100) + .build(); + + assert_eq!(hrmp_channel_config.sender(), 1000); + assert_eq!(hrmp_channel_config.recipient(), 2000); + assert_eq!(hrmp_channel_config.max_capacity(), 50); + assert_eq!(hrmp_channel_config.max_message_size(), 100); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/lib.rs new file mode 100644 index 00000000..eff9ee31 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/lib.rs @@ -0,0 +1,102 @@ +//! This crate is used to create type safe configuration for Zombienet SDK using nested builders. +//! +//! +//! The main entry point of this crate is the [`NetworkConfigBuilder`] which is used to build a full network configuration +//! but all inner builders are also exposed to allow more granular control over the configuration. +//! +//! **Note**: Not all options can be checked at compile time and some will be checked at runtime when spawning a +//! network (e.g.: supported args for a specific node version). +//! +//! # Example +//! ``` +//! use zombienet_configuration::NetworkConfigBuilder; +//! +//! let simple_configuration = NetworkConfigBuilder::new() +//! .with_relaychain(|relaychain| { +//! relaychain +//! .with_chain("polkadot") +//! 
.with_random_nominators_count(10) +//! .with_default_resources(|resources| { +//! resources +//! .with_limit_cpu("1000m") +//! .with_request_memory("1Gi") +//! .with_request_cpu(100_000) +//! }) +//! .with_node(|node| { +//! node.with_name("node") +//! .with_command("command") +//! .validator(true) +//! }) +//! }) +//! .with_parachain(|parachain| { +//! parachain +//! .with_id(1000) +//! .with_chain("myparachain1") +//! .with_initial_balance(100_000) +//! .with_default_image("myimage:version") +//! .with_collator(|collator| { +//! collator +//! .with_name("collator1") +//! .with_command("command1") +//! .validator(true) +//! }) +//! }) +//! .with_parachain(|parachain| { +//! parachain +//! .with_id(2000) +//! .with_chain("myparachain2") +//! .with_initial_balance(50_0000) +//! .with_collator(|collator| { +//! collator +//! .with_name("collator2") +//! .with_command("command2") +//! .validator(true) +//! }) +//! }) +//! .with_hrmp_channel(|hrmp_channel1| { +//! hrmp_channel1 +//! .with_sender(1) +//! .with_recipient(2) +//! .with_max_capacity(200) +//! .with_max_message_size(500) +//! }) +//! .with_hrmp_channel(|hrmp_channel2| { +//! hrmp_channel2 +//! .with_sender(2) +//! .with_recipient(1) +//! .with_max_capacity(100) +//! .with_max_message_size(250) +//! }) +//! .with_global_settings(|global_settings| { +//! global_settings +//! .with_network_spawn_timeout(1200) +//! .with_node_spawn_timeout(240) +//! }) +//! .build(); +//! +//! assert!(simple_configuration.is_ok()) +//! 
``` + +#![allow(clippy::expect_fun_call)] +mod global_settings; +mod hrmp_channel; +mod network; +mod relaychain; +pub mod shared; +mod teyrchain; +mod utils; + +pub use global_settings::{GlobalSettings, GlobalSettingsBuilder}; +pub use hrmp_channel::{HrmpChannelConfig, HrmpChannelConfigBuilder}; +pub use network::{NetworkConfig, NetworkConfigBuilder, WithRelaychain}; +pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder}; +// re-export shared +pub use shared::{node::NodeConfig, types}; +pub use teyrchain::{ + states as para_states, RegistrationStrategy, TeyrchainConfig, TeyrchainConfigBuilder, +}; + +// Backward compatibility aliases for external crates that use Polkadot SDK terminology +// These allow zombienet-orchestrator and other external crates to work with our renamed types +pub type ParachainConfig = TeyrchainConfig; +pub type ParachainConfigBuilder = TeyrchainConfigBuilder; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/network.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/network.rs new file mode 100644 index 00000000..115d4866 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/network.rs @@ -0,0 +1,1923 @@ +use std::{cell::RefCell, collections::HashSet, fs, marker::PhantomData, rc::Rc}; + +use anyhow::anyhow; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use support::{ + constants::{ + NO_ERR_DEF_BUILDER, RELAY_NOT_NONE, RW_FAILED, THIS_IS_A_BUG, VALIDATION_CHECK, VALID_REGEX, + }, + replacer::apply_env_replacements, +}; +use tracing::trace; + +use crate::{ + global_settings::{GlobalSettings, GlobalSettingsBuilder}, + hrmp_channel::{self, HrmpChannelConfig, HrmpChannelConfigBuilder}, + relaychain::{self, RelaychainConfig, RelaychainConfigBuilder}, + shared::{ + errors::{ConfigError, ValidationError}, + helpers::{generate_unique_node_name_from_names, merge_errors, merge_errors_vecs}, + macros::states, + node::{GroupNodeConfig, NodeConfig}, + types::{Arg, AssetLocation, Chain, 
Command, Image, ValidationContext}, + }, + teyrchain::{self, TeyrchainConfig, TeyrchainConfigBuilder}, + types::ParaId, + RegistrationStrategy, +}; + +/// A network configuration, composed of a relaychain, teyrchains and HRMP channels. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NetworkConfig { + #[serde(rename = "settings", default = "GlobalSettings::default")] + global_settings: GlobalSettings, + relaychain: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + teyrchains: Vec, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + hrmp_channels: Vec, +} + +impl NetworkConfig { + /// The global settings of the network. + pub fn global_settings(&self) -> &GlobalSettings { + &self.global_settings + } + + /// The relay chain of the network. + pub fn relaychain(&self) -> &RelaychainConfig { + self.relaychain + .as_ref() + .expect(&format!("{RELAY_NOT_NONE}, {THIS_IS_A_BUG}")) + } + + /// The teyrchains of the network. + pub fn teyrchains(&self) -> Vec<&TeyrchainConfig> { + self.teyrchains.iter().collect::>() + } + + /// Backward compatibility alias for teyrchains(). + pub fn parachains(&self) -> Vec<&TeyrchainConfig> { + self.teyrchains() + } + + /// The HRMP channels of the network. + pub fn hrmp_channels(&self) -> Vec<&HrmpChannelConfig> { + self.hrmp_channels.iter().collect::>() + } + + fn set_teyrchains(&mut self, teyrchains: Vec) { + self.teyrchains = teyrchains; + } + + /// A helper function to dump the network configuration to a TOML string. + pub fn dump_to_toml(&self) -> Result { + // This regex is used to replace the "" enclosed u128 value to a raw u128 because u128 is not supported for TOML serialization/deserialization. 
+ let re = Regex::new(r#""U128%(?\d+)""#) + .expect(&format!("{VALID_REGEX} {THIS_IS_A_BUG}")); + let toml_string = toml::to_string_pretty(&self)?; + + Ok(re.replace_all(&toml_string, "$u128_value").to_string()) + } + + /// A helper function to load a network configuration from a TOML file. + pub fn load_from_toml_with_settings( + path: &str, + settings: &GlobalSettings, + ) -> Result { + let mut network_config = NetworkConfig::load_from_toml(path)?; + network_config.global_settings = settings.clone(); + Ok(network_config) + } + + /// A helper function to load a network configuration from a TOML file. + pub fn load_from_toml(path: &str) -> Result { + let file_str = fs::read_to_string(path).expect(&format!("{RW_FAILED} {THIS_IS_A_BUG}")); + let re: Regex = Regex::new(r"(?(initial_)?balance)\s+=\s+(?\d+)") + .expect(&format!("{VALID_REGEX} {THIS_IS_A_BUG}")); + + let toml_text = re.replace_all(&file_str, "$field_name = \"$u128_value\""); + trace!("toml text to parse: {}", toml_text); + // apply replacements from env in toml + let toml_text = apply_env_replacements(&toml_text); + trace!("toml text after replacements: {}", toml_text); + let mut network_config: NetworkConfig = toml::from_str(&toml_text)?; + trace!("parsed config {network_config:#?}"); + + // All unwraps below are safe, because we ensure that the relaychain is not None at this point + if network_config.relaychain.is_none() { + Err(anyhow!("Relay chain does not exist."))? 
+ } + + // retrieve the defaults relaychain for assigning to nodes if needed + let mut relaychain_default_command: Option = + network_config.relaychain().default_command().cloned(); + + if relaychain_default_command.is_none() { + relaychain_default_command = network_config.relaychain().command().cloned(); + } + let relaychain_default_image: Option = + network_config.relaychain().default_image().cloned(); + + let relaychain_default_db_snapshot: Option = + network_config.relaychain().default_db_snapshot().cloned(); + + let default_args: Vec = network_config + .relaychain() + .default_args() + .into_iter() + .cloned() + .collect(); + + let mut nodes: Vec = network_config + .relaychain() + .nodes() + .into_iter() + .cloned() + .collect(); + + let group_nodes: Vec = network_config + .relaychain() + .group_node_configs() + .into_iter() + .cloned() + .collect(); + + if let Some(group) = group_nodes.iter().find(|n| n.count == 0) { + return Err(anyhow!( + "Group node '{}' must have a count greater than 0.", + group.base_config.name() + )); + } + + let mut teyrchains: Vec = + network_config.teyrchains().into_iter().cloned().collect(); + + // Validation checks for relay + TryInto::::try_into(network_config.relaychain().chain().as_str())?; + if relaychain_default_image.is_some() { + TryInto::::try_into(relaychain_default_image.clone().expect(VALIDATION_CHECK))?; + } + if relaychain_default_command.is_some() { + TryInto::::try_into( + relaychain_default_command.clone().expect(VALIDATION_CHECK), + )?; + } + + // Keep track of node names to ensure uniqueness + let mut names = HashSet::new(); + + for node in nodes.iter_mut() { + if relaychain_default_command.is_some() { + // we modify only nodes which don't already have a command + if node.command.is_none() { + node.command.clone_from(&relaychain_default_command); + } + } + + if relaychain_default_image.is_some() && node.image.is_none() { + node.image.clone_from(&relaychain_default_image); + } + + if 
relaychain_default_db_snapshot.is_some() && node.db_snapshot.is_none() { + node.db_snapshot.clone_from(&relaychain_default_db_snapshot); + } + + if !default_args.is_empty() && node.args().is_empty() { + node.set_args(default_args.clone()); + } + + let unique_name = generate_unique_node_name_from_names(node.name(), &mut names); + node.name = unique_name; + } + + for para in teyrchains.iter_mut() { + // retrieve the defaults parachain for assigning to collators if needed + let teyrchain_default_command: Option = para.default_command().cloned(); + + let teyrchain_default_image: Option = para.default_image().cloned(); + + let teyrchain_default_db_snapshot: Option = + para.default_db_snapshot().cloned(); + + let default_args: Vec = para.default_args().into_iter().cloned().collect(); + + let group_collators: Vec = para + .group_collators_configs() + .into_iter() + .cloned() + .collect(); + + if let Some(group) = group_collators.iter().find(|n| n.count == 0) { + return Err(anyhow!( + "Group node '{}' must have a count greater than 0.", + group.base_config.name() + )); + } + + let mut collators: Vec = para.collators.clone(); + + for collator in collators.iter_mut() { + populate_collator_with_defaults( + collator, + &teyrchain_default_command, + &teyrchain_default_image, + &teyrchain_default_db_snapshot, + &default_args, + ); + let unique_name = generate_unique_node_name_from_names(collator.name(), &mut names); + collator.name = unique_name; + } + + para.collators = collators; + + if para.collator.is_some() { + let mut collator = para.collator.clone().unwrap(); + populate_collator_with_defaults( + &mut collator, + &teyrchain_default_command, + &teyrchain_default_image, + &teyrchain_default_db_snapshot, + &default_args, + ); + let unique_name = generate_unique_node_name_from_names(collator.name(), &mut names); + collator.name = unique_name; + para.collator = Some(collator); + } + } + + network_config + .relaychain + .as_mut() + .expect(&format!("{NO_ERR_DEF_BUILDER}, 
{THIS_IS_A_BUG}")) + .set_nodes(nodes); + + network_config.set_teyrchains(teyrchains); + + // Validation checks for teyrchains + network_config.teyrchains().iter().for_each(|parachain| { + if parachain.default_image().is_some() { + let _ = TryInto::::try_into(parachain.default_image().unwrap().as_str()); + } + if parachain.default_command().is_some() { + let _ = TryInto::::try_into(parachain.default_command().unwrap().as_str()); + } + }); + Ok(network_config) + } +} + +fn populate_collator_with_defaults( + collator: &mut NodeConfig, + teyrchain_default_command: &Option, + teyrchain_default_image: &Option, + teyrchain_default_db_snapshot: &Option, + default_args: &[Arg], +) { + if teyrchain_default_command.is_some() { + // we modify only nodes which don't already have a command + if collator.command.is_none() { + collator.command.clone_from(teyrchain_default_command); + } + } + + if teyrchain_default_image.is_some() && collator.image.is_none() { + collator.image.clone_from(teyrchain_default_image); + } + + if teyrchain_default_db_snapshot.is_some() && collator.db_snapshot.is_none() { + collator + .db_snapshot + .clone_from(teyrchain_default_db_snapshot); + } + + if !default_args.is_empty() && collator.args().is_empty() { + collator.set_args(default_args.to_owned()); + } +} + +states! { + Initial, + WithRelaychain +} + +/// A network configuration builder, used to build a [`NetworkConfig`] declaratively with fields validation. 
+/// +/// # Example: +/// +/// ``` +/// use zombienet_configuration::NetworkConfigBuilder; +/// +/// let network_config = NetworkConfigBuilder::new() +/// .with_relaychain(|relaychain| { +/// relaychain +/// .with_chain("polkadot") +/// .with_random_nominators_count(10) +/// .with_default_resources(|resources| { +/// resources +/// .with_limit_cpu("1000m") +/// .with_request_memory("1Gi") +/// .with_request_cpu(100_000) +/// }) +/// .with_node(|node| { +/// node.with_name("node") +/// .with_command("command") +/// .validator(true) +/// }) +/// }) +/// .with_parachain(|parachain| { +/// parachain +/// .with_id(1000) +/// .with_chain("myparachain1") +/// .with_initial_balance(100_000) +/// .with_default_image("myimage:version") +/// .with_collator(|collator| { +/// collator +/// .with_name("collator1") +/// .with_command("command1") +/// .validator(true) +/// }) +/// }) +/// .with_parachain(|parachain| { +/// parachain +/// .with_id(2000) +/// .with_chain("myparachain2") +/// .with_initial_balance(50_0000) +/// .with_collator(|collator| { +/// collator +/// .with_name("collator2") +/// .with_command("command2") +/// .validator(true) +/// }) +/// }) +/// .with_hrmp_channel(|hrmp_channel1| { +/// hrmp_channel1 +/// .with_sender(1) +/// .with_recipient(2) +/// .with_max_capacity(200) +/// .with_max_message_size(500) +/// }) +/// .with_hrmp_channel(|hrmp_channel2| { +/// hrmp_channel2 +/// .with_sender(2) +/// .with_recipient(1) +/// .with_max_capacity(100) +/// .with_max_message_size(250) +/// }) +/// .with_global_settings(|global_settings| { +/// global_settings +/// .with_network_spawn_timeout(1200) +/// .with_node_spawn_timeout(240) +/// }) +/// .build(); +/// +/// assert!(network_config.is_ok()) +/// ``` +pub struct NetworkConfigBuilder { + config: NetworkConfig, + validation_context: Rc>, + errors: Vec, + _state: PhantomData, +} + +impl Default for NetworkConfigBuilder { + fn default() -> Self { + Self { + config: NetworkConfig { + global_settings: 
GlobalSettingsBuilder::new() + .build() + .expect(&format!("{NO_ERR_DEF_BUILDER}, {THIS_IS_A_BUG}")), + relaychain: None, + teyrchains: vec![], + hrmp_channels: vec![], + }, + validation_context: Default::default(), + errors: vec![], + _state: PhantomData, + } + } +} + +impl NetworkConfigBuilder { + fn transition( + config: NetworkConfig, + validation_context: Rc>, + errors: Vec, + ) -> NetworkConfigBuilder { + NetworkConfigBuilder { + config, + errors, + validation_context, + _state: PhantomData, + } + } +} + +impl NetworkConfigBuilder { + pub fn new() -> NetworkConfigBuilder { + Self::default() + } + + /// uses the default options for both the relay chain and the validator nodes + /// the only required fields are the name of the validator nodes, + /// and the name of the relay chain ("rococo-local", "polkadot", etc.) + pub fn with_chain_and_nodes( + relay_name: &str, + node_names: Vec, + ) -> NetworkConfigBuilder { + let network_config = NetworkConfigBuilder::new().with_relaychain(|relaychain| { + let mut relaychain_with_node = + relaychain.with_chain(relay_name).with_validator(|node| { + node.with_name(node_names.first().unwrap_or(&"".to_string())) + }); + + for node_name in node_names.iter().skip(1) { + relaychain_with_node = relaychain_with_node + .with_validator(|node_builder| node_builder.with_name(node_name)); + } + relaychain_with_node + }); + + Self::transition( + network_config.config, + network_config.validation_context, + network_config.errors, + ) + } + + /// Set the relay chain using a nested [`RelaychainConfigBuilder`]. 
+ pub fn with_relaychain( + self, + f: impl FnOnce( + RelaychainConfigBuilder, + ) -> RelaychainConfigBuilder, + ) -> NetworkConfigBuilder { + match f(RelaychainConfigBuilder::new( + self.validation_context.clone(), + )) + .build() + { + Ok(relaychain) => Self::transition( + NetworkConfig { + relaychain: Some(relaychain), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition(self.config, self.validation_context, errors), + } + } +} + +impl NetworkConfigBuilder { + /// Set the global settings using a nested [`GlobalSettingsBuilder`]. + pub fn with_global_settings( + self, + f: impl FnOnce(GlobalSettingsBuilder) -> GlobalSettingsBuilder, + ) -> Self { + match f(GlobalSettingsBuilder::new()).build() { + Ok(global_settings) => Self::transition( + NetworkConfig { + global_settings, + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs(self.errors, errors), + ), + } + } + + /// Add a teyrchain using a nested [`TeyrchainConfigBuilder`]. + pub fn with_teyrchain( + self, + f: impl FnOnce( + TeyrchainConfigBuilder, + ) -> TeyrchainConfigBuilder< + teyrchain::states::WithAtLeastOneCollator, + teyrchain::states::Bootstrap, + >, + ) -> Self { + match f(TeyrchainConfigBuilder::new(self.validation_context.clone())).build() { + Ok(teyrchain) => Self::transition( + NetworkConfig { + teyrchains: [self.config.teyrchains, vec![teyrchain]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs(self.errors, errors), + ), + } + } + + /// Backward compatibility alias for with_teyrchain(). 
+ pub fn with_parachain( + self, + f: impl FnOnce( + TeyrchainConfigBuilder, + ) -> TeyrchainConfigBuilder< + teyrchain::states::WithAtLeastOneCollator, + teyrchain::states::Bootstrap, + >, + ) -> Self { + self.with_teyrchain(f) + } + + /// uses default settings for setting for: + /// - the parachain, + /// - the global settings + /// - the hrmp channels + /// + /// the only required parameters are the names of the collators as a vector, + /// and the id of the parachain + pub fn with_parachain_id_and_collators(self, id: u32, collator_names: Vec) -> Self { + if collator_names.is_empty() { + return Self::transition( + self.config, + self.validation_context, + merge_errors( + self.errors, + ConfigError::Teyrchain(id, ValidationError::CantBeEmpty().into()).into(), + ), + ); + } + + self.with_parachain(|parachain| { + let mut parachain_config = parachain.with_id(id).with_collator(|collator| { + collator + .with_name(collator_names.first().unwrap_or(&"".to_string())) + .validator(true) + }); + + for collator_name in collator_names.iter().skip(1) { + parachain_config = parachain_config + .with_collator(|collator| collator.with_name(collator_name).validator(true)); + } + parachain_config + }) + + // TODO: if need to set global settings and hrmp channels + // we can also do in here + } + + /// Add an HRMP channel using a nested [`HrmpChannelConfigBuilder`]. + pub fn with_hrmp_channel( + self, + f: impl FnOnce( + HrmpChannelConfigBuilder, + ) -> HrmpChannelConfigBuilder, + ) -> Self { + let new_hrmp_channel = f(HrmpChannelConfigBuilder::new()).build(); + + Self::transition( + NetworkConfig { + hrmp_channels: [self.config.hrmp_channels, vec![new_hrmp_channel]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Seals the builder and returns a [`NetworkConfig`] if there are no validation errors, else returns errors. 
+ pub fn build(self) -> Result> { + let mut paras_to_register: HashSet = Default::default(); + let mut errs: Vec = self + .config + .teyrchains + .iter() + .filter_map(|para| { + if let Some(RegistrationStrategy::Manual) = para.registration_strategy() { + return None; + }; + + if paras_to_register.insert(para.id()) { + None + } else { + // already in the set + Some(anyhow!( + "ParaId {} already set to be registered, only one should be.", + para.id() + )) + } + }) + .collect(); + + if !self.errors.is_empty() || !errs.is_empty() { + let mut ret_errs = self.errors; + ret_errs.append(&mut errs); + return Err(ret_errs); + } + + Ok(self.config) + } +} + +#[cfg(test)] +mod tests { + use std::path::PathBuf; + + use super::*; + use crate::teyrchain::RegistrationStrategy; + + #[test] + fn network_config_builder_should_succeeds_and_returns_a_network_config() { + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_random_nominators_count(10) + .with_validator(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1) + .with_chain("myparachain1") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .validator(true) + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(2) + .with_chain("myparachain2") + .with_initial_balance(0) + .with_collator(|collator| { + collator + .with_name("collator2") + .with_command("command2") + .validator(true) + }) + }) + .with_hrmp_channel(|hrmp_channel1| { + hrmp_channel1 + .with_sender(1) + .with_recipient(2) + .with_max_capacity(200) + .with_max_message_size(500) + }) + .with_hrmp_channel(|hrmp_channel2| { + hrmp_channel2 + .with_sender(2) + .with_recipient(1) + .with_max_capacity(100) + .with_max_message_size(250) + }) + .with_global_settings(|global_settings| { + global_settings + .with_network_spawn_timeout(1200) + 
.with_node_spawn_timeout(240) + }) + .build() + .unwrap(); + + // relaychain + assert_eq!(network_config.relaychain().chain().as_str(), "polkadot"); + assert_eq!(network_config.relaychain().nodes().len(), 1); + let &node = network_config.relaychain().nodes().first().unwrap(); + assert_eq!(node.name(), "node"); + assert_eq!(node.command().unwrap().as_str(), "command"); + assert!(node.is_validator()); + assert_eq!( + network_config + .relaychain() + .random_nominators_count() + .unwrap(), + 10 + ); + + // teyrchains + assert_eq!(network_config.teyrchains().len(), 2); + + // parachain1 + let ¶chain1 = network_config.teyrchains().first().unwrap(); + assert_eq!(parachain1.id(), 1); + assert_eq!(parachain1.collators().len(), 1); + let &collator = parachain1.collators().first().unwrap(); + assert_eq!(collator.name(), "collator1"); + assert_eq!(collator.command().unwrap().as_str(), "command1"); + assert!(collator.is_validator()); + assert_eq!(parachain1.initial_balance(), 100_000); + assert_eq!(parachain1.unique_id(), "1"); + + // parachain2 + let ¶chain2 = network_config.teyrchains().last().unwrap(); + assert_eq!(parachain2.id(), 2); + assert_eq!(parachain2.collators().len(), 1); + let &collator = parachain2.collators().first().unwrap(); + assert_eq!(collator.name(), "collator2"); + assert_eq!(collator.command().unwrap().as_str(), "command2"); + assert!(collator.is_validator()); + assert_eq!(parachain2.initial_balance(), 0); + + // hrmp_channels + assert_eq!(network_config.hrmp_channels().len(), 2); + + // hrmp_channel1 + let &hrmp_channel1 = network_config.hrmp_channels().first().unwrap(); + assert_eq!(hrmp_channel1.sender(), 1); + assert_eq!(hrmp_channel1.recipient(), 2); + assert_eq!(hrmp_channel1.max_capacity(), 200); + assert_eq!(hrmp_channel1.max_message_size(), 500); + + // hrmp_channel2 + let &hrmp_channel2 = network_config.hrmp_channels().last().unwrap(); + assert_eq!(hrmp_channel2.sender(), 2); + assert_eq!(hrmp_channel2.recipient(), 1); + 
assert_eq!(hrmp_channel2.max_capacity(), 100); + assert_eq!(hrmp_channel2.max_message_size(), 250); + + // global settings + assert_eq!( + network_config.global_settings().network_spawn_timeout(), + 1200 + ); + assert_eq!(network_config.global_settings().node_spawn_timeout(), 240); + assert!(network_config.global_settings().tear_down_on_failure()); + } + + #[test] + fn network_config_builder_should_fails_and_returns_multiple_errors_if_relaychain_is_invalid() { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_random_nominators_count(10) + .with_default_image("invalid.image") + .with_validator(|node| node.with_name("node").with_command("invalid command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1) + .with_chain("myparachain") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .validator(true) + }) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.default_image: 'invalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "relaychain.nodes['node'].command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn network_config_builder_should_fails_and_returns_multiple_errors_if_parachain_is_invalid() { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_random_nominators_count(10) + .with_validator(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("invalid command") + .with_image("invalid.image") + .validator(true) + }) + }) + 
.build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].collators['collator1'].command: 'invalid command' shouldn't contains whitespace" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "teyrchain[1000].collators['collator1'].image: 'invalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn network_config_builder_should_fails_and_returns_multiple_errors_if_multiple_teyrchains_are_invalid( + ) { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_random_nominators_count(10) + .with_validator(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain1") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("invalid command") + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(2000) + .with_chain("myparachain2") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator2") + .validator(true) + .with_resources(|resources| { + resources + .with_limit_cpu("1000m") + .with_request_memory("1Gi") + .with_request_cpu("invalid") + }) + }) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].collators['collator1'].command: 'invalid command' shouldn't contains whitespace" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "teyrchain[2000].collators['collator2'].resources.request_cpu: 'invalid' doesn't match regex '^\\d+(.\\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn network_config_builder_should_fails_and_returns_multiple_errors_if_global_settings_is_invalid( + ) { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + 
.with_chain("polkadot") + .with_random_nominators_count(10) + .with_validator(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .validator(true) + }) + }) + .with_global_settings(|global_settings| { + global_settings + .with_local_ip("127.0.0000.1") + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421"]) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "global_settings.local_ip: invalid IP address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + } + + #[test] + fn network_config_builder_should_fails_and_returns_multiple_errors_if_multiple_fields_are_invalid( + ) { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_random_nominators_count(10) + .with_validator(|node| node.with_name("node").with_command("invalid command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_initial_balance(100_000) + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .with_image("invalid.image") + }) + }) + .with_global_settings(|global_settings| global_settings.with_local_ip("127.0.0000.1")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 3); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes['node'].command: 'invalid command' shouldn't contains whitespace" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "teyrchain[1000].collators['collator'].image: 'invalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + assert_eq!( + 
errors.get(2).unwrap().to_string(), + "global_settings.local_ip: invalid IP address syntax" + ); + } + + #[test] + fn network_config_should_be_dumpable_to_a_toml_config_for_a_small_network() { + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_args(vec![("-lparachain", "debug").into()]) + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| { + node.with_name("bob") + .invulnerable(false) + .bootnode(true) + .with_args(vec![("--database", "paritydb-experimental").into()]) + }) + }) + .build() + .unwrap(); + + let got = network_config.dump_to_toml().unwrap(); + let expected = fs::read_to_string("./testing/snapshots/0000-small-network.toml").unwrap(); + assert_eq!(got, expected); + } + + #[test] + fn network_config_should_be_dumpable_to_a_toml_config_for_a_big_network() { + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_resources(|resources| { + resources + .with_request_cpu(100000) + .with_request_memory("500M") + .with_limit_cpu("10Gi") + .with_limit_memory("4000M") + }) + .with_validator(|node| { + node.with_name("alice") + .with_initial_balance(1_000_000_000) + .bootnode(true) + .invulnerable(true) + }) + .with_validator(|node| node.with_name("bob").invulnerable(true).bootnode(true)) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_chain_spec_path("/path/to/my/chain/spec.json") + .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .onboard_as_parachain(false) + .with_default_db_snapshot("https://storage.com/path/to/db_snapshot.tgz") + .with_collator(|collator| { + collator + .with_name("john") + 
.bootnode(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + }) + .with_fullnode(|collator| { + collator + .with_name("charles") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + .with_collator(|collator| { + collator + .with_name("frank") + .invulnerable(false) + .bootnode(true) + .with_initial_balance(1_000_000_000) + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(2000) + .with_chain("myotherparachain") + .with_chain_spec_path("/path/to/my/other/chain/spec.json") + .with_collator(|collator| { + collator + .with_name("mike") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + }) + .with_fullnode(|collator| { + collator + .with_name("georges") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + .with_collator(|collator| { + collator + .with_name("victor") + .invulnerable(false) + .bootnode(true) + .with_initial_balance(1_000_000_000) + }) + }) + .with_hrmp_channel(|hrmp_channel| { + hrmp_channel + .with_sender(1000) + .with_recipient(2000) + .with_max_capacity(150) + .with_max_message_size(5000) + }) + .with_hrmp_channel(|hrmp_channel| { + hrmp_channel + .with_sender(2000) + .with_recipient(1000) + .with_max_capacity(200) + .with_max_message_size(8000) + }) + .build() + .unwrap(); + + let got = network_config.dump_to_toml().unwrap(); + let expected = fs::read_to_string("./testing/snapshots/0001-big-network.toml").unwrap(); + assert_eq!(got, expected); + } + + #[test] + fn network_config_builder_should_be_dumplable_to_a_toml_config_a_overrides_default_correctly() { + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_args(vec![("-name", "value").into(), "--flag".into()]) + .with_default_db_snapshot("https://storage.com/path/to/db_snapshot.tgz") + .with_default_resources(|resources| 
{ + resources + .with_request_cpu(100000) + .with_request_memory("500M") + .with_limit_cpu("10Gi") + .with_limit_memory("4000M") + }) + .with_validator(|node| { + node.with_name("alice") + .with_initial_balance(1_000_000_000) + .bootnode(true) + .invulnerable(true) + }) + .with_validator(|node| { + node.with_name("bob") + .invulnerable(true) + .bootnode(true) + .with_image("mycustomimage:latest") + .with_command("my-custom-command") + .with_db_snapshot("https://storage.com/path/to/other/db_snapshot.tgz") + .with_resources(|resources| { + resources + .with_request_cpu(1000) + .with_request_memory("250Mi") + .with_limit_cpu("5Gi") + .with_limit_memory("2Gi") + }) + .with_args(vec![("-myothername", "value").into()]) + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_chain_spec_path("/path/to/my/chain/spec.json") + .with_default_db_snapshot("https://storage.com/path/to/other_snapshot.tgz") + .with_default_command("my-default-command") + .with_default_image("mydefaultimage:latest") + .with_collator(|collator| { + collator + .with_name("john") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + .with_command("my-non-default-command") + .with_image("anotherimage:latest") + }) + .with_fullnode(|collator| { + collator + .with_name("charles") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + }) + .build() + .unwrap(); + + let got = network_config.dump_to_toml().unwrap(); + let expected = + fs::read_to_string("./testing/snapshots/0002-overridden-defaults.toml").unwrap(); + assert_eq!(got, expected); + } + + #[test] + fn the_toml_config_with_custom_settings() { + let settings = GlobalSettingsBuilder::new() + .with_base_dir("/tmp/test-demo") + .build() + .unwrap(); + + let load_from_toml = NetworkConfig::load_from_toml_with_settings( + "./testing/snapshots/0000-small-network.toml", + &settings, + ) + .unwrap(); + + assert_eq!( + 
Some(PathBuf::from("/tmp/test-demo").as_path()), + load_from_toml.global_settings.base_dir() + ); + } + + #[test] + fn the_toml_config_should_be_imported_and_match_a_network() { + let load_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0000-small-network.toml").unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_args(vec![("-lparachain=debug").into()]) + .with_validator(|node| { + node.with_name("alice") + .validator(true) + .invulnerable(true) + .bootnode(false) + .with_initial_balance(2000000000000) + }) + .with_validator(|node| { + node.with_name("bob") + .with_args(vec![("--database", "paritydb-experimental").into()]) + .invulnerable(false) + .bootnode(true) + .with_initial_balance(2000000000000) + }) + }) + .build() + .unwrap(); + + // We need to assert parts of the network config separately because the expected one contains the chain default context which + // is used for dumbing to tomp while the + // while loaded + assert_eq!( + expected.relaychain().chain(), + load_from_toml.relaychain().chain() + ); + assert_eq!( + expected.relaychain().default_args(), + load_from_toml.relaychain().default_args() + ); + assert_eq!( + expected.relaychain().default_command(), + load_from_toml.relaychain().default_command() + ); + assert_eq!( + expected.relaychain().default_image(), + load_from_toml.relaychain().default_image() + ); + + // Check the nodes without the Chain Default Context + expected + .relaychain() + .nodes() + .iter() + .zip(load_from_toml.relaychain().nodes().iter()) + .for_each(|(expected_node, loaded_node)| { + assert_eq!(expected_node.name(), loaded_node.name()); + assert_eq!(expected_node.command(), loaded_node.command()); + assert_eq!(expected_node.args(), loaded_node.args()); + assert_eq!( + expected_node.is_invulnerable(), + 
loaded_node.is_invulnerable() + ); + assert_eq!(expected_node.is_validator(), loaded_node.is_validator()); + assert_eq!(expected_node.is_bootnode(), loaded_node.is_bootnode()); + assert_eq!( + expected_node.initial_balance(), + loaded_node.initial_balance() + ); + }); + } + + #[test] + fn the_toml_config_without_settings_should_be_imported_and_match_a_network() { + let load_from_toml = NetworkConfig::load_from_toml( + "./testing/snapshots/0004-small-network-without-settings.toml", + ) + .unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .build() + .unwrap(); + + assert_eq!( + load_from_toml.global_settings().network_spawn_timeout(), + expected.global_settings().network_spawn_timeout() + ) + } + + #[test] + fn the_toml_config_should_be_imported_and_match_a_network_with_teyrchains() { + let load_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0001-big-network.toml").unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_resources(|resources| { + resources + .with_request_cpu(100000) + .with_request_memory("500M") + .with_limit_cpu("10Gi") + .with_limit_memory("4000M") + }) + .with_validator(|node| { + node.with_name("alice") + .with_initial_balance(1_000_000_000) + .bootnode(true) + .invulnerable(true) + }) + .with_validator(|node| node.with_name("bob").invulnerable(true).bootnode(true)) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + .with_chain_spec_path("/path/to/my/chain/spec.json") + .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .onboard_as_parachain(false) + 
.with_default_db_snapshot("https://storage.com/path/to/db_snapshot.tgz") + .with_collator(|collator| { + collator + .with_name("john") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + }) + .with_fullnode(|collator| { + collator + .with_name("charles") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + .with_collator(|collator| { + collator + .with_name("frank") + .invulnerable(false) + .bootnode(true) + .with_initial_balance(1_000_000_000) + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(2000) + .with_chain("myotherparachain") + .with_chain_spec_path("/path/to/my/other/chain/spec.json") + .with_collator(|collator| { + collator + .with_name("mike") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + }) + .with_fullnode(|collator| { + collator + .with_name("georges") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + .with_collator(|collator| { + collator + .with_name("victor") + .invulnerable(false) + .bootnode(true) + .with_initial_balance(1_000_000_000) + }) + }) + .with_hrmp_channel(|hrmp_channel| { + hrmp_channel + .with_sender(1000) + .with_recipient(2000) + .with_max_capacity(150) + .with_max_message_size(5000) + }) + .with_hrmp_channel(|hrmp_channel| { + hrmp_channel + .with_sender(2000) + .with_recipient(1000) + .with_max_capacity(200) + .with_max_message_size(8000) + }) + .build() + .unwrap(); + + // Check the relay chain + assert_eq!( + expected.relaychain().default_resources(), + load_from_toml.relaychain().default_resources() + ); + + // Check the nodes without the Chain Default Context + expected + .relaychain() + .nodes() + .iter() + .zip(load_from_toml.relaychain().nodes().iter()) + .for_each(|(expected_node, loaded_node)| { + assert_eq!(expected_node.name(), loaded_node.name()); + assert_eq!(expected_node.command(), loaded_node.command()); + assert_eq!(expected_node.args(), loaded_node.args()); + 
assert_eq!(expected_node.is_validator(), loaded_node.is_validator()); + assert_eq!(expected_node.is_bootnode(), loaded_node.is_bootnode()); + assert_eq!( + expected_node.initial_balance(), + loaded_node.initial_balance() + ); + assert_eq!( + expected_node.is_invulnerable(), + loaded_node.is_invulnerable() + ); + }); + + expected + .teyrchains() + .iter() + .zip(load_from_toml.teyrchains().iter()) + .for_each(|(expected_parachain, loaded_parachain)| { + assert_eq!(expected_parachain.id(), loaded_parachain.id()); + assert_eq!(expected_parachain.chain(), loaded_parachain.chain()); + assert_eq!( + expected_parachain.chain_spec_path(), + loaded_parachain.chain_spec_path() + ); + assert_eq!( + expected_parachain.registration_strategy(), + loaded_parachain.registration_strategy() + ); + assert_eq!( + expected_parachain.onboard_as_parachain(), + loaded_parachain.onboard_as_parachain() + ); + assert_eq!( + expected_parachain.default_db_snapshot(), + loaded_parachain.default_db_snapshot() + ); + assert_eq!( + expected_parachain.default_command(), + loaded_parachain.default_command() + ); + assert_eq!( + expected_parachain.default_image(), + loaded_parachain.default_image() + ); + assert_eq!( + expected_parachain.collators().len(), + loaded_parachain.collators().len() + ); + expected_parachain + .collators() + .iter() + .zip(loaded_parachain.collators().iter()) + .for_each(|(expected_collator, loaded_collator)| { + assert_eq!(expected_collator.name(), loaded_collator.name()); + assert_eq!(expected_collator.command(), loaded_collator.command()); + assert_eq!(expected_collator.image(), loaded_collator.image()); + assert_eq!( + expected_collator.is_validator(), + loaded_collator.is_validator() + ); + assert_eq!( + expected_collator.is_bootnode(), + loaded_collator.is_bootnode() + ); + assert_eq!( + expected_collator.is_invulnerable(), + loaded_collator.is_invulnerable() + ); + assert_eq!( + expected_collator.initial_balance(), + loaded_collator.initial_balance() + ); + }); + }); 
+ + expected + .hrmp_channels() + .iter() + .zip(load_from_toml.hrmp_channels().iter()) + .for_each(|(expected_hrmp_channel, loaded_hrmp_channel)| { + assert_eq!(expected_hrmp_channel.sender(), loaded_hrmp_channel.sender()); + assert_eq!( + expected_hrmp_channel.recipient(), + loaded_hrmp_channel.recipient() + ); + assert_eq!( + expected_hrmp_channel.max_capacity(), + loaded_hrmp_channel.max_capacity() + ); + assert_eq!( + expected_hrmp_channel.max_message_size(), + loaded_hrmp_channel.max_message_size() + ); + }); + } + + #[test] + fn the_toml_config_should_be_imported_and_match_a_network_with_overriden_defaults() { + let load_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0002-overridden-defaults.toml") + .unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_default_args(vec![("-name", "value").into(), "--flag".into()]) + .with_default_db_snapshot("https://storage.com/path/to/db_snapshot.tgz") + .with_default_resources(|resources| { + resources + .with_request_cpu(100000) + .with_request_memory("500M") + .with_limit_cpu("10Gi") + .with_limit_memory("4000M") + }) + .with_validator(|node| { + node.with_name("alice") + .with_initial_balance(1_000_000_000) + .bootnode(true) + .invulnerable(true) + }) + .with_validator(|node| { + node.with_name("bob") + .invulnerable(true) + .bootnode(true) + .with_image("mycustomimage:latest") + .with_command("my-custom-command") + .with_db_snapshot("https://storage.com/path/to/other/db_snapshot.tgz") + .with_resources(|resources| { + resources + .with_request_cpu(1000) + .with_request_memory("250Mi") + .with_limit_cpu("5Gi") + .with_limit_memory("2Gi") + }) + .with_args(vec![("-myothername", "value").into()]) + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(1000) + .with_chain("myparachain") + 
.with_chain_spec_path("/path/to/my/chain/spec.json") + .with_default_db_snapshot("https://storage.com/path/to/other_snapshot.tgz") + .with_default_command("my-default-command") + .with_default_image("mydefaultimage:latest") + .with_collator(|collator| { + collator + .with_name("john") + .bootnode(true) + .validator(true) + .invulnerable(true) + .with_initial_balance(5_000_000_000) + .with_command("my-non-default-command") + .with_image("anotherimage:latest") + }) + .with_fullnode(|collator| { + collator + .with_name("charles") + .bootnode(true) + .invulnerable(true) + .with_initial_balance(0) + }) + }) + .build() + .unwrap(); + + expected + .teyrchains() + .iter() + .zip(load_from_toml.teyrchains().iter()) + .for_each(|(expected_parachain, loaded_parachain)| { + assert_eq!(expected_parachain.id(), loaded_parachain.id()); + assert_eq!(expected_parachain.chain(), loaded_parachain.chain()); + assert_eq!( + expected_parachain.chain_spec_path(), + loaded_parachain.chain_spec_path() + ); + assert_eq!( + expected_parachain.registration_strategy(), + loaded_parachain.registration_strategy() + ); + assert_eq!( + expected_parachain.onboard_as_parachain(), + loaded_parachain.onboard_as_parachain() + ); + assert_eq!( + expected_parachain.default_db_snapshot(), + loaded_parachain.default_db_snapshot() + ); + assert_eq!( + expected_parachain.default_command(), + loaded_parachain.default_command() + ); + assert_eq!( + expected_parachain.default_image(), + loaded_parachain.default_image() + ); + assert_eq!( + expected_parachain.collators().len(), + loaded_parachain.collators().len() + ); + expected_parachain + .collators() + .iter() + .zip(loaded_parachain.collators().iter()) + .for_each(|(expected_collator, loaded_collator)| { + assert_eq!(expected_collator.name(), loaded_collator.name()); + assert_eq!(expected_collator.command(), loaded_collator.command()); + assert_eq!(expected_collator.image(), loaded_collator.image()); + assert_eq!( + expected_collator.is_validator(), + 
loaded_collator.is_validator() + ); + assert_eq!( + expected_collator.is_bootnode(), + loaded_collator.is_bootnode() + ); + assert_eq!( + expected_collator.is_invulnerable(), + loaded_collator.is_invulnerable() + ); + assert_eq!( + expected_collator.initial_balance(), + loaded_collator.initial_balance() + ); + }); + }); + } + + #[test] + fn with_chain_and_nodes_works() { + let network_config = NetworkConfigBuilder::with_chain_and_nodes( + "rococo-local", + vec!["alice".to_string(), "bob".to_string()], + ) + .build() + .unwrap(); + + // relaychain + assert_eq!(network_config.relaychain().chain().as_str(), "rococo-local"); + assert_eq!(network_config.relaychain().nodes().len(), 2); + let mut node_names = network_config.relaychain().nodes().into_iter(); + let node1 = node_names.next().unwrap().name(); + assert_eq!(node1, "alice"); + let node2 = node_names.next().unwrap().name(); + assert_eq!(node2, "bob"); + + // teyrchains + assert_eq!(network_config.teyrchains().len(), 0); + } + + #[test] + fn with_chain_and_nodes_should_fail_with_empty_relay_name() { + let errors = NetworkConfigBuilder::with_chain_and_nodes("", vec!["alice".to_string()]) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.chain: can't be empty" + ); + } + + #[test] + fn with_chain_and_nodes_should_fail_with_empty_node_list() { + let errors = NetworkConfigBuilder::with_chain_and_nodes("rococo-local", vec![]) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes[''].name: can't be empty" + ); + } + + #[test] + fn with_chain_and_nodes_should_fail_with_empty_node_name() { + let errors = NetworkConfigBuilder::with_chain_and_nodes( + "rococo-local", + vec!["alice".to_string(), "".to_string()], + ) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes[''].name: can't be empty" + ); + } + + #[test] + fn with_parachain_id_and_collators_works() { + let network_config 
= NetworkConfigBuilder::with_chain_and_nodes( + "rococo-local", + vec!["alice".to_string(), "bob".to_string()], + ) + .with_parachain_id_and_collators( + 100, + vec!["collator1".to_string(), "collator2".to_string()], + ) + .build() + .unwrap(); + + // relaychain + assert_eq!(network_config.relaychain().chain().as_str(), "rococo-local"); + assert_eq!(network_config.relaychain().nodes().len(), 2); + let mut node_names = network_config.relaychain().nodes().into_iter(); + let node1 = node_names.next().unwrap().name(); + assert_eq!(node1, "alice"); + let node2 = node_names.next().unwrap().name(); + assert_eq!(node2, "bob"); + + // teyrchains + assert_eq!(network_config.teyrchains().len(), 1); + let ¶chain1 = network_config.teyrchains().first().unwrap(); + assert_eq!(parachain1.id(), 100); + assert_eq!(parachain1.collators().len(), 2); + let mut collator_names = parachain1.collators().into_iter(); + let collator1 = collator_names.next().unwrap().name(); + assert_eq!(collator1, "collator1"); + let collator2 = collator_names.next().unwrap().name(); + assert_eq!(collator2, "collator2"); + + assert_eq!(parachain1.initial_balance(), 2_000_000_000_000); + } + + #[test] + fn with_parachain_id_and_collators_should_fail_with_empty_collator_list() { + let errors = + NetworkConfigBuilder::with_chain_and_nodes("polkadot", vec!["alice".to_string()]) + .with_parachain_id_and_collators(1, vec![]) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1].can't be empty" + ); + } + + #[test] + fn with_parachain_id_and_collators_should_fail_with_empty_collator_name() { + let errors = + NetworkConfigBuilder::with_chain_and_nodes("polkadot", vec!["alice".to_string()]) + .with_parachain_id_and_collators(1, vec!["collator1".to_string(), "".to_string()]) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1].collators[''].name: can't be empty" + ); + } + + #[test] + fn wasm_override_in_toml_should_work() { + 
let load_from_toml = NetworkConfig::load_from_toml( + "./testing/snapshots/0005-small-networl-with-wasm-override.toml", + ) + .unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_wasm_override("/some/path/runtime.wasm") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(1000) + .with_wasm_override("https://some.com/runtime.wasm") + .with_collator(|c| c.with_name("john")) + }) + .build() + .unwrap(); + + assert_eq!( + load_from_toml.relaychain().wasm_override(), + expected.relaychain().wasm_override() + ); + assert_eq!( + load_from_toml.teyrchains()[0].wasm_override(), + expected.teyrchains()[0].wasm_override() + ); + } + + #[test] + fn multiple_paras_with_same_id_should_work() { + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_fullnode(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + .with_id(1) + .with_chain("myparachain1") + .with_collator(|collator| { + collator.with_name("collator1").with_command("command1") + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(1) + .with_chain("myparachain1") + .with_registration_strategy(RegistrationStrategy::Manual) + .with_collator(|collator| { + collator.with_name("collator2").with_command("command1") + }) + }) + .build() + .unwrap(); + + let ¶chain2 = network_config.teyrchains().last().unwrap(); + assert_eq!(parachain2.unique_id(), "1-1"); + } + + #[test] + fn multiple_paras_with_same_id_both_for_register_should_fail() { + let errors = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot") + .with_fullnode(|node| node.with_name("node").with_command("command")) + }) + .with_parachain(|parachain| { + parachain + 
.with_id(1) + .with_chain("myparachain1") + .with_collator(|collator| { + collator.with_name("collator1").with_command("command1") + }) + }) + .with_parachain(|parachain| { + parachain + .with_id(1) + .with_chain("myparachain1") + // .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .with_collator(|collator| { + collator + .with_name("collator2") + .with_command("command1") + }) + }) + .build() + .unwrap_err(); + + assert_eq!( + errors.first().unwrap().to_string(), + "ParaId 1 already set to be registered, only one should be." + ); + } + + #[test] + fn network_config_should_work_from_toml_without_chain_name() { + let loaded_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0006-without-rc-chain-name.toml") + .unwrap(); + + assert_eq!( + "rococo-local", + loaded_from_toml.relaychain().chain().as_str() + ); + } + + #[test] + fn network_config_should_work_from_toml_with_duplicate_name_between_collator_and_relay_node() { + let loaded_from_toml = NetworkConfig::load_from_toml( + "./testing/snapshots/0007-small-network_w_teyrchain_w_duplicate_node_names.toml", + ) + .unwrap(); + + assert_eq!( + loaded_from_toml + .relaychain() + .nodes() + .iter() + .filter(|n| n.name() == "alice") + .count(), + 1 + ); + assert_eq!( + loaded_from_toml + .teyrchains() + .iter() + .flat_map(|para| para.collators()) + .filter(|n| n.name() == "alice-1") + .count(), + 1 + ); + } + + #[test] + fn raw_spec_override_in_toml_should_work() { + let load_from_toml = NetworkConfig::load_from_toml( + "./testing/snapshots/0008-small-network-with-raw-spec-override.toml", + ) + .unwrap(); + + let expected = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_raw_spec_override("/some/path/raw_spec_override.json") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(1000) + 
.with_raw_spec_override("https://some.com/raw_spec_override.json") + .with_collator(|c| c.with_name("john")) + }) + .build() + .unwrap(); + + assert_eq!( + load_from_toml.relaychain().raw_spec_override(), + expected.relaychain().raw_spec_override() + ); + assert_eq!( + load_from_toml.teyrchains()[0].raw_spec_override(), + expected.teyrchains()[0].raw_spec_override() + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/relaychain.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/relaychain.rs new file mode 100644 index 00000000..603ed1d5 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/relaychain.rs @@ -0,0 +1,1096 @@ +use std::{cell::RefCell, error::Error, fmt::Debug, marker::PhantomData, rc::Rc}; + +use serde::{Deserialize, Serialize}; +use support::constants::{DEFAULT_TYPESTATE, THIS_IS_A_BUG}; + +use crate::{ + shared::{ + errors::{ConfigError, FieldError}, + helpers::{merge_errors, merge_errors_vecs}, + macros::states, + node::{self, GroupNodeConfig, GroupNodeConfigBuilder, NodeConfig, NodeConfigBuilder}, + resources::{Resources, ResourcesBuilder}, + types::{ + Arg, AssetLocation, Chain, ChainDefaultContext, Command, Image, ValidationContext, + }, + }, + types::{ChainSpecRuntime, JsonOverrides}, + utils::{default_command_polkadot, default_relaychain_chain, is_false}, +}; + +/// A relay chain configuration, composed of nodes and fine-grained configuration options. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct RelaychainConfig { + #[serde(default = "default_relaychain_chain")] + chain: Chain, + #[serde(default = "default_command_polkadot")] + default_command: Option, + default_image: Option, + default_resources: Option, + default_db_snapshot: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + default_args: Vec, + /// chain-spec to use (location can be url or file path) + chain_spec_path: Option, + /// Full _template_ command, will be rendered (using custom token replacements) + /// and executed for generate the chain-spec. + /// available tokens {{chainName}} / {{disableBootnodes}} + chain_spec_command: Option, + /// runtime to use for generating the chain-spec. + /// Location can be url or file path and an optional preset + chain_spec_runtime: Option, + #[serde(skip_serializing_if = "is_false", default)] + chain_spec_command_is_local: bool, + chain_spec_command_output_path: Option, + random_nominators_count: Option, + max_nominations: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + nodes: Vec, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + node_groups: Vec, + #[serde(rename = "genesis", skip_serializing_if = "Option::is_none")] + runtime_genesis_patch: Option, + // Path or url to override the runtime (:code) in the chain-spec + wasm_override: Option, + command: Option, + // Inline json or asset location to override raw chainspec + raw_spec_override: Option, +} + +impl RelaychainConfig { + /// The chain name. + pub fn chain(&self) -> &Chain { + &self.chain + } + + /// The default command used for nodes. + pub fn default_command(&self) -> Option<&Command> { + self.default_command.as_ref() + } + + /// The default container image used for nodes. + pub fn default_image(&self) -> Option<&Image> { + self.default_image.as_ref() + } + + /// The default resources limits used for nodes. 
+ pub fn default_resources(&self) -> Option<&Resources> { + self.default_resources.as_ref() + } + + /// The default database snapshot location that will be used for state. + pub fn default_db_snapshot(&self) -> Option<&AssetLocation> { + self.default_db_snapshot.as_ref() + } + + /// The default arguments that will be used to launch the node command. + pub fn default_args(&self) -> Vec<&Arg> { + self.default_args.iter().collect::>() + } + + /// The location of an pre-existing chain specification for the relay chain. + pub fn chain_spec_path(&self) -> Option<&AssetLocation> { + self.chain_spec_path.as_ref() + } + + /// The location of a wasm runtime to override in the chain-spec. + pub fn wasm_override(&self) -> Option<&AssetLocation> { + self.wasm_override.as_ref() + } + + /// The full _template_ command to genera the chain-spec + pub fn chain_spec_command(&self) -> Option<&str> { + self.chain_spec_command.as_deref() + } + + /// Does the chain_spec_command needs to be run locally + pub fn chain_spec_command_is_local(&self) -> bool { + self.chain_spec_command_is_local + } + + /// The file where the `chain_spec_command` will write the chain-spec into. + /// Defaults to /dev/stdout. + pub fn chain_spec_command_output_path(&self) -> Option<&str> { + self.chain_spec_command_output_path.as_deref() + } + + /// The non-default command used for nodes. + pub fn command(&self) -> Option<&Command> { + self.command.as_ref() + } + + /// The number of `random nominators` to create for chains using staking, this is used in tandem with `max_nominations` to simulate the amount of nominators and nominations. + pub fn random_nominators_count(&self) -> Option { + self.random_nominators_count + } + + /// The maximum number of nominations to create per nominator. + pub fn max_nominations(&self) -> Option { + self.max_nominations + } + + /// The genesis overrides as a JSON value. 
+ pub fn runtime_genesis_patch(&self) -> Option<&serde_json::Value> { + self.runtime_genesis_patch.as_ref() + } + + /// The nodes of the relay chain. + pub fn nodes(&self) -> Vec<&NodeConfig> { + self.nodes.iter().collect::>() + } + + /// The group nodes of the relay chain. + pub fn group_node_configs(&self) -> Vec<&GroupNodeConfig> { + self.node_groups.iter().collect::>() + } + + /// The location of a file or inline json to override raw chain-spec. + pub fn raw_spec_override(&self) -> Option<&JsonOverrides> { + self.raw_spec_override.as_ref() + } + + /// Set the nodes to build + pub(crate) fn set_nodes(&mut self, nodes: Vec) { + self.nodes = nodes; + } + + /// The location of runtime to use by chain-spec builder lib (from `sc-chain-spec` crate) + pub fn chain_spec_runtime(&self) -> Option<&ChainSpecRuntime> { + self.chain_spec_runtime.as_ref() + } +} + +states! { + Initial, + WithChain, + WithAtLeastOneNode +} + +/// A relay chain configuration builder, used to build a [`RelaychainConfig`] declaratively with fields validation. 
+pub struct RelaychainConfigBuilder { + config: RelaychainConfig, + validation_context: Rc>, + errors: Vec, + _state: PhantomData, +} + +impl Default for RelaychainConfigBuilder { + fn default() -> Self { + Self { + config: RelaychainConfig { + chain: "default" + .try_into() + .expect(&format!("{DEFAULT_TYPESTATE} {THIS_IS_A_BUG}")), + default_command: None, + default_image: None, + default_resources: None, + default_db_snapshot: None, + default_args: vec![], + chain_spec_path: None, + chain_spec_command: None, + chain_spec_command_output_path: None, + chain_spec_runtime: None, + wasm_override: None, + chain_spec_command_is_local: false, // remote cmd by default + command: None, + random_nominators_count: None, + max_nominations: None, + runtime_genesis_patch: None, + nodes: vec![], + node_groups: vec![], + raw_spec_override: None, + }, + validation_context: Default::default(), + errors: vec![], + _state: PhantomData, + } + } +} + +impl RelaychainConfigBuilder { + fn transition( + config: RelaychainConfig, + validation_context: Rc>, + errors: Vec, + ) -> RelaychainConfigBuilder { + RelaychainConfigBuilder { + config, + validation_context, + errors, + _state: PhantomData, + } + } + + fn default_chain_context(&self) -> ChainDefaultContext { + ChainDefaultContext { + default_command: self.config.default_command.clone(), + default_image: self.config.default_image.clone(), + default_resources: self.config.default_resources.clone(), + default_db_snapshot: self.config.default_db_snapshot.clone(), + default_args: self.config.default_args.clone(), + } + } + + fn create_node_builder(&self, f: F) -> NodeConfigBuilder + where + F: FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + { + f(NodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + } +} + +impl RelaychainConfigBuilder { + pub fn new( + validation_context: Rc>, + ) -> RelaychainConfigBuilder { + Self { + validation_context, + ..Self::default() + } + } + + /// Set the chain name 
(e.g. rococo-local). + pub fn with_chain(self, chain: T) -> RelaychainConfigBuilder + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match chain.try_into() { + Ok(chain) => Self::transition( + RelaychainConfig { + chain, + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::Chain(error.into()).into()), + ), + } + } +} + +impl RelaychainConfigBuilder { + /// Set the default command used for nodes. Can be overridden. + pub fn with_default_command(self, command: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match command.try_into() { + Ok(command) => Self::transition( + RelaychainConfig { + default_command: Some(command), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::DefaultCommand(error.into()).into()), + ), + } + } + + /// Set the default container image used for nodes. Can be overridden. + pub fn with_default_image(self, image: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match image.try_into() { + Ok(image) => Self::transition( + RelaychainConfig { + default_image: Some(image), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::DefaultImage(error.into()).into()), + ), + } + } + + /// Set the default resources limits used for nodes. Can be overridden. 
+ pub fn with_default_resources( + self, + f: impl FnOnce(ResourcesBuilder) -> ResourcesBuilder, + ) -> Self { + match f(ResourcesBuilder::new()).build() { + Ok(default_resources) => Self::transition( + RelaychainConfig { + default_resources: Some(default_resources), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| FieldError::DefaultResources(error).into()) + .collect::>(), + ), + ), + } + } + + /// Set the default database snapshot location that will be used for state. Can be overridden. + pub fn with_default_db_snapshot(self, location: impl Into) -> Self { + Self::transition( + RelaychainConfig { + default_db_snapshot: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the default arguments that will be used to execute the node command. Can be overridden. + pub fn with_default_args(self, args: Vec) -> Self { + Self::transition( + RelaychainConfig { + default_args: args, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the location of a pre-existing chain specification for the relay chain. + pub fn with_chain_spec_path(self, location: impl Into) -> Self { + Self::transition( + RelaychainConfig { + chain_spec_path: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the location of a wasm to override the chain-spec. + pub fn with_wasm_override(self, location: impl Into) -> Self { + Self::transition( + RelaychainConfig { + wasm_override: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the chain-spec command _template_ for the relay chain. 
+ pub fn with_chain_spec_command(self, cmd_template: impl Into) -> Self { + Self::transition( + RelaychainConfig { + chain_spec_command: Some(cmd_template.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the runtime path to use for generating the chain-spec and an optiona preset. + /// If the preset is not set, we will try to match [`local_testnet`, `development`, `dev`] + /// with the available ones and fallback to the default configuration as last option. + pub fn with_chain_spec_runtime( + self, + location: impl Into, + preset: Option<&str>, + ) -> Self { + let chain_spec_runtime = if let Some(preset) = preset { + ChainSpecRuntime::with_preset(location.into(), preset.to_string()) + } else { + ChainSpecRuntime::new(location.into()) + }; + Self::transition( + RelaychainConfig { + chain_spec_runtime: Some(chain_spec_runtime), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set if the chain-spec command needs to be run locally or not (false by default) + pub fn chain_spec_command_is_local(self, choice: bool) -> Self { + Self::transition( + RelaychainConfig { + chain_spec_command_is_local: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the output path for the chain-spec command. + pub fn with_chain_spec_command_output_path(self, output_path: &str) -> Self { + Self::transition( + RelaychainConfig { + chain_spec_command_output_path: Some(output_path.to_string()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the number of `random nominators` to create for chains using staking, this is used in tandem with `max_nominations` to simulate the amount of nominators and nominations. 
+ pub fn with_random_nominators_count(self, random_nominators_count: u32) -> Self { + Self::transition( + RelaychainConfig { + random_nominators_count: Some(random_nominators_count), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the maximum number of nominations to create per nominator. + pub fn with_max_nominations(self, max_nominations: u8) -> Self { + Self::transition( + RelaychainConfig { + max_nominations: Some(max_nominations), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the genesis overrides as a JSON object. + pub fn with_genesis_overrides(self, genesis_overrides: impl Into) -> Self { + Self::transition( + RelaychainConfig { + runtime_genesis_patch: Some(genesis_overrides.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Add a new validator node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a validator (authority) with the --validator flag. + pub fn with_validator( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> RelaychainConfigBuilder { + match self.create_node_builder(f).validator(true).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: [self.config.nodes, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new full node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a full node (non-validator). 
+ pub fn with_fullnode( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> RelaychainConfigBuilder { + match self.create_node_builder(f).validator(false).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: [self.config.nodes, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new node using a nested [`NodeConfigBuilder`]. + /// + /// **Deprecated**: Use [`with_validator`] for validator nodes or [`with_fullnode`] for full nodes instead. + #[deprecated( + since = "0.4.0", + note = "Use `with_validator()` for validator nodes or `with_fullnode()` for full nodes instead" + )] + pub fn with_node( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> RelaychainConfigBuilder { + match self.create_node_builder(f).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: vec![node], + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new group node using a nested [`GroupNodeConfigBuilder`]. 
+ pub fn with_node_group( + self, + f: impl FnOnce(GroupNodeConfigBuilder) -> GroupNodeConfigBuilder, + ) -> RelaychainConfigBuilder { + match f(GroupNodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + .build() + { + Ok(group_node) => Self::transition( + RelaychainConfig { + node_groups: vec![group_node], + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Set the location or inline value of a json to override the raw chain-spec. + pub fn with_raw_spec_override(self, overrides: impl Into) -> Self { + Self::transition( + RelaychainConfig { + raw_spec_override: Some(overrides.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } +} + +impl RelaychainConfigBuilder { + /// Add a new validator node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a validator (authority) with the --validator flag. + pub fn with_validator( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> RelaychainConfigBuilder { + match self.create_node_builder(f).validator(true).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: [self.config.nodes, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new full node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a full node (non-validator). 
+ pub fn with_fullnode( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> Self { + match self.create_node_builder(f).validator(false).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: [self.config.nodes, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new node using a nested [`NodeConfigBuilder`]. + /// + /// **Deprecated**: Use [`with_validator`] for validator nodes or [`with_fullnode`] for full nodes instead. + #[deprecated( + since = "0.4.0", + note = "Use `with_validator()` for validator nodes or `with_fullnode()` for full nodes instead" + )] + pub fn with_node( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> Self { + match self.create_node_builder(f).build() { + Ok(node) => Self::transition( + RelaychainConfig { + nodes: [self.config.nodes, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new group node using a nested [`GroupNodeConfigBuilder`]. 
+ pub fn with_node_group( + self, + f: impl FnOnce(GroupNodeConfigBuilder) -> GroupNodeConfigBuilder, + ) -> Self { + match f(GroupNodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + .build() + { + Ok(group_node) => Self::transition( + RelaychainConfig { + node_groups: [self.config.node_groups, vec![group_node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Node(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Seals the builder and returns a [`RelaychainConfig`] if there are no validation errors, else returns errors. + pub fn build(self) -> Result> { + if !self.errors.is_empty() { + return Err(self + .errors + .into_iter() + .map(|error| ConfigError::Relaychain(error).into()) + .collect::>()); + } + + Ok(self.config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn relaychain_config_builder_should_succeeds_and_returns_a_relaychain_config() { + let relaychain_config = RelaychainConfigBuilder::new(Default::default()) + .with_chain("polkadot") + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_default_resources(|resources| { + resources + .with_limit_cpu("500M") + .with_limit_memory("1G") + .with_request_cpu("250M") + }) + .with_default_db_snapshot("https://www.urltomysnapshot.com/file.tgz") + .with_chain_spec_path("./path/to/chain/spec.json") + .with_chain_spec_runtime("./path/to/runtime.wasm", Some("local_testnet")) + .with_wasm_override("./path/to/override/runtime.wasm") + .with_raw_spec_override(serde_json::json!({"some_override_key": "some_override_val"})) + .with_default_args(vec![("--arg1", "value1").into(), "--option2".into()]) + .with_random_nominators_count(42) + .with_max_nominations(5) + .with_fullnode(|node| 
node.with_name("node1").bootnode(true)) + .with_validator(|node| node.with_name("node2").with_command("command2")) + .build() + .unwrap(); + + assert_eq!(relaychain_config.chain().as_str(), "polkadot"); + assert_eq!(relaychain_config.nodes().len(), 2); + let &node1 = relaychain_config.nodes().first().unwrap(); + assert_eq!(node1.name(), "node1"); + assert_eq!(node1.command().unwrap().as_str(), "default_command"); + assert!(node1.is_bootnode()); + let &node2 = relaychain_config.nodes().last().unwrap(); + assert_eq!(node2.name(), "node2"); + assert_eq!(node2.command().unwrap().as_str(), "command2"); + assert!(node2.is_validator()); + assert_eq!( + relaychain_config.default_command().unwrap().as_str(), + "default_command" + ); + assert_eq!( + relaychain_config.default_image().unwrap().as_str(), + "myrepo:myimage" + ); + let default_resources = relaychain_config.default_resources().unwrap(); + assert_eq!(default_resources.limit_cpu().unwrap().as_str(), "500M"); + assert_eq!(default_resources.limit_memory().unwrap().as_str(), "1G"); + assert_eq!(default_resources.request_cpu().unwrap().as_str(), "250M"); + assert!(matches!( + relaychain_config.default_db_snapshot().unwrap(), + AssetLocation::Url(value) if value.as_str() == "https://www.urltomysnapshot.com/file.tgz", + )); + assert!(matches!( + relaychain_config.chain_spec_path().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/chain/spec.json" + )); + assert!(matches!( + &relaychain_config.chain_spec_runtime().unwrap().location, + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/runtime.wasm" + )); + assert_eq!( + relaychain_config + .chain_spec_runtime() + .unwrap() + .preset + .as_deref(), + Some("local_testnet") + ); + assert!(matches!( + relaychain_config.wasm_override().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/override/runtime.wasm" + )); + let args: Vec = vec![("--arg1", "value1").into(), "--option2".into()]; + 
assert_eq!( + relaychain_config.default_args(), + args.iter().collect::>() + ); + assert_eq!(relaychain_config.random_nominators_count().unwrap(), 42); + assert_eq!(relaychain_config.max_nominations().unwrap(), 5); + + assert!(matches!( + relaychain_config.raw_spec_override().unwrap(), + JsonOverrides::Json(value) if *value == serde_json::json!({"some_override_key": "some_override_val"}) + )); + } + + #[test] + fn relaychain_config_builder_should_fails_and_returns_an_error_if_chain_is_invalid() { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("invalid chain") + .with_validator(|node| node.with_name("node").with_command("command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.chain: 'invalid chain' shouldn't contains whitespace" + ); + } + + #[test] + fn relaychain_config_builder_should_fails_and_returns_an_error_if_default_command_is_invalid() { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_default_command("invalid command") + .with_validator(|node| node.with_name("node").with_command("command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.default_command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn relaychain_config_builder_should_fails_and_returns_an_error_if_default_image_is_invalid() { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_default_image("invalid image") + .with_validator(|node| node.with_name("node").with_command("command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"relaychain.default_image: 'invalid image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn 
relaychain_config_builder_should_fails_and_returns_an_error_if_default_resources_are_invalid( + ) { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_default_resources(|default_resources| { + default_resources + .with_limit_memory("100m") + .with_request_cpu("invalid") + }) + .with_validator(|node| node.with_name("node").with_command("command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"relaychain.default_resources.request_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn relaychain_config_builder_should_fails_and_returns_an_error_if_first_node_is_invalid() { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_validator(|node| node.with_name("node").with_command("invalid command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes['node'].command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn relaychain_config_builder_with_at_least_one_node_should_fails_and_returns_an_error_if_second_node_is_invalid( + ) { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_validator(|node| node.with_name("node1").with_command("command1")) + .with_validator(|node| node.with_name("node2").with_command("invalid command")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes['node2'].command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn relaychain_config_builder_should_fails_returns_multiple_errors_if_a_node_and_default_resources_are_invalid( + ) { + let errors = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_default_resources(|resources| { + resources + 
.with_request_cpu("100Mi") + .with_limit_memory("1Gi") + .with_limit_cpu("invalid") + }) + .with_validator(|node| node.with_name("node").with_image("invalid image")) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.default_resources.limit_cpu: 'invalid' doesn't match regex '^\\d+(.\\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "relaychain.nodes['node'].image: 'invalid image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn relaychain_config_builder_should_works_with_chain_spec_command() { + const CMD_TPL: &str = "./bin/chain-spec-generator {% raw %} {{chainName}} {% endraw %}"; + let config = RelaychainConfigBuilder::new(Default::default()) + .with_chain("polkadot") + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_chain_spec_command(CMD_TPL) + .with_fullnode(|node| node.with_name("node1").bootnode(true)) + .build() + .unwrap(); + + assert_eq!(config.chain_spec_command(), Some(CMD_TPL)); + assert!(!config.chain_spec_command_is_local()); + } + + #[test] + fn relaychain_config_builder_should_works_with_chain_spec_command_locally() { + const CMD_TPL: &str = "./bin/chain-spec-generator {% raw %} {{chainName}} {% endraw %}"; + let config = RelaychainConfigBuilder::new(Default::default()) + .with_chain("polkadot") + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_chain_spec_command(CMD_TPL) + .chain_spec_command_is_local(true) + .with_fullnode(|node| node.with_name("node1").bootnode(true)) + .build() + .unwrap(); + + assert_eq!(config.chain_spec_command(), Some(CMD_TPL)); + assert!(config.chain_spec_command_is_local()); + } + + #[test] + fn relaychain_with_group_config_should_succeeds_and_returns_a_relaychain_config() { + let relaychain_config = RelaychainConfigBuilder::new(Default::default()) + 
.with_chain("chain") + .with_default_command("command") + .with_validator(|node| node.with_name("node").with_command("node_command")) + .with_node_group(|group| { + group.with_count(2).with_base_node(|base| { + base.with_name("group_node") + .with_command("some_command") + .with_image("repo:image") + .validator(true) + }) + }) + .build() + .unwrap(); + + assert_eq!(relaychain_config.chain().as_str(), "chain"); + assert_eq!(relaychain_config.nodes().len(), 1); + assert_eq!(relaychain_config.group_node_configs().len(), 1); + assert_eq!( + relaychain_config + .group_node_configs() + .first() + .unwrap() + .count, + 2 + ); + let &node = relaychain_config.nodes().first().unwrap(); + assert_eq!(node.name(), "node"); + assert_eq!(node.command().unwrap().as_str(), "node_command"); + + let group_nodes = relaychain_config.group_node_configs(); + let group_base_node = group_nodes.first().unwrap(); + assert_eq!(group_base_node.base_config.name(), "group_node"); + assert_eq!( + group_base_node.base_config.command().unwrap().as_str(), + "some_command" + ); + assert_eq!( + group_base_node.base_config.image().unwrap().as_str(), + "repo:image" + ); + assert!(group_base_node.base_config.is_validator()); + } + + #[test] + fn relaychain_with_group_count_0_config_should_fail() { + let relaychain_config = RelaychainConfigBuilder::new(Default::default()) + .with_chain("chain") + .with_default_command("command") + .with_validator(|node| node.with_name("node").with_command("node_command")) + .with_node_group(|group| { + group.with_count(0).with_base_node(|base| { + base.with_name("group_node") + .with_command("some_command") + .with_image("repo:image") + .validator(true) + }) + }) + .build(); + + let errors: Vec = match relaychain_config { + Ok(_) => vec![], + Err(errs) => errs, + }; + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "relaychain.nodes['group_node'].Count cannot be zero" + ); + } +} diff --git 
a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared.rs new file mode 100644 index 00000000..bb1d7bf1 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared.rs @@ -0,0 +1,6 @@ +pub mod errors; +pub mod helpers; +pub mod macros; +pub mod node; +pub mod resources; +pub mod types; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/errors.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/errors.rs new file mode 100644 index 00000000..1a5fd6a3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/errors.rs @@ -0,0 +1,116 @@ +use super::types::{ParaId, Port}; + +/// An error at the configuration level. +#[derive(thiserror::Error, Debug)] +pub enum ConfigError { + #[error("relaychain.{0}")] + Relaychain(anyhow::Error), + + #[error("teyrchain[{0}].{1}")] + Teyrchain(ParaId, anyhow::Error), + + #[error("global_settings.{0}")] + GlobalSettings(anyhow::Error), + + #[error("nodes['{0}'].{1}")] + Node(String, anyhow::Error), + + #[error("collators['{0}'].{1}")] + Collator(String, anyhow::Error), +} + +/// An error at the field level. 
+#[derive(thiserror::Error, Debug)] +pub enum FieldError { + #[error("name: {0}")] + Name(anyhow::Error), + + #[error("chain: {0}")] + Chain(anyhow::Error), + + #[error("image: {0}")] + Image(anyhow::Error), + + #[error("default_image: {0}")] + DefaultImage(anyhow::Error), + + #[error("command: {0}")] + Command(anyhow::Error), + + #[error("default_command: {0}")] + DefaultCommand(anyhow::Error), + + #[error("bootnodes_addresses[{0}]: '{1}' {2}")] + BootnodesAddress(usize, String, anyhow::Error), + + #[error("genesis_wasm_generator: {0}")] + GenesisWasmGenerator(anyhow::Error), + + #[error("genesis_state_generator: {0}")] + GenesisStateGenerator(anyhow::Error), + + #[error("local_ip: {0}")] + LocalIp(anyhow::Error), + + #[error("default_resources.{0}")] + DefaultResources(anyhow::Error), + + #[error("resources.{0}")] + Resources(anyhow::Error), + + #[error("request_memory: {0}")] + RequestMemory(anyhow::Error), + + #[error("request_cpu: {0}")] + RequestCpu(anyhow::Error), + + #[error("limit_memory: {0}")] + LimitMemory(anyhow::Error), + + #[error("limit_cpu: {0}")] + LimitCpu(anyhow::Error), + + #[error("ws_port: {0}")] + WsPort(anyhow::Error), + + #[error("rpc_port: {0}")] + RpcPort(anyhow::Error), + + #[error("prometheus_port: {0}")] + PrometheusPort(anyhow::Error), + + #[error("p2p_port: {0}")] + P2pPort(anyhow::Error), + + #[error("session_key: {0}")] + SessionKey(anyhow::Error), + + #[error("registration_strategy: {0}")] + RegistrationStrategy(anyhow::Error), +} + +/// A conversion error for shared types across fields. +#[derive(thiserror::Error, Debug, Clone)] +pub enum ConversionError { + #[error("'{0}' shouldn't contains whitespace")] + ContainsWhitespaces(String), + + #[error("'{}' doesn't match regex '{}'", .value, .regex)] + DoesntMatchRegex { value: String, regex: String }, + + #[error("can't be empty")] + CantBeEmpty, + + #[error("deserialize error")] + DeserializeError(String), +} + +/// A validation error for shared types across fields. 
+#[derive(thiserror::Error, Debug, Clone)] +pub enum ValidationError { + #[error("'{0}' is already used across config")] + PortAlreadyUsed(Port), + + #[error("can't be empty")] + CantBeEmpty(), +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/helpers.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/helpers.rs new file mode 100644 index 00000000..5fa50d95 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/helpers.rs @@ -0,0 +1,118 @@ +use std::{cell::RefCell, collections::HashSet, rc::Rc}; + +use support::constants::{BORROWABLE, THIS_IS_A_BUG}; +use tracing::warn; + +use super::{ + errors::ValidationError, + types::{ParaId, Port, ValidationContext}, +}; + +pub fn merge_errors(errors: Vec, new_error: anyhow::Error) -> Vec { + let mut errors = errors; + errors.push(new_error); + + errors +} + +pub fn merge_errors_vecs( + errors: Vec, + new_errors: Vec, +) -> Vec { + let mut errors = errors; + + for new_error in new_errors.into_iter() { + errors.push(new_error); + } + + errors +} + +/// Generates a unique name from a base name and the names already present in a +/// [`ValidationContext`]. +/// +/// Uses [`generate_unique_node_name_from_names()`] internally to ensure uniqueness. +/// Logs a warning if the generated name differs from the original due to duplicates. +pub fn generate_unique_node_name( + node_name: impl Into, + validation_context: Rc>, +) -> String { + let mut context = validation_context + .try_borrow_mut() + .expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}")); + + generate_unique_node_name_from_names(node_name, &mut context.used_nodes_names) +} + +/// Returns `node_name` if it is not already in `names`. +/// +/// Otherwise, appends an incrementing `-{counter}` suffix until a unique name is found, +/// then returns it. Logs a warning when a duplicate is detected. 
+pub fn generate_unique_node_name_from_names( + node_name: impl Into, + names: &mut HashSet, +) -> String { + let node_name = node_name.into(); + + if names.insert(node_name.clone()) { + return node_name; + } + + let mut counter = 1; + let mut candidate = node_name.clone(); + while names.contains(&candidate) { + candidate = format!("{node_name}-{counter}"); + counter += 1; + } + + warn!( + original = %node_name, + adjusted = %candidate, + "Duplicate node name detected." + ); + + names.insert(candidate.clone()); + candidate +} + +pub fn ensure_value_is_not_empty(value: &str) -> Result<(), anyhow::Error> { + if value.is_empty() { + Err(ValidationError::CantBeEmpty().into()) + } else { + Ok(()) + } +} + +pub fn ensure_port_unique( + port: Port, + validation_context: Rc>, +) -> Result<(), anyhow::Error> { + let mut context = validation_context + .try_borrow_mut() + .expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}")); + + if !context.used_ports.contains(&port) { + context.used_ports.push(port); + return Ok(()); + } + + Err(ValidationError::PortAlreadyUsed(port).into()) +} + +pub fn generate_unique_para_id( + para_id: ParaId, + validation_context: Rc>, +) -> String { + let mut context = validation_context + .try_borrow_mut() + .expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}")); + + if let Some(suffix) = context.used_para_ids.get_mut(¶_id) { + *suffix += 1; + format!("{para_id}-{suffix}") + } else { + // insert 0, since will be used next time. + context.used_para_ids.insert(para_id, 0); + para_id.to_string() + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/macros.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/macros.rs new file mode 100644 index 00000000..4797cb82 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/macros.rs @@ -0,0 +1,11 @@ +// Helper to define states of a type. +// We use an enum with no variants because it can't be constructed by definition. +macro_rules! 
states { + ($($ident:ident),*) => { + $( + pub enum $ident {} + )* + }; +} + +pub(crate) use states; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/node.rs new file mode 100644 index 00000000..03613d28 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/node.rs @@ -0,0 +1,1355 @@ +use std::{cell::RefCell, error::Error, fmt::Display, marker::PhantomData, path::PathBuf, rc::Rc}; + +use multiaddr::Multiaddr; +use serde::{ser::SerializeStruct, Deserialize, Serialize}; + +use super::{ + errors::FieldError, + helpers::{ + ensure_port_unique, ensure_value_is_not_empty, generate_unique_node_name, + generate_unique_node_name_from_names, merge_errors, merge_errors_vecs, + }, + macros::states, + resources::ResourcesBuilder, + types::{AssetLocation, ChainDefaultContext, Command, Image, ValidationContext, U128}, +}; +use crate::{ + shared::{ + resources::Resources, + types::{Arg, Port}, + }, + utils::{default_as_true, default_initial_balance}, +}; + +states! { + Buildable, + Initial +} + +/// An environment variable with a name and a value. +/// It can be constructed from a `(&str, &str)`. +/// +/// # Examples: +/// +/// ``` +/// use zombienet_configuration::shared::node::EnvVar; +/// +/// let simple_var: EnvVar = ("FOO", "BAR").into(); +/// +/// assert_eq!( +/// simple_var, +/// EnvVar { +/// name: "FOO".into(), +/// value: "BAR".into() +/// } +/// ) +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct EnvVar { + /// The name of the environment variable. + pub name: String, + + /// The value of the environment variable. + pub value: String, +} + +impl From<(&str, &str)> for EnvVar { + fn from((name, value): (&str, &str)) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } + } +} + +/// A node configuration, with fine-grained configuration options. 
+#[derive(Debug, Clone, Default, PartialEq, Deserialize)] +pub struct NodeConfig { + pub(crate) name: String, + pub(crate) image: Option, + pub(crate) command: Option, + pub(crate) subcommand: Option, + #[serde(default)] + args: Vec, + #[serde(alias = "validator", default = "default_as_true")] + pub(crate) is_validator: bool, + #[serde(alias = "invulnerable", default = "default_as_true")] + pub(crate) is_invulnerable: bool, + #[serde(alias = "bootnode", default)] + pub(crate) is_bootnode: bool, + #[serde(alias = "balance")] + #[serde(default = "default_initial_balance")] + initial_balance: U128, + #[serde(default)] + env: Vec, + #[serde(default)] + bootnodes_addresses: Vec, + pub(crate) resources: Option, + ws_port: Option, + rpc_port: Option, + prometheus_port: Option, + p2p_port: Option, + p2p_cert_hash: Option, + pub(crate) db_snapshot: Option, + /// Optional override for the automatically generated EVM (eth) session key. + /// When set, override the auto-generated key so the seed will not be part of the resulting zombie.json + #[serde(default, skip_serializing_if = "Option::is_none")] + override_eth_key: Option, + #[serde(default)] + // used to skip serialization of fields with defaults to avoid duplication + pub(crate) chain_context: ChainDefaultContext, + pub(crate) node_log_path: Option, + // optional node keystore path override + keystore_path: Option, + /// Keystore key types to generate. + /// Supports short form (e.g., "audi") using predefined schemas, + /// or long form (e.g., "audi_sr") with explicit schema (sr, ed, ec). 
+ #[serde(default)] + keystore_key_types: Vec, +} + +impl Serialize for NodeConfig { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut state = serializer.serialize_struct("NodeConfig", 19)?; + state.serialize_field("name", &self.name)?; + + if self.image == self.chain_context.default_image { + state.skip_field("image")?; + } else { + state.serialize_field("image", &self.image)?; + } + + if self.command == self.chain_context.default_command { + state.skip_field("command")?; + } else { + state.serialize_field("command", &self.command)?; + } + + if self.subcommand.is_none() { + state.skip_field("subcommand")?; + } else { + state.serialize_field("subcommand", &self.subcommand)?; + } + + if self.args.is_empty() || self.args == self.chain_context.default_args { + state.skip_field("args")?; + } else { + state.serialize_field("args", &self.args)?; + } + + state.serialize_field("validator", &self.is_validator)?; + state.serialize_field("invulnerable", &self.is_invulnerable)?; + state.serialize_field("bootnode", &self.is_bootnode)?; + state.serialize_field("balance", &self.initial_balance)?; + + if self.env.is_empty() { + state.skip_field("env")?; + } else { + state.serialize_field("env", &self.env)?; + } + + if self.bootnodes_addresses.is_empty() { + state.skip_field("bootnodes_addresses")?; + } else { + state.serialize_field("bootnodes_addresses", &self.bootnodes_addresses)?; + } + + if self.resources == self.chain_context.default_resources { + state.skip_field("resources")?; + } else { + state.serialize_field("resources", &self.resources)?; + } + + state.serialize_field("ws_port", &self.ws_port)?; + state.serialize_field("rpc_port", &self.rpc_port)?; + state.serialize_field("prometheus_port", &self.prometheus_port)?; + state.serialize_field("p2p_port", &self.p2p_port)?; + state.serialize_field("p2p_cert_hash", &self.p2p_cert_hash)?; + state.serialize_field("override_eth_key", &self.override_eth_key)?; + + if self.db_snapshot == 
self.chain_context.default_db_snapshot { + state.skip_field("db_snapshot")?; + } else { + state.serialize_field("db_snapshot", &self.db_snapshot)?; + } + + if self.node_log_path.is_none() { + state.skip_field("node_log_path")?; + } else { + state.serialize_field("node_log_path", &self.node_log_path)?; + } + + if self.keystore_path.is_none() { + state.skip_field("keystore_path")?; + } else { + state.serialize_field("keystore_path", &self.keystore_path)?; + } + + if self.keystore_key_types.is_empty() { + state.skip_field("keystore_key_types")?; + } else { + state.serialize_field("keystore_key_types", &self.keystore_key_types)?; + } + + state.skip_field("chain_context")?; + state.end() + } +} + +/// A group of nodes configuration +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct GroupNodeConfig { + #[serde(flatten)] + pub(crate) base_config: NodeConfig, + pub(crate) count: usize, +} + +impl GroupNodeConfig { + /// Expands the group into individual node configs. + /// Each node will have the same base configuration, but with unique names and log paths. 
+ pub fn expand_group_configs(&self) -> Vec { + let mut used_names = std::collections::HashSet::new(); + + (0..self.count) + .map(|_index| { + let mut node = self.base_config.clone(); + + let unique_name = generate_unique_node_name_from_names(node.name, &mut used_names); + node.name = unique_name; + + // If base config has a log path, generate unique log path for each node + if let Some(ref base_log_path) = node.node_log_path { + let unique_log_path = if let Some(parent) = base_log_path.parent() { + parent.join(format!("{}.log", node.name)) + } else { + PathBuf::from(format!("{}.log", node.name)) + }; + node.node_log_path = Some(unique_log_path); + } + + node + }) + .collect() + } +} + +impl Serialize for GroupNodeConfig { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut state = serializer.serialize_struct("GroupNodeConfig", 18)?; + state.serialize_field("NodeConfig", &self.base_config)?; + state.serialize_field("count", &self.count)?; + state.end() + } +} + +impl NodeConfig { + /// Node name (should be unique). + pub fn name(&self) -> &str { + &self.name + } + + /// Image to run (only podman/k8s). + pub fn image(&self) -> Option<&Image> { + self.image.as_ref() + } + + /// Command to run the node. + pub fn command(&self) -> Option<&Command> { + self.command.as_ref() + } + + /// Subcommand to run the node. + pub fn subcommand(&self) -> Option<&Command> { + self.subcommand.as_ref() + } + + /// Arguments to use for node. + pub fn args(&self) -> Vec<&Arg> { + self.args.iter().collect() + } + + /// Arguments to use for node. + pub(crate) fn set_args(&mut self, args: Vec) { + self.args = args; + } + + /// Whether the node is a validator. + pub fn is_validator(&self) -> bool { + self.is_validator + } + + /// Whether the node keys must be added to invulnerables. + pub fn is_invulnerable(&self) -> bool { + self.is_invulnerable + } + + /// Whether the node is a bootnode. 
+ pub fn is_bootnode(&self) -> bool { + self.is_bootnode + } + + /// Node initial balance present in genesis. + pub fn initial_balance(&self) -> u128 { + self.initial_balance.0 + } + + /// Environment variables to set (inside pod for podman/k8s, inside shell for native). + pub fn env(&self) -> Vec<&EnvVar> { + self.env.iter().collect() + } + + /// List of node's bootnodes addresses to use. + pub fn bootnodes_addresses(&self) -> Vec<&Multiaddr> { + self.bootnodes_addresses.iter().collect() + } + + /// Default resources. + pub fn resources(&self) -> Option<&Resources> { + self.resources.as_ref() + } + + /// Websocket port to use. + pub fn ws_port(&self) -> Option { + self.ws_port + } + + /// RPC port to use. + pub fn rpc_port(&self) -> Option { + self.rpc_port + } + + /// Prometheus port to use. + pub fn prometheus_port(&self) -> Option { + self.prometheus_port + } + + /// P2P port to use. + pub fn p2p_port(&self) -> Option { + self.p2p_port + } + + /// `libp2p` cert hash to use with `WebRTC` transport. + pub fn p2p_cert_hash(&self) -> Option<&str> { + self.p2p_cert_hash.as_deref() + } + + /// Database snapshot. + pub fn db_snapshot(&self) -> Option<&AssetLocation> { + self.db_snapshot.as_ref() + } + + /// Node log path + pub fn node_log_path(&self) -> Option<&PathBuf> { + self.node_log_path.as_ref() + } + + /// Keystore path + pub fn keystore_path(&self) -> Option<&PathBuf> { + self.keystore_path.as_ref() + } + + /// Override EVM session key to use for the node + pub fn override_eth_key(&self) -> Option<&str> { + self.override_eth_key.as_deref() + } + + /// Keystore key types to generate. + /// Returns the list of key type specifications (short form like "audi" or long form like "audi_sr"). + pub fn keystore_key_types(&self) -> Vec<&str> { + self.keystore_key_types.iter().map(String::as_str).collect() + } +} + +/// A node configuration builder, used to build a [`NodeConfig`] declaratively with fields validation. 
+pub struct NodeConfigBuilder { + config: NodeConfig, + validation_context: Rc>, + errors: Vec, + _state: PhantomData, +} + +impl Default for NodeConfigBuilder { + fn default() -> Self { + Self { + config: NodeConfig { + name: "".into(), + image: None, + command: None, + subcommand: None, + args: vec![], + is_validator: true, + is_invulnerable: true, + is_bootnode: false, + initial_balance: 2_000_000_000_000.into(), + env: vec![], + bootnodes_addresses: vec![], + resources: None, + ws_port: None, + rpc_port: None, + prometheus_port: None, + p2p_port: None, + p2p_cert_hash: None, + db_snapshot: None, + override_eth_key: None, + chain_context: Default::default(), + node_log_path: None, + keystore_path: None, + keystore_key_types: vec![], + }, + validation_context: Default::default(), + errors: vec![], + _state: PhantomData, + } + } +} + +impl NodeConfigBuilder { + fn transition( + config: NodeConfig, + validation_context: Rc>, + errors: Vec, + ) -> NodeConfigBuilder { + NodeConfigBuilder { + config, + validation_context, + errors, + _state: PhantomData, + } + } +} + +impl NodeConfigBuilder { + pub fn new( + chain_context: ChainDefaultContext, + validation_context: Rc>, + ) -> Self { + Self::transition( + NodeConfig { + command: chain_context.default_command.clone(), + image: chain_context.default_image.clone(), + resources: chain_context.default_resources.clone(), + db_snapshot: chain_context.default_db_snapshot.clone(), + args: chain_context.default_args.clone(), + chain_context, + ..Self::default().config + }, + validation_context, + vec![], + ) + } + + /// Set the name of the node. 
+ pub fn with_name + Copy>(self, name: T) -> NodeConfigBuilder { + let name: String = generate_unique_node_name(name, self.validation_context.clone()); + + match ensure_value_is_not_empty(&name) { + Ok(_) => Self::transition( + NodeConfig { + name, + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(e) => Self::transition( + NodeConfig { + // we still set the name in error case to display error path + name, + ..self.config + }, + self.validation_context, + merge_errors(self.errors, FieldError::Name(e).into()), + ), + } + } +} + +impl NodeConfigBuilder { + /// Set the command that will be executed to launch the node. Override the default. + pub fn with_command(self, command: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match command.try_into() { + Ok(command) => Self::transition( + NodeConfig { + command: Some(command), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::Command(error.into()).into()), + ), + } + } + + /// Set the subcommand that will be executed to launch the node. + pub fn with_subcommand(self, subcommand: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match subcommand.try_into() { + Ok(subcommand) => Self::transition( + NodeConfig { + subcommand: Some(subcommand), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::Command(error.into()).into()), + ), + } + } + + /// Set the image that will be used for the node (only podman/k8s). Override the default. 
+ pub fn with_image(self, image: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match image.try_into() { + Ok(image) => Self::transition( + NodeConfig { + image: Some(image), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::Image(error.into()).into()), + ), + } + } + + /// Set the arguments that will be used when launching the node. Override the default. + pub fn with_args(self, args: Vec) -> Self { + Self::transition( + NodeConfig { + args, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set whether the node is a validator. + pub fn validator(self, choice: bool) -> Self { + Self::transition( + NodeConfig { + is_validator: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set whether the node is invulnerable. + pub fn invulnerable(self, choice: bool) -> Self { + Self::transition( + NodeConfig { + is_invulnerable: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set whether the node is a bootnode. + pub fn bootnode(self, choice: bool) -> Self { + Self::transition( + NodeConfig { + is_bootnode: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Override the EVM session key to use for the node + pub fn with_override_eth_key(self, session_key: impl Into) -> Self { + Self::transition( + NodeConfig { + override_eth_key: Some(session_key.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the node initial balance. + pub fn with_initial_balance(self, initial_balance: u128) -> Self { + Self::transition( + NodeConfig { + initial_balance: initial_balance.into(), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the node environment variables that will be used when launched. Override the default. 
+ pub fn with_env(self, env: Vec>) -> Self { + let env = env.into_iter().map(|var| var.into()).collect::>(); + + Self::transition( + NodeConfig { env, ..self.config }, + self.validation_context, + self.errors, + ) + } + + /// Set the bootnodes addresses that the node will try to connect to. Override the default. + /// + /// Note: Bootnode address replacements are NOT supported here. + /// Only arguments (`args`) support dynamic replacements. Bootnode addresses must be a valid address. + pub fn with_raw_bootnodes_addresses(self, bootnodes_addresses: Vec) -> Self + where + T: TryInto + Display + Copy, + T::Error: Error + Send + Sync + 'static, + { + let mut addrs = vec![]; + let mut errors = vec![]; + + for (index, addr) in bootnodes_addresses.into_iter().enumerate() { + match addr.try_into() { + Ok(addr) => addrs.push(addr), + Err(error) => errors.push( + FieldError::BootnodesAddress(index, addr.to_string(), error.into()).into(), + ), + } + } + + Self::transition( + NodeConfig { + bootnodes_addresses: addrs, + ..self.config + }, + self.validation_context, + merge_errors_vecs(self.errors, errors), + ) + } + + /// Set the resources limits what will be used for the node (only podman/k8s). Override the default. + pub fn with_resources(self, f: impl FnOnce(ResourcesBuilder) -> ResourcesBuilder) -> Self { + match f(ResourcesBuilder::new()).build() { + Ok(resources) => Self::transition( + NodeConfig { + resources: Some(resources), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| FieldError::Resources(error).into()) + .collect::>(), + ), + ), + } + } + + /// Set the websocket port that will be exposed. Uniqueness across config will be checked. 
+ pub fn with_ws_port(self, ws_port: Port) -> Self { + match ensure_port_unique(ws_port, self.validation_context.clone()) { + Ok(_) => Self::transition( + NodeConfig { + ws_port: Some(ws_port), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::WsPort(error).into()), + ), + } + } + + /// Set the RPC port that will be exposed. Uniqueness across config will be checked. + pub fn with_rpc_port(self, rpc_port: Port) -> Self { + match ensure_port_unique(rpc_port, self.validation_context.clone()) { + Ok(_) => Self::transition( + NodeConfig { + rpc_port: Some(rpc_port), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::RpcPort(error).into()), + ), + } + } + + /// Set the Prometheus port that will be exposed for metrics. Uniqueness across config will be checked. + pub fn with_prometheus_port(self, prometheus_port: Port) -> Self { + match ensure_port_unique(prometheus_port, self.validation_context.clone()) { + Ok(_) => Self::transition( + NodeConfig { + prometheus_port: Some(prometheus_port), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::PrometheusPort(error).into()), + ), + } + } + + /// Set the P2P port that will be exposed. Uniqueness across config will be checked. 
+ pub fn with_p2p_port(self, p2p_port: Port) -> Self { + match ensure_port_unique(p2p_port, self.validation_context.clone()) { + Ok(_) => Self::transition( + NodeConfig { + p2p_port: Some(p2p_port), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::P2pPort(error).into()), + ), + } + } + + /// Set the P2P cert hash that will be used as part of the multiaddress + /// if and only if the multiaddress is set to use `webrtc`. + pub fn with_p2p_cert_hash(self, p2p_cert_hash: impl Into) -> Self { + Self::transition( + NodeConfig { + p2p_cert_hash: Some(p2p_cert_hash.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the database snapshot that will be used to launch the node. Override the default. + pub fn with_db_snapshot(self, location: impl Into) -> Self { + Self::transition( + NodeConfig { + db_snapshot: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the node log path that will be used to launch the node. + pub fn with_log_path(self, log_path: impl Into) -> Self { + Self::transition( + NodeConfig { + node_log_path: Some(log_path.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the keystore path override. + pub fn with_keystore_path(self, keystore_path: impl Into) -> Self { + Self::transition( + NodeConfig { + keystore_path: Some(keystore_path.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the keystore key types to generate. + /// + /// Each key type can be specified in short form (e.g., "audi") using predefined schemas + /// (defaults to `sr` if no predefined schema exists for the key type), + /// or in long form (e.g., "audi_sr") with an explicit schema (sr, ed, ec). 
+ /// + /// # Examples + /// + /// ``` + /// use zombienet_configuration::shared::{node::NodeConfigBuilder, types::ChainDefaultContext}; + /// + /// let config = NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + /// .with_name("node") + /// .with_keystore_key_types(vec!["audi", "gran", "cust_sr"]) + /// .build() + /// .unwrap(); + /// + /// assert_eq!(config.keystore_key_types(), &["audi", "gran", "cust_sr"]); + /// ``` + pub fn with_keystore_key_types(self, key_types: Vec>) -> Self { + Self::transition( + NodeConfig { + keystore_key_types: key_types.into_iter().map(|k| k.into()).collect(), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Seals the builder and returns a [`NodeConfig`] if there are no validation errors, else returns errors. + pub fn build(self) -> Result)> { + if !self.errors.is_empty() { + return Err((self.config.name.clone(), self.errors)); + } + + Ok(self.config) + } +} + +/// A group node configuration builder, used to build a [`GroupNodeConfig`] declaratively with fields validation. +pub struct GroupNodeConfigBuilder { + base_config: NodeConfig, + count: usize, + validation_context: Rc>, + errors: Vec, + _state: PhantomData, +} + +impl GroupNodeConfigBuilder { + pub fn new( + chain_context: ChainDefaultContext, + validation_context: Rc>, + ) -> Self { + let (errors, base_config) = match NodeConfigBuilder::new( + chain_context.clone(), + validation_context.clone(), + ) + .with_name(" ") // placeholder + .build() + { + Ok(base_config) => (vec![], base_config), + Err((_name, errors)) => (errors, NodeConfig::default()), + }; + + Self { + base_config, + count: 1, + validation_context, + errors, + _state: PhantomData, + } + } + + /// Set the base node config using a closure. 
+ pub fn with_base_node( + mut self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> GroupNodeConfigBuilder { + match f(NodeConfigBuilder::new( + ChainDefaultContext::default(), + self.validation_context.clone(), + )) + .build() + { + Ok(node) => { + self.base_config = node; + GroupNodeConfigBuilder { + base_config: self.base_config, + count: self.count, + validation_context: self.validation_context, + errors: self.errors, + _state: PhantomData, + } + }, + Err((_name, errors)) => { + self.errors.extend(errors); + GroupNodeConfigBuilder { + base_config: self.base_config, + count: self.count, + validation_context: self.validation_context, + errors: self.errors, + _state: PhantomData, + } + }, + } + } + + /// Set the number of nodes in the group. + pub fn with_count(mut self, count: usize) -> Self { + self.count = count; + self + } +} + +impl GroupNodeConfigBuilder { + pub fn build(self) -> Result)> { + if self.count == 0 { + return Err(( + self.base_config.name().to_string(), + vec![anyhow::anyhow!("Count cannot be zero")], + )); + } + + if !self.errors.is_empty() { + return Err((self.base_config.name().to_string(), self.errors)); + } + + Ok(GroupNodeConfig { + base_config: self.base_config, + count: self.count, + }) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + + #[test] + fn node_config_builder_should_succeeds_and_returns_a_node_config() { + let node_config = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_command("mycommand") + .with_image("myrepo:myimage") + .with_args(vec![("--arg1", "value1").into(), "--option2".into()]) + .validator(true) + .invulnerable(true) + .bootnode(true) + .with_override_eth_key("0x0123456789abcdef0123456789abcdef01234567") + .with_initial_balance(100_000_042) + .with_env(vec![("VAR1", "VALUE1"), ("VAR2", "VALUE2")]) + .with_raw_bootnodes_addresses(vec![ + "/ip4/10.41.122.55/tcp/45421", + "/ip4/51.144.222.10/tcp/2333", + 
]) + .with_resources(|resources| { + resources + .with_request_cpu("200M") + .with_request_memory("500M") + .with_limit_cpu("1G") + .with_limit_memory("2G") + }) + .with_ws_port(5000) + .with_rpc_port(6000) + .with_prometheus_port(7000) + .with_p2p_port(8000) + .with_p2p_cert_hash( + "ec8d6467180a4b72a52b24c53aa1e53b76c05602fa96f5d0961bf720edda267f", + ) + .with_db_snapshot("/tmp/mysnapshot") + .with_keystore_path("/tmp/mykeystore") + .build() + .unwrap(); + + assert_eq!(node_config.name(), "node"); + assert_eq!(node_config.command().unwrap().as_str(), "mycommand"); + assert_eq!(node_config.image().unwrap().as_str(), "myrepo:myimage"); + let args: Vec = vec![("--arg1", "value1").into(), "--option2".into()]; + assert_eq!(node_config.args(), args.iter().collect::>()); + assert!(node_config.is_validator()); + assert!(node_config.is_invulnerable()); + assert!(node_config.is_bootnode()); + assert_eq!( + node_config.override_eth_key(), + Some("0x0123456789abcdef0123456789abcdef01234567") + ); + assert_eq!(node_config.initial_balance(), 100_000_042); + let env: Vec = vec![("VAR1", "VALUE1").into(), ("VAR2", "VALUE2").into()]; + assert_eq!(node_config.env(), env.iter().collect::>()); + let bootnodes_addresses: Vec = vec![ + "/ip4/10.41.122.55/tcp/45421".try_into().unwrap(), + "/ip4/51.144.222.10/tcp/2333".try_into().unwrap(), + ]; + assert_eq!( + node_config.bootnodes_addresses(), + bootnodes_addresses.iter().collect::>() + ); + let resources = node_config.resources().unwrap(); + assert_eq!(resources.request_cpu().unwrap().as_str(), "200M"); + assert_eq!(resources.request_memory().unwrap().as_str(), "500M"); + assert_eq!(resources.limit_cpu().unwrap().as_str(), "1G"); + assert_eq!(resources.limit_memory().unwrap().as_str(), "2G"); + assert_eq!(node_config.ws_port().unwrap(), 5000); + assert_eq!(node_config.rpc_port().unwrap(), 6000); + assert_eq!(node_config.prometheus_port().unwrap(), 7000); + assert_eq!(node_config.p2p_port().unwrap(), 8000); + assert_eq!( + 
node_config.p2p_cert_hash().unwrap(), + "ec8d6467180a4b72a52b24c53aa1e53b76c05602fa96f5d0961bf720edda267f" + ); + assert!(matches!( + node_config.db_snapshot().unwrap(), AssetLocation::FilePath(value) if value.to_str().unwrap() == "/tmp/mysnapshot" + )); + assert!(matches!( + node_config.keystore_path().unwrap().to_str().unwrap(), + "/tmp/mykeystore" + )); + } + + #[test] + fn node_config_builder_should_use_unique_name_if_node_name_already_used() { + let mut used_nodes_names = HashSet::new(); + used_nodes_names.insert("mynode".into()); + let validation_context = Rc::new(RefCell::new(ValidationContext { + used_nodes_names, + ..Default::default() + })); + let node_config = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("mynode") + .build() + .unwrap(); + + assert_eq!(node_config.name, "mynode-1"); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_command_is_invalid() { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_command("invalid command") + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_image_is_invalid() { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_image("myinvalid.image") + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "image: 'myinvalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_one_bootnode_address_is_invalid( + ) { + let 
(node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421"]) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_mulitle_errors_and_node_name_if_multiple_bootnode_address_are_invalid( + ) { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"]) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: " + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_resources_has_an_error( + ) { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_resources(|resources| resources.with_limit_cpu("invalid")) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"resources.limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_multiple_errors_and_node_name_if_resources_has_multiple_errors( + ) { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + 
.with_resources(|resources| { + resources + .with_limit_cpu("invalid") + .with_request_memory("invalid") + }) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + r"resources.limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + r"resources.request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_multiple_errors_and_node_name_if_multiple_fields_have_errors( + ) { + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), Default::default()) + .with_name("node") + .with_command("invalid command") + .with_image("myinvalid.image") + .with_resources(|resources| { + resources + .with_limit_cpu("invalid") + .with_request_memory("invalid") + }) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 4); + assert_eq!( + errors.first().unwrap().to_string(), + "command: 'invalid command' shouldn't contains whitespace" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "image: 'myinvalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + assert_eq!( + errors.get(2).unwrap().to_string(), + r"resources.limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + assert_eq!( + errors.get(3).unwrap().to_string(), + r"resources.request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_ws_port_is_already_used( + ) { + let validation_context = Rc::new(RefCell::new(ValidationContext { + used_ports: vec![30333], + ..Default::default() + })); + let (node_name, errors) = + 
NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("node") + .with_ws_port(30333) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "ws_port: '30333' is already used across config" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_rpc_port_is_already_used( + ) { + let validation_context = Rc::new(RefCell::new(ValidationContext { + used_ports: vec![4444], + ..Default::default() + })); + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("node") + .with_rpc_port(4444) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "rpc_port: '4444' is already used across config" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_an_error_and_node_name_if_prometheus_port_is_already_used( + ) { + let validation_context = Rc::new(RefCell::new(ValidationContext { + used_ports: vec![9089], + ..Default::default() + })); + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("node") + .with_prometheus_port(9089) + .build() + .unwrap_err(); + + assert_eq!(node_name, "node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "prometheus_port: '9089' is already used across config" + ); + } + + #[test] + fn node_config_builder_should_fails_and_returns_and_error_and_node_name_if_p2p_port_is_already_used( + ) { + let validation_context = Rc::new(RefCell::new(ValidationContext { + used_ports: vec![45093], + ..Default::default() + })); + let (node_name, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("node") + .with_p2p_port(45093) + .build() + .unwrap_err(); + + assert_eq!(node_name, 
"node"); + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "p2p_port: '45093' is already used across config" + ); + } + + #[test] + fn node_config_builder_should_fails_if_node_name_is_empty() { + let validation_context = Rc::new(RefCell::new(ValidationContext { + ..Default::default() + })); + + let (_, errors) = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context) + .with_name("") + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!(errors.first().unwrap().to_string(), "name: can't be empty"); + } + + #[test] + fn group_default_base_node() { + let validation_context = Rc::new(RefCell::new(ValidationContext::default())); + + let group_config = + GroupNodeConfigBuilder::new(ChainDefaultContext::default(), validation_context.clone()) + .with_base_node(|node| node.with_name("validator")) + .build() + .unwrap(); + + // Check group config + assert_eq!(group_config.count, 1); + assert_eq!(group_config.base_config.name(), "validator"); + } + + #[test] + fn group_custom_base_node() { + let validation_context = Rc::new(RefCell::new(ValidationContext::default())); + let node_config = + NodeConfigBuilder::new(ChainDefaultContext::default(), validation_context.clone()) + .with_name("node") + .with_command("some_command") + .with_image("repo:image") + .validator(true) + .invulnerable(true) + .bootnode(true); + + let group_config = + GroupNodeConfigBuilder::new(ChainDefaultContext::default(), validation_context.clone()) + .with_count(5) + .with_base_node(|_node| node_config) + .build() + .unwrap(); + + // Check group config + assert_eq!(group_config.count, 5); + + assert_eq!(group_config.base_config.name(), "node"); + assert_eq!( + group_config.base_config.command().unwrap().as_str(), + "some_command" + ); + assert_eq!( + group_config.base_config.image().unwrap().as_str(), + "repo:image" + ); + assert!(group_config.base_config.is_validator()); + assert!(group_config.base_config.is_invulnerable()); + 
assert!(group_config.base_config.is_bootnode()); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/resources.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/resources.rs new file mode 100644 index 00000000..11e9723f --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/resources.rs @@ -0,0 +1,489 @@ +use std::error::Error; + +use lazy_static::lazy_static; +use regex::Regex; +use serde::{ + de::{self}, + ser::SerializeStruct, + Deserialize, Serialize, +}; +use support::constants::{SHOULD_COMPILE, THIS_IS_A_BUG}; + +use super::{ + errors::{ConversionError, FieldError}, + helpers::merge_errors, +}; + +/// A resource quantity used to define limits (k8s/podman only). +/// It can be constructed from a `&str` or u64, if it fails, it returns a [`ConversionError`]. +/// Possible optional prefixes are: m, K, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei +/// +/// # Examples +/// +/// ``` +/// use zombienet_configuration::shared::resources::ResourceQuantity; +/// +/// let quantity1: ResourceQuantity = "100000".try_into().unwrap(); +/// let quantity2: ResourceQuantity = "1000m".try_into().unwrap(); +/// let quantity3: ResourceQuantity = "1Gi".try_into().unwrap(); +/// let quantity4: ResourceQuantity = 10_000.into(); +/// +/// assert_eq!(quantity1.as_str(), "100000"); +/// assert_eq!(quantity2.as_str(), "1000m"); +/// assert_eq!(quantity3.as_str(), "1Gi"); +/// assert_eq!(quantity4.as_str(), "10000"); +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ResourceQuantity(String); + +impl ResourceQuantity { + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl TryFrom<&str> for ResourceQuantity { + type Error = ConversionError; + + fn try_from(value: &str) -> Result { + lazy_static! 
{ + static ref RE: Regex = Regex::new(r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$") + .expect(&format!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}")); + } + + if !RE.is_match(value) { + return Err(ConversionError::DoesntMatchRegex { + value: value.to_string(), + regex: r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$".to_string(), + }); + } + + Ok(Self(value.to_string())) + } +} + +impl From for ResourceQuantity { + fn from(value: u64) -> Self { + Self(value.to_string()) + } +} + +/// Resources limits used in the context of podman/k8s. +#[derive(Debug, Default, Clone, PartialEq)] +pub struct Resources { + request_memory: Option, + request_cpu: Option, + limit_memory: Option, + limit_cpu: Option, +} + +#[derive(Serialize, Deserialize)] +struct ResourcesField { + memory: Option, + cpu: Option, +} + +impl Serialize for Resources { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut state = serializer.serialize_struct("Resources", 2)?; + + if self.request_memory.is_some() || self.request_memory.is_some() { + state.serialize_field( + "requests", + &ResourcesField { + memory: self.request_memory.clone(), + cpu: self.request_cpu.clone(), + }, + )?; + } else { + state.skip_field("requests")?; + } + + if self.limit_memory.is_some() || self.limit_memory.is_some() { + state.serialize_field( + "limits", + &ResourcesField { + memory: self.limit_memory.clone(), + cpu: self.limit_cpu.clone(), + }, + )?; + } else { + state.skip_field("limits")?; + } + + state.end() + } +} + +struct ResourcesVisitor; + +impl<'de> de::Visitor<'de> for ResourcesVisitor { + type Value = Resources; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a resources object") + } + + fn visit_map(self, mut map: A) -> Result + where + A: de::MapAccess<'de>, + { + let mut resources: Resources = Resources::default(); + + while let Some((key, value)) = map.next_entry::()? 
{ + match key.as_str() { + "requests" => { + resources.request_memory = value.memory; + resources.request_cpu = value.cpu; + }, + "limits" => { + resources.limit_memory = value.memory; + resources.limit_cpu = value.cpu; + }, + _ => { + return Err(de::Error::unknown_field( + &key, + &["requests", "limits", "cpu", "memory"], + )) + }, + } + } + Ok(resources) + } +} + +impl<'de> Deserialize<'de> for Resources { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_any(ResourcesVisitor) + } +} + +impl Resources { + /// Memory limit applied to requests. + pub fn request_memory(&self) -> Option<&ResourceQuantity> { + self.request_memory.as_ref() + } + + /// CPU limit applied to requests. + pub fn request_cpu(&self) -> Option<&ResourceQuantity> { + self.request_cpu.as_ref() + } + + /// Overall memory limit applied. + pub fn limit_memory(&self) -> Option<&ResourceQuantity> { + self.limit_memory.as_ref() + } + + /// Overall CPU limit applied. + pub fn limit_cpu(&self) -> Option<&ResourceQuantity> { + self.limit_cpu.as_ref() + } +} + +/// A resources builder, used to build a [`Resources`] declaratively with fields validation. +#[derive(Debug, Default)] +pub struct ResourcesBuilder { + config: Resources, + errors: Vec, +} + +impl ResourcesBuilder { + pub fn new() -> ResourcesBuilder { + Self::default() + } + + fn transition(config: Resources, errors: Vec) -> Self { + Self { config, errors } + } + + /// Set the requested memory for a pod. This is the minimum memory allocated for a pod. 
+ pub fn with_request_memory(self, quantity: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match quantity.try_into() { + Ok(quantity) => Self::transition( + Resources { + request_memory: Some(quantity), + ..self.config + }, + self.errors, + ), + Err(error) => Self::transition( + self.config, + merge_errors(self.errors, FieldError::RequestMemory(error.into()).into()), + ), + } + } + + /// Set the requested CPU limit for a pod. This is the minimum CPU allocated for a pod. + pub fn with_request_cpu(self, quantity: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match quantity.try_into() { + Ok(quantity) => Self::transition( + Resources { + request_cpu: Some(quantity), + ..self.config + }, + self.errors, + ), + Err(error) => Self::transition( + self.config, + merge_errors(self.errors, FieldError::RequestCpu(error.into()).into()), + ), + } + } + + /// Set the overall memory limit for a pod. This is the maximum memory threshold for a pod. + pub fn with_limit_memory(self, quantity: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match quantity.try_into() { + Ok(quantity) => Self::transition( + Resources { + limit_memory: Some(quantity), + ..self.config + }, + self.errors, + ), + Err(error) => Self::transition( + self.config, + merge_errors(self.errors, FieldError::LimitMemory(error.into()).into()), + ), + } + } + + /// Set the overall CPU limit for a pod. This is the maximum CPU threshold for a pod. 
+ pub fn with_limit_cpu(self, quantity: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match quantity.try_into() { + Ok(quantity) => Self::transition( + Resources { + limit_cpu: Some(quantity), + ..self.config + }, + self.errors, + ), + Err(error) => Self::transition( + self.config, + merge_errors(self.errors, FieldError::LimitCpu(error.into()).into()), + ), + } + } + + /// Seals the builder and returns a [`Resources`] if there are no validation errors, else returns errors. + pub fn build(self) -> Result> { + if !self.errors.is_empty() { + return Err(self.errors); + } + + Ok(self.config) + } +} + +#[cfg(test)] +#[allow(non_snake_case)] +mod tests { + use super::*; + use crate::NetworkConfig; + + macro_rules! impl_resources_quantity_unit_test { + ($val:literal) => {{ + let resources = ResourcesBuilder::new() + .with_request_memory($val) + .build() + .unwrap(); + + assert_eq!(resources.request_memory().unwrap().as_str(), $val); + assert_eq!(resources.request_cpu(), None); + assert_eq!(resources.limit_cpu(), None); + assert_eq!(resources.limit_memory(), None); + }}; + } + + #[test] + fn converting_a_string_a_resource_quantity_without_unit_should_succeeds() { + impl_resources_quantity_unit_test!("1000"); + } + + #[test] + fn converting_a_str_with_m_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("100m"); + } + + #[test] + fn converting_a_str_with_K_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("50K"); + } + + #[test] + fn converting_a_str_with_M_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("100M"); + } + + #[test] + fn converting_a_str_with_G_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("1G"); + } + + #[test] + fn converting_a_str_with_T_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.01T"); + } + + #[test] + fn 
converting_a_str_with_P_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.00001P"); + } + + #[test] + fn converting_a_str_with_E_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.000000001E"); + } + + #[test] + fn converting_a_str_with_Ki_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("50Ki"); + } + + #[test] + fn converting_a_str_with_Mi_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("100Mi"); + } + + #[test] + fn converting_a_str_with_Gi_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("1Gi"); + } + + #[test] + fn converting_a_str_with_Ti_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.01Ti"); + } + + #[test] + fn converting_a_str_with_Pi_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.00001Pi"); + } + + #[test] + fn converting_a_str_with_Ei_unit_into_a_resource_quantity_should_succeeds() { + impl_resources_quantity_unit_test!("0.000000001Ei"); + } + + #[test] + fn resources_config_builder_should_succeeds_and_returns_a_resources_config() { + let resources = ResourcesBuilder::new() + .with_request_memory("200M") + .with_request_cpu("1G") + .with_limit_cpu("500M") + .with_limit_memory("2G") + .build() + .unwrap(); + + assert_eq!(resources.request_memory().unwrap().as_str(), "200M"); + assert_eq!(resources.request_cpu().unwrap().as_str(), "1G"); + assert_eq!(resources.limit_cpu().unwrap().as_str(), "500M"); + assert_eq!(resources.limit_memory().unwrap().as_str(), "2G"); + } + + #[test] + fn resources_config_toml_import_should_succeeds_and_returns_a_resources_config() { + let load_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0001-big-network.toml").unwrap(); + + let resources = load_from_toml.relaychain().default_resources().unwrap(); + 
assert_eq!(resources.request_memory().unwrap().as_str(), "500M"); + assert_eq!(resources.request_cpu().unwrap().as_str(), "100000"); + assert_eq!(resources.limit_cpu().unwrap().as_str(), "10Gi"); + assert_eq!(resources.limit_memory().unwrap().as_str(), "4000M"); + } + + #[test] + fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_memory() + { + let resources_builder = ResourcesBuilder::new().with_request_memory("invalid"); + + let errors = resources_builder.build().err().unwrap(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_cpu() { + let resources_builder = ResourcesBuilder::new().with_request_cpu("invalid"); + + let errors = resources_builder.build().err().unwrap(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"request_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_memory() { + let resources_builder = ResourcesBuilder::new().with_limit_memory("invalid"); + + let errors = resources_builder.build().err().unwrap(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"limit_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_cpu() { + let resources_builder = ResourcesBuilder::new().with_limit_cpu("invalid"); + + let errors = resources_builder.build().err().unwrap(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"limit_cpu: 'invalid' doesn't match regex 
'^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn resources_config_builder_should_fails_and_returns_multiple_error_if_couldnt_parse_multiple_fields( + ) { + let resources_builder = ResourcesBuilder::new() + .with_limit_cpu("invalid") + .with_request_memory("invalid"); + + let errors = resources_builder.build().err().unwrap(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + r"limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/types.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/types.rs new file mode 100644 index 00000000..765143e3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/shared/types.rs @@ -0,0 +1,930 @@ +use std::{ + collections::{HashMap, HashSet}, + error::Error, + fmt::{self, Display}, + path::PathBuf, + str::FromStr, +}; + +use anyhow::anyhow; +use lazy_static::lazy_static; +use regex::Regex; +use serde::{ + de::{self, IntoDeserializer}, + Deserialize, Deserializer, Serialize, +}; +use support::constants::{INFAILABLE, SHOULD_COMPILE, THIS_IS_A_BUG}; +use tokio::fs; +use url::Url; + +use super::{errors::ConversionError, resources::Resources}; + +/// An alias for a duration in seconds. +pub type Duration = u32; + +/// An alias for a port. +pub type Port = u16; + +/// An alias for a parachain ID. 
+pub type ParaId = u32; + +/// Custom type wrapping u128 to add custom Serialization/Deserialization logic because it's not supported +/// issue tracking the problem: +#[derive(Default, Debug, Clone, PartialEq)] +pub struct U128(pub(crate) u128); + +impl From for U128 { + fn from(value: u128) -> Self { + Self(value) + } +} + +impl TryFrom<&str> for U128 { + type Error = Box; + + fn try_from(value: &str) -> Result { + Ok(Self(value.to_string().parse::()?)) + } +} + +impl Serialize for U128 { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // here we add a prefix to the string to be able to replace the wrapped + // value with "" to a value without "" in the TOML string + serializer.serialize_str(&format!("U128%{}", self.0)) + } +} + +struct U128Visitor; + +impl de::Visitor<'_> for U128Visitor { + type Value = U128; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an integer between 0 and 2^128 − 1.") + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + v.try_into().map_err(de::Error::custom) + } +} + +impl<'de> Deserialize<'de> for U128 { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(U128Visitor) + } +} + +/// A chain name. +/// It can be constructed for an `&str`, if it fails, it will returns a [`ConversionError`]. 
+/// +/// # Examples: +/// ``` +/// use zombienet_configuration::shared::types::Chain; +/// +/// let polkadot: Chain = "polkadot".try_into().unwrap(); +/// let kusama: Chain = "kusama".try_into().unwrap(); +/// let myparachain: Chain = "myparachain".try_into().unwrap(); +/// +/// assert_eq!(polkadot.as_str(), "polkadot"); +/// assert_eq!(kusama.as_str(), "kusama"); +/// assert_eq!(myparachain.as_str(), "myparachain"); +/// ``` +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Chain(String); + +impl TryFrom<&str> for Chain { + type Error = ConversionError; + + fn try_from(value: &str) -> Result { + if value.contains(char::is_whitespace) { + return Err(ConversionError::ContainsWhitespaces(value.to_string())); + } + + if value.is_empty() { + return Err(ConversionError::CantBeEmpty); + } + + Ok(Self(value.to_string())) + } +} + +impl Chain { + pub fn as_str(&self) -> &str { + &self.0 + } +} + +/// A container image. +/// It can be constructed from an `&str` including a combination of name, version, IPv4 or/and hostname, if it fails, it will returns a [`ConversionError`]. 
+///
+/// # Examples:
+/// ```
+/// use zombienet_configuration::shared::types::Image;
+///
+/// let image1: Image = "name".try_into().unwrap();
+/// let image2: Image = "name:version".try_into().unwrap();
+/// let image3: Image = "myrepo.com/name:version".try_into().unwrap();
+/// let image4: Image = "10.15.43.155/name:version".try_into().unwrap();
+///
+/// assert_eq!(image1.as_str(), "name");
+/// assert_eq!(image2.as_str(), "name:version");
+/// assert_eq!(image3.as_str(), "myrepo.com/name:version");
+/// assert_eq!(image4.as_str(), "10.15.43.155/name:version");
+/// ```
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Image(String);
+
+impl TryFrom<&str> for Image {
+    type Error = ConversionError;
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        // NOTE(review): the `.` separators in IP_PART/HOSTNAME_PART are unescaped,
+        // so they match any character — presumably intentional leniency; confirm upstream.
+        static IP_PART: &str = "((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]).){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))";
+        static HOSTNAME_PART: &str = "((([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]).)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]*[A-Za-z0-9]))";
+        static TAG_NAME_PART: &str = "([a-z0-9](-*[a-z0-9])*)";
+        static TAG_VERSION_PART: &str = "([a-z0-9_]([-._a-z0-9])*)";
+        lazy_static! {
+            // Compiled once; the pattern is static so failure would be a bug.
+            static ref RE: Regex = Regex::new(&format!(
+                "^({IP_PART}|{HOSTNAME_PART}/)?{TAG_NAME_PART}(:{TAG_VERSION_PART})?$",
+            ))
+            .expect(&format!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
+        };
+
+        if !RE.is_match(value) {
+            return Err(ConversionError::DoesntMatchRegex {
+                value: value.to_string(),
+                regex: "^([ip]|[hostname]/)?[tag_name]:[tag_version]?$".to_string(),
+            });
+        }
+
+        Ok(Self(value.to_string()))
+    }
+}
+
+impl Image {
+    /// Borrow the image reference as a `&str`.
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+/// A command that will be executed natively (native provider) or in a container (podman/k8s).
+/// It can be constructed from an `&str`; if it fails, it returns a [`ConversionError`].
+///
+/// # Examples:
+/// ```
+/// use zombienet_configuration::shared::types::Command;
+///
+/// let command1: Command = "mycommand".try_into().unwrap();
+/// let command2: Command = "myothercommand".try_into().unwrap();
+///
+/// assert_eq!(command1.as_str(), "mycommand");
+/// assert_eq!(command2.as_str(), "myothercommand");
+/// ```
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct Command(String);
+
+impl TryFrom<&str> for Command {
+    type Error = ConversionError;
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        // NOTE(review): unlike `Chain`, an empty command is accepted here — confirm
+        // whether that is intentional (the `Default` impl below supplies "polkadot").
+        if value.contains(char::is_whitespace) {
+            return Err(ConversionError::ContainsWhitespaces(value.to_string()));
+        }
+
+        Ok(Self(value.to_string()))
+    }
+}
+impl Default for Command {
+    fn default() -> Self {
+        Self(String::from("polkadot"))
+    }
+}
+
+impl Command {
+    /// Borrow the command as a `&str`.
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+/// A command with optional custom arguments, the command will be executed natively (native provider) or in a container (podman/k8s).
+/// It can be constructed from an `&str`; if it fails, it returns a [`ConversionError`].
+///
+/// # Examples:
+/// ```
+/// use zombienet_configuration::shared::types::CommandWithCustomArgs;
+///
+/// let command1: CommandWithCustomArgs = "mycommand --demo=2 --other-flag".try_into().unwrap();
+/// let command2: CommandWithCustomArgs = "my_other_cmd_without_args".try_into().unwrap();
+///
+/// assert_eq!(command1.cmd().as_str(), "mycommand");
+/// assert_eq!(command2.cmd().as_str(), "my_other_cmd_without_args");
+/// ```
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct CommandWithCustomArgs(Command, Vec<Arg>);
+
+impl TryFrom<&str> for CommandWithCustomArgs {
+    type Error = ConversionError;
+
+    fn try_from(value: &str) -> Result<Self, Self::Error> {
+        if value.is_empty() {
+            return Err(ConversionError::CantBeEmpty);
+        }
+
+        let mut parts = value.split_whitespace().collect::<Vec<&str>>();
+        // A non-empty but all-whitespace input yields no tokens; reject it instead
+        // of panicking on `remove(0)`.
+        if parts.is_empty() {
+            return Err(ConversionError::CantBeEmpty);
+        }
+        let cmd = parts
+            .remove(0)
+            .try_into()
+            .expect("tokens from split_whitespace contain no whitespace, so Command::try_from cannot fail");
+        // Each remaining token is parsed through Arg's deserializer so flags,
+        // options and arrays are recognized the same way as in config files.
+        let args = parts
+            .iter()
+            .map(|x| {
+                Arg::deserialize(x.into_deserializer()).map_err(|_: serde_json::Error| {
+                    ConversionError::DeserializeError(String::from(*x))
+                })
+            })
+            .collect::<Result<Vec<Arg>, _>>()?;
+
+        Ok(Self(cmd, args))
+    }
+}
+impl Default for CommandWithCustomArgs {
+    fn default() -> Self {
+        Self("polkadot".try_into().unwrap(), vec![])
+    }
+}
+
+impl CommandWithCustomArgs {
+    /// The command to execute.
+    pub fn cmd(&self) -> &Command {
+        &self.0
+    }
+
+    /// The custom arguments passed to the command.
+    pub fn args(&self) -> &Vec<Arg> {
+        &self.1
+    }
+}
+
+/// A location for a locally or remotely stored asset.
+/// It can be constructed from an [`url::Url`], a [`std::path::PathBuf`] or an `&str`.
+///
+/// # Examples:
+/// ```
+/// use url::Url;
+/// use std::{path::PathBuf, str::FromStr};
+/// use zombienet_configuration::shared::types::AssetLocation;
+///
+/// let url_location: AssetLocation = Url::from_str("https://mycloudstorage.com/path/to/my/file.tgz").unwrap().into();
+/// let url_location2: AssetLocation = "https://mycloudstorage.com/path/to/my/file.tgz".into();
+/// let path_location: AssetLocation = PathBuf::from_str("/tmp/path/to/my/file").unwrap().into();
+/// let path_location2: AssetLocation = "/tmp/path/to/my/file".into();
+///
+/// assert!(matches!(url_location, AssetLocation::Url(value) if value.as_str() == "https://mycloudstorage.com/path/to/my/file.tgz"));
+/// assert!(matches!(url_location2, AssetLocation::Url(value) if value.as_str() == "https://mycloudstorage.com/path/to/my/file.tgz"));
+/// assert!(matches!(path_location, AssetLocation::FilePath(value) if value.to_str().unwrap() == "/tmp/path/to/my/file"));
+/// assert!(matches!(path_location2, AssetLocation::FilePath(value) if value.to_str().unwrap() == "/tmp/path/to/my/file"));
+/// ```
+#[derive(Debug, Clone, PartialEq)]
+pub enum AssetLocation {
+    Url(Url),
+    FilePath(PathBuf),
+}
+
+impl From<Url> for AssetLocation {
+    fn from(value: Url) -> Self {
+        Self::Url(value)
+    }
+}
+
+impl From<PathBuf> for AssetLocation {
+    fn from(value: PathBuf) -> Self {
+        Self::FilePath(value)
+    }
+}
+
+impl From<&str> for AssetLocation {
+    fn from(value: &str) -> Self {
+        // Anything that parses as a URL is treated as remote; everything else
+        // falls back to a local file path (PathBuf::from_str is infallible).
+        if let Ok(parsed_url) = Url::parse(value) {
+            return Self::Url(parsed_url);
+        }
+
+        Self::FilePath(PathBuf::from_str(value).expect(&format!("{INFAILABLE}, {THIS_IS_A_BUG}")))
+    }
+}
+
+impl Display for AssetLocation {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            AssetLocation::Url(value) => write!(f, "{}", value.as_str()),
+            AssetLocation::FilePath(value) => write!(f, "{}", value.display()),
+        }
+    }
+}
+
+impl AssetLocation {
+    /// Get the current asset (from file or url) and return the content
+    pub
async fn get_asset(&self) -> Result<Vec<u8>, anyhow::Error> {
+        let contents = match self {
+            AssetLocation::Url(location) => {
+                let res = reqwest::get(location.as_ref()).await.map_err(|err| {
+                    anyhow!("Error downloading asset from url {location} - {err}")
+                })?;
+
+                // Propagate body-read failures instead of panicking on `unwrap`.
+                res.bytes()
+                    .await
+                    .map_err(|err| {
+                        anyhow!("Error reading asset body from url {location} - {err}")
+                    })?
+                    .into()
+            },
+            AssetLocation::FilePath(filepath) => {
+                tokio::fs::read(filepath).await.map_err(|err| {
+                    anyhow!(
+                        "Error reading asset from path {} - {}",
+                        filepath.to_string_lossy(),
+                        err
+                    )
+                })?
+            },
+        };
+
+        Ok(contents)
+    }
+
+    /// Write asset (from file or url) to the destination path.
+    pub async fn dump_asset(&self, dst_path: impl Into<PathBuf>) -> Result<(), anyhow::Error> {
+        let contents = self.get_asset().await?;
+        fs::write(dst_path.into(), contents).await?;
+        Ok(())
+    }
+}
+
+impl Serialize for AssetLocation {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        // Serialized as the Display form (url string or path string).
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+struct AssetLocationVisitor;
+
+impl de::Visitor<'_> for AssetLocationVisitor {
+    type Value = AssetLocation;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a string")
+    }
+
+    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        // `From<&str>` never fails: non-URLs become file paths.
+        Ok(AssetLocation::from(v))
+    }
+}
+
+impl<'de> Deserialize<'de> for AssetLocation {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_any(AssetLocationVisitor)
+    }
+}
+
+/// A CLI argument passed to an executed command, can be an option with an assigned value or a simple flag to enable/disable a feature.
+/// A flag arg can be constructed from a `&str` and a option arg can be constructed from a `(&str, &str)`.
+///
+/// # Examples:
+/// ```
+/// use zombienet_configuration::shared::types::Arg;
+///
+/// let flag_arg: Arg = "myflag".into();
+/// let option_arg: Arg = ("name", "value").into();
+///
+/// assert!(matches!(flag_arg, Arg::Flag(value) if value == "myflag"));
+/// assert!(matches!(option_arg, Arg::Option(name, value) if name == "name" && value == "value"));
+/// ```
+#[derive(Debug, Clone, PartialEq)]
+pub enum Arg {
+    Flag(String),
+    Option(String, String),
+    Array(String, Vec<String>),
+}
+
+impl From<&str> for Arg {
+    fn from(flag: &str) -> Self {
+        Self::Flag(flag.to_owned())
+    }
+}
+
+impl From<(&str, &str)> for Arg {
+    fn from((option, value): (&str, &str)) -> Self {
+        Self::Option(option.to_owned(), value.to_owned())
+    }
+}
+
+impl<T> From<(&str, &[T])> for Arg
+where
+    T: AsRef<str> + Clone,
+{
+    fn from((option, values): (&str, &[T])) -> Self {
+        Self::Array(
+            option.to_owned(),
+            values.iter().map(|v| v.as_ref().to_string()).collect(),
+        )
+    }
+}
+
+impl<T> From<(&str, Vec<T>)> for Arg
+where
+    T: AsRef<str>,
+{
+    fn from((option, values): (&str, Vec<T>)) -> Self {
+        Self::Array(
+            option.to_owned(),
+            values.into_iter().map(|v| v.as_ref().to_string()).collect(),
+        )
+    }
+}
+
+impl Serialize for Arg {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        // Flags serialize bare, options as `name=value`, arrays as `name=[a,b]` —
+        // the same shapes ArgVisitor parses back.
+        match self {
+            Arg::Flag(value) => serializer.serialize_str(value),
+            Arg::Option(option, value) => serializer.serialize_str(&format!("{option}={value}")),
+            Arg::Array(option, values) => {
+                serializer.serialize_str(&format!("{}=[{}]", option, values.join(",")))
+            },
+        }
+    }
+}
+
+struct ArgVisitor;
+
+impl de::Visitor<'_> for ArgVisitor {
+    type Value = Arg;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a string")
+    }
+
+    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        // covers the "-lruntime=debug,parachain=trace" case
+        // TODO: Make this more generic by adding the scenario in the regex below
+        if v.starts_with("-l") ||
v.starts_with("-log") {
+            return Ok(Arg::Flag(v.to_string()));
+        }
+        // Handle argument removal syntax: -:--flag-name
+        if v.starts_with("-:") {
+            return Ok(Arg::Flag(v.to_string()));
+        }
+        // Named groups reconstructed to match the `captures.name(...)` calls below.
+        let re = Regex::new("^(?<name_prefix>(?<prefix>-{1,2})?(?<name>[a-zA-Z]+(-[a-zA-Z]+)*))((?<separator>=| )(?<value>\\[[^\\]]*\\]|[^ ]+))?$").unwrap();
+
+        let captures = re.captures(v);
+        if let Some(captures) = captures {
+            if let Some(value) = captures.name("value") {
+                let name_prefix = captures
+                    .name("name_prefix")
+                    .expect("BUG: name_prefix capture group missing")
+                    .as_str()
+                    .to_string();
+
+                let val = value.as_str();
+                if val.starts_with('[') && val.ends_with(']') {
+                    // Remove brackets and split by comma
+                    let inner = &val[1..val.len() - 1];
+                    let items: Vec<String> = inner
+                        .split(',')
+                        .map(|s| s.trim().to_string())
+                        .filter(|s| !s.is_empty())
+                        .collect();
+                    return Ok(Arg::Array(name_prefix, items));
+                } else {
+                    return Ok(Arg::Option(name_prefix, val.to_string()));
+                }
+            }
+            if let Some(name_prefix) = captures.name("name_prefix") {
+                return Ok(Arg::Flag(name_prefix.as_str().to_string()));
+            }
+        }
+
+        Err(de::Error::custom(
+            "the provided argument is invalid and doesn't match Arg::Option, Arg::Flag or Arg::Array",
+        ))
+    }
+}
+
+impl<'de> Deserialize<'de> for Arg {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_any(ArgVisitor)
+    }
+}
+
+/// Mutable state threaded through config validation to detect collisions.
+#[derive(Debug, Default, Clone)]
+pub struct ValidationContext {
+    pub used_ports: Vec<Port>,
+    pub used_nodes_names: HashSet<String>,
+    // Store para_id already used
+    // NOTE(review): element types were lost in extraction; assumes a map from
+    // para_id to its use-count for unique_id generation — TODO confirm against
+    // `generate_unique_para_id`.
+    pub used_para_ids: HashMap<u32, usize>,
+}
+
+/// Chain-level defaults inherited by nodes that don't override them.
+#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
+pub struct ChainDefaultContext {
+    pub(crate) default_command: Option<Command>,
+    pub(crate) default_image: Option<Image>,
+    pub(crate) default_resources: Option<Resources>,
+    pub(crate) default_db_snapshot: Option<AssetLocation>,
+    #[serde(default)]
+    pub(crate) default_args: Vec<Arg>,
+}
+
+/// Represent a runtime (.wasm) asset location and an
+/// optional preset to use for chain-spec generation.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ChainSpecRuntime {
+    pub location: AssetLocation,
+    pub preset: Option<String>,
+}
+
+impl ChainSpecRuntime {
+    /// Create a runtime reference with no preset.
+    pub fn new(location: AssetLocation) -> Self {
+        ChainSpecRuntime {
+            location,
+            preset: None,
+        }
+    }
+
+    /// Create a runtime reference with an explicit genesis preset.
+    pub fn with_preset(location: AssetLocation, preset: impl Into<String>) -> Self {
+        ChainSpecRuntime {
+            location,
+            preset: Some(preset.into()),
+        }
+    }
+}
+
+/// Represents a set of JSON overrides for a configuration.
+///
+/// The overrides can be provided as an inline JSON object or loaded from a
+/// separate file via a path or URL.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum JsonOverrides {
+    /// A path or URL pointing to a JSON file containing the overrides.
+    Location(AssetLocation),
+    /// An inline JSON value representing the overrides.
+    Json(serde_json::Value),
+}
+
+impl From<AssetLocation> for JsonOverrides {
+    fn from(value: AssetLocation) -> Self {
+        Self::Location(value)
+    }
+}
+
+impl From<serde_json::Value> for JsonOverrides {
+    fn from(value: serde_json::Value) -> Self {
+        Self::Json(value)
+    }
+}
+
+impl From<&str> for JsonOverrides {
+    fn from(value: &str) -> Self {
+        Self::Location(AssetLocation::from(value))
+    }
+}
+
+impl Display for JsonOverrides {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            JsonOverrides::Location(location) => write!(f, "{location}"),
+            JsonOverrides::Json(json) => write!(f, "{json}"),
+        }
+    }
+}
+
+impl JsonOverrides {
+    /// Resolve the overrides to a concrete JSON value, fetching the asset
+    /// (file or url) when a location was provided.
+    pub async fn get(&self) -> Result<serde_json::Value, anyhow::Error> {
+        let contents = match self {
+            Self::Location(location) => serde_json::from_slice(&location.get_asset().await?)
+ .map_err(|err| anyhow!("Error converting asset to json {location} - {err}")), + Self::Json(json) => Ok(json.clone()), + }; + + contents + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_arg_flag_roundtrip() { + let arg = Arg::from("verbose"); + let serialized = serde_json::to_string(&arg).unwrap(); + let deserialized: Arg = serde_json::from_str(&serialized).unwrap(); + assert_eq!(arg, deserialized); + } + #[test] + fn test_arg_option_roundtrip() { + let arg = Arg::from(("mode", "fast")); + let serialized = serde_json::to_string(&arg).unwrap(); + let deserialized: Arg = serde_json::from_str(&serialized).unwrap(); + assert_eq!(arg, deserialized); + } + + #[test] + fn test_arg_array_roundtrip() { + let arg = Arg::from(("items", ["a", "b", "c"].as_slice())); + + let serialized = serde_json::to_string(&arg).unwrap(); + println!("serialized = {serialized}"); + let deserialized: Arg = serde_json::from_str(&serialized).unwrap(); + assert_eq!(arg, deserialized); + } + + #[test] + fn test_arg_option_valid_input() { + let expected = Arg::from(("--foo", "bar")); + + // name and value delimited with = + let valid = "\"--foo=bar\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + + // name and value delimited with space + let valid = "\"--foo bar\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + + // value contains = + let expected = Arg::from(("--foo", "bar=baz")); + let valid = "\"--foo=bar=baz\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_arg_array_valid_input() { + let expected = Arg::from(("--foo", vec!["bar", "baz"])); + + // name and values delimited with = + let valid = "\"--foo=[bar,baz]\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + + // name and values delimited with space + let valid = "\"--foo [bar,baz]\""; + let 
result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + + // values delimited with commas and space + let valid = "\"--foo [bar , baz]\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + + // empty values array + let expected = Arg::from(("--foo", Vec::<&str>::new())); + let valid = "\"--foo []\""; + let result: Result = serde_json::from_str(valid); + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_arg_invalid_input() { + // missing = or space + let invalid = "\"--foo[bar]\""; + let result: Result = serde_json::from_str(invalid); + assert!(result.is_err()); + + // value contains space + let invalid = "\"--foo=bar baz\""; + let result: Result = serde_json::from_str(invalid); + println!("result = {result:?}"); + assert!(result.is_err()); + } + + #[test] + fn converting_a_str_without_whitespaces_into_a_chain_should_succeeds() { + let got: Result = "mychain".try_into(); + + assert_eq!(got.unwrap().as_str(), "mychain"); + } + + #[test] + fn converting_a_str_containing_tag_name_into_an_image_should_succeeds() { + let got: Result = "myimage".try_into(); + + assert_eq!(got.unwrap().as_str(), "myimage"); + } + + #[test] + fn converting_a_str_containing_tag_name_and_tag_version_into_an_image_should_succeeds() { + let got: Result = "myimage:version".try_into(); + + assert_eq!(got.unwrap().as_str(), "myimage:version"); + } + + #[test] + fn converting_a_str_containing_hostname_and_tag_name_into_an_image_should_succeeds() { + let got: Result = "myrepository.com/myimage".try_into(); + + assert_eq!(got.unwrap().as_str(), "myrepository.com/myimage"); + } + + #[test] + fn converting_a_str_containing_hostname_tag_name_and_tag_version_into_an_image_should_succeeds() + { + let got: Result = "myrepository.com/myimage:version".try_into(); + + assert_eq!(got.unwrap().as_str(), "myrepository.com/myimage:version"); + } + + #[test] + fn 
converting_a_str_containing_ip_and_tag_name_into_an_image_should_succeeds() { + let got: Result = "myrepository.com/myimage".try_into(); + + assert_eq!(got.unwrap().as_str(), "myrepository.com/myimage"); + } + + #[test] + fn converting_a_str_containing_ip_tag_name_and_tag_version_into_an_image_should_succeeds() { + let got: Result = "127.0.0.1/myimage:version".try_into(); + + assert_eq!(got.unwrap().as_str(), "127.0.0.1/myimage:version"); + } + + #[test] + fn converting_a_str_without_whitespaces_into_a_command_should_succeeds() { + let got: Result = "mycommand".try_into(); + + assert_eq!(got.unwrap().as_str(), "mycommand"); + } + + #[test] + fn converting_an_url_into_an_asset_location_should_succeeds() { + let url = Url::from_str("https://mycloudstorage.com/path/to/my/file.tgz").unwrap(); + let got: AssetLocation = url.clone().into(); + + assert!(matches!(got, AssetLocation::Url(value) if value == url)); + } + + #[test] + fn converting_a_pathbuf_into_an_asset_location_should_succeeds() { + let pathbuf = PathBuf::from_str("/tmp/path/to/my/file").unwrap(); + let got: AssetLocation = pathbuf.clone().into(); + + assert!(matches!(got, AssetLocation::FilePath(value) if value == pathbuf)); + } + + #[test] + fn converting_a_str_into_an_url_asset_location_should_succeeds() { + let url = "https://mycloudstorage.com/path/to/my/file.tgz"; + let got: AssetLocation = url.into(); + + assert!(matches!(got, AssetLocation::Url(value) if value == Url::from_str(url).unwrap())); + } + + #[test] + fn converting_a_str_into_an_filepath_asset_location_should_succeeds() { + let filepath = "/tmp/path/to/my/file"; + let got: AssetLocation = filepath.into(); + + assert!(matches!( + got, + AssetLocation::FilePath(value) if value == PathBuf::from_str(filepath).unwrap() + )); + } + + #[test] + fn converting_a_str_into_an_flag_arg_should_succeeds() { + let got: Arg = "myflag".into(); + + assert!(matches!(got, Arg::Flag(flag) if flag == "myflag")); + } + + #[test] + fn 
converting_a_str_tuple_into_an_option_arg_should_succeeds() { + let got: Arg = ("name", "value").into(); + + assert!(matches!(got, Arg::Option(name, value) if name == "name" && value == "value")); + } + + #[test] + fn converting_a_str_with_whitespaces_into_a_chain_should_fails() { + let got: Result = "my chain".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::ContainsWhitespaces(_) + )); + assert_eq!( + got.unwrap_err().to_string(), + "'my chain' shouldn't contains whitespace" + ); + } + + #[test] + fn converting_an_empty_str_into_a_chain_should_fails() { + let got: Result = "".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::CantBeEmpty + )); + assert_eq!(got.unwrap_err().to_string(), "can't be empty"); + } + + #[test] + fn converting_a_str_containing_only_ip_into_an_image_should_fails() { + let got: Result = "127.0.0.1".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::DoesntMatchRegex { value: _, regex: _ } + )); + assert_eq!( + got.unwrap_err().to_string(), + "'127.0.0.1' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn converting_a_str_containing_only_ip_and_tag_version_into_an_image_should_fails() { + let got: Result = "127.0.0.1:version".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::DoesntMatchRegex { value: _, regex: _ } + )); + assert_eq!(got.unwrap_err().to_string(), "'127.0.0.1:version' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'"); + } + + #[test] + fn converting_a_str_containing_only_hostname_into_an_image_should_fails() { + let got: Result = "myrepository.com".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::DoesntMatchRegex { value: _, regex: _ } + )); + assert_eq!(got.unwrap_err().to_string(), "'myrepository.com' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'"); + } + + #[test] + fn 
converting_a_str_containing_only_hostname_and_tag_version_into_an_image_should_fails() { + let got: Result = "myrepository.com:version".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::DoesntMatchRegex { value: _, regex: _ } + )); + assert_eq!(got.unwrap_err().to_string(), "'myrepository.com:version' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'"); + } + + #[test] + fn converting_a_str_with_whitespaces_into_a_command_should_fails() { + let got: Result = "my command".try_into(); + + assert!(matches!( + got.clone().unwrap_err(), + ConversionError::ContainsWhitespaces(_) + )); + assert_eq!( + got.unwrap_err().to_string(), + "'my command' shouldn't contains whitespace" + ); + } + + #[test] + fn test_convert_to_json_overrides() { + let url: AssetLocation = "https://example.com/overrides.json".into(); + assert!(matches!( + url.into(), + JsonOverrides::Location(AssetLocation::Url(_)) + )); + + let path: AssetLocation = "/path/to/overrides.json".into(); + assert!(matches!( + path.into(), + JsonOverrides::Location(AssetLocation::FilePath(_)) + )); + + let inline = serde_json::json!({ "para_id": 2000}); + assert!(matches!( + inline.into(), + JsonOverrides::Json(serde_json::Value::Object(_)) + )); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/teyrchain.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/teyrchain.rs new file mode 100644 index 00000000..a9de4587 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/teyrchain.rs @@ -0,0 +1,1873 @@ +use std::{cell::RefCell, error::Error, fmt::Display, marker::PhantomData, rc::Rc}; + +use anyhow::anyhow; +use multiaddr::Multiaddr; +use serde::{ + de::{self, Visitor}, + ser::SerializeStruct, + Deserialize, Serialize, +}; + +use crate::{ + shared::{ + errors::{ConfigError, FieldError}, + helpers::{generate_unique_para_id, merge_errors, merge_errors_vecs}, + node::{self, GroupNodeConfig, GroupNodeConfigBuilder, NodeConfig, 
NodeConfigBuilder}, + resources::{Resources, ResourcesBuilder}, + types::{ + Arg, AssetLocation, Chain, ChainDefaultContext, Command, Image, ValidationContext, U128, + }, + }, + types::{ChainSpecRuntime, CommandWithCustomArgs, JsonOverrides}, + utils::{default_as_false, default_as_true, default_initial_balance, is_false}, +}; + +/// The registration strategy that will be used for the teyrchain. +#[derive(Debug, Clone, PartialEq)] +pub enum RegistrationStrategy { + /// The teyrchain will be added to the genesis before spawning. + InGenesis, + /// The teyrchain will be registered using an extrinsic after spawning. + UsingExtrinsic, + /// The teyrchaing will not be registered and the user can doit after spawning manually. + Manual, +} + +impl Serialize for RegistrationStrategy { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut state = serializer.serialize_struct("RegistrationStrategy", 1)?; + + match self { + Self::InGenesis => state.serialize_field("add_to_genesis", &true)?, + Self::UsingExtrinsic => state.serialize_field("register_para", &true)?, + Self::Manual => { + state.serialize_field("add_to_genesis", &false)?; + state.serialize_field("register_para", &false)?; + }, + } + + state.end() + } +} + +struct RegistrationStrategyVisitor; + +impl<'de> Visitor<'de> for RegistrationStrategyVisitor { + type Value = RegistrationStrategy; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("struct RegistrationStrategy") + } + + fn visit_map(self, mut map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + let mut add_to_genesis = false; + let mut register_para = false; + + while let Some(key) = map.next_key::()? 
{ + match key.as_str() { + "addToGenesis" | "add_to_genesis" => add_to_genesis = map.next_value()?, + "registerPara" | "register_para" => register_para = map.next_value()?, + _ => { + return Err(de::Error::unknown_field( + &key, + &["add_to_genesis", "register_para"], + )) + }, + } + } + + match (add_to_genesis, register_para) { + (true, false) => Ok(RegistrationStrategy::InGenesis), + (false, true) => Ok(RegistrationStrategy::UsingExtrinsic), + _ => Err(de::Error::missing_field("add_to_genesis or register_para")), + } + } +} + +impl<'de> Deserialize<'de> for RegistrationStrategy { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_struct( + "RegistrationStrategy", + &["add_to_genesis", "register_para"], + RegistrationStrategyVisitor, + ) + } +} + +/// A teyrchain configuration, composed of collators and fine-grained configuration options. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TeyrchainConfig { + id: u32, + #[serde(skip)] + // unique_id is internally used to allow multiple teyrchains with the same id + // BUT, only one of them could be register automatically at spawn + unique_id: String, + chain: Option, + #[serde(flatten)] + registration_strategy: Option, + #[serde( + skip_serializing_if = "super::utils::is_true", + default = "default_as_true" + )] + onboard_as_teyrchain: bool, + #[serde(rename = "balance", default = "default_initial_balance")] + initial_balance: U128, + default_command: Option, + default_image: Option, + default_resources: Option, + default_db_snapshot: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + default_args: Vec, + genesis_wasm_path: Option, + genesis_wasm_generator: Option, + genesis_state_path: Option, + genesis_state_generator: Option, + /// chain-spec to use (location can be url or file path) + chain_spec_path: Option, + /// runtime to use for generating the chain-spec. 
+ /// Location can be url or file path and an optional preset + chain_spec_runtime: Option, + // Path or url to override the runtime (:code) in the chain-spec + wasm_override: Option, + // Full _template_ command, will be rendered using [tera] + // and executed for generate the chain-spec. + // available tokens {{chainName}} / {{disableBootnodes}} + chain_spec_command: Option, + // Does the chain_spec_command needs to be run locally + #[serde(skip_serializing_if = "is_false", default)] + chain_spec_command_is_local: bool, + // Path to the file where the `chain_spec_command` will write the chain-spec into. + // Defaults to /dev/stdout. + chain_spec_command_output_path: Option, + #[serde(rename = "cumulus_based", default = "default_as_true")] + is_cumulus_based: bool, + #[serde(rename = "evm_based", default = "default_as_false")] + is_evm_based: bool, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + bootnodes_addresses: Vec, + #[serde(skip_serializing_if = "is_false", default)] + no_default_bootnodes: bool, + #[serde(rename = "genesis", skip_serializing_if = "Option::is_none")] + genesis_overrides: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + pub(crate) collators: Vec, + // Single collator config, added for backward compatibility + // with `toml` networks definitions from v1. + // This field can only be set loading an old `toml` definition + // with `[teyrchain.collator]` key. + // NOTE: if the file also contains multiple collators defined in + // `[[teyrchain.collators]], the single configuration will be added to the bottom. + pub(crate) collator: Option, + #[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)] + pub(crate) collator_groups: Vec, + // Inline json or asset location to override raw chainspec + raw_spec_override: Option, +} + +impl TeyrchainConfig { + /// The teyrchain ID. + pub fn id(&self) -> u32 { + self.id + } + + /// The teyrchain unique ID. 
+ pub fn unique_id(&self) -> &str { + &self.unique_id + } + + /// The chain name. + pub fn chain(&self) -> Option<&Chain> { + self.chain.as_ref() + } + + /// The registration strategy for the teyrchain. + pub fn registration_strategy(&self) -> Option<&RegistrationStrategy> { + self.registration_strategy.as_ref() + } + + /// Whether the teyrchain should be onboarded or stay a parathread + pub fn onboard_as_teyrchain(&self) -> bool { + self.onboard_as_teyrchain + } + + /// Backward compatibility alias for onboard_as_teyrchain(). + pub fn onboard_as_parachain(&self) -> bool { + self.onboard_as_teyrchain() + } + + /// The initial balance of the teyrchain account. + pub fn initial_balance(&self) -> u128 { + self.initial_balance.0 + } + + /// The default command used for collators. + pub fn default_command(&self) -> Option<&Command> { + self.default_command.as_ref() + } + + /// The default container image used for collators. + pub fn default_image(&self) -> Option<&Image> { + self.default_image.as_ref() + } + + /// The default resources limits used for collators. + pub fn default_resources(&self) -> Option<&Resources> { + self.default_resources.as_ref() + } + + /// The default database snapshot location that will be used for state. + pub fn default_db_snapshot(&self) -> Option<&AssetLocation> { + self.default_db_snapshot.as_ref() + } + + /// The default arguments that will be used to execute the collator command. + pub fn default_args(&self) -> Vec<&Arg> { + self.default_args.iter().collect::>() + } + + /// The location of a pre-existing genesis WASM runtime blob of the teyrchain. + pub fn genesis_wasm_path(&self) -> Option<&AssetLocation> { + self.genesis_wasm_path.as_ref() + } + + /// The generator command used to create the genesis WASM runtime blob of the teyrchain. + pub fn genesis_wasm_generator(&self) -> Option<&Command> { + self.genesis_wasm_generator.as_ref() + } + + /// The location of a pre-existing genesis state of the teyrchain. 
+ pub fn genesis_state_path(&self) -> Option<&AssetLocation> { + self.genesis_state_path.as_ref() + } + + /// The generator command used to create the genesis state of the teyrchain. + pub fn genesis_state_generator(&self) -> Option<&CommandWithCustomArgs> { + self.genesis_state_generator.as_ref() + } + + /// The genesis overrides as a JSON value. + pub fn genesis_overrides(&self) -> Option<&serde_json::Value> { + self.genesis_overrides.as_ref() + } + + /// The location of a pre-existing chain specification for the teyrchain. + pub fn chain_spec_path(&self) -> Option<&AssetLocation> { + self.chain_spec_path.as_ref() + } + + /// The full _template_ command to genera the chain-spec + pub fn chain_spec_command(&self) -> Option<&str> { + self.chain_spec_command.as_deref() + } + + /// Does the chain_spec_command needs to be run locally + pub fn chain_spec_command_is_local(&self) -> bool { + self.chain_spec_command_is_local + } + + /// The file where the `chain_spec_command` will write the chain-spec into. + /// Defaults to /dev/stdout. + pub fn chain_spec_command_output_path(&self) -> Option<&str> { + self.chain_spec_command_output_path.as_deref() + } + + /// Whether the teyrchain is based on cumulus. + pub fn is_cumulus_based(&self) -> bool { + self.is_cumulus_based + } + + /// Whether the teyrchain is evm based (e.g frontier). + pub fn is_evm_based(&self) -> bool { + self.is_evm_based + } + + /// The bootnodes addresses the collators will connect to. + pub fn bootnodes_addresses(&self) -> Vec<&Multiaddr> { + self.bootnodes_addresses.iter().collect::>() + } + + /// Whether to not automatically assign a bootnode role if none of the nodes are marked + /// as bootnodes. + pub fn no_default_bootnodes(&self) -> bool { + self.no_default_bootnodes + } + + /// The collators of the teyrchain. 
+ pub fn collators(&self) -> Vec<&NodeConfig> { + let mut cols = self.collators.iter().collect::>(); + if let Some(col) = self.collator.as_ref() { + cols.push(col); + } + cols + } + + /// The grouped collators of the teyrchain. + pub fn group_collators_configs(&self) -> Vec<&GroupNodeConfig> { + self.collator_groups.iter().collect::>() + } + + /// The location of a wasm runtime to override in the chain-spec. + pub fn wasm_override(&self) -> Option<&AssetLocation> { + self.wasm_override.as_ref() + } + + /// The location of a file or inline json to override raw chain-spec. + pub fn raw_spec_override(&self) -> Option<&JsonOverrides> { + self.raw_spec_override.as_ref() + } + + /// The location of runtime to use by chain-spec builder lib (from `sc-chain-spec` crate) + pub fn chain_spec_runtime(&self) -> Option<&ChainSpecRuntime> { + self.chain_spec_runtime.as_ref() + } +} + +pub mod states { + use crate::shared::macros::states; + + states! { + Initial, + WithId, + WithAtLeastOneCollator + } + + states! { + Bootstrap, + Running + } + + pub trait Context {} + impl Context for Bootstrap {} + impl Context for Running {} +} + +use states::{Bootstrap, Context, Initial, Running, WithAtLeastOneCollator, WithId}; +/// A teyrchain configuration builder, used to build a [`TeyrchainConfig`] declaratively with fields validation. 
+pub struct TeyrchainConfigBuilder { + config: TeyrchainConfig, + validation_context: Rc>, + errors: Vec, + _state: PhantomData, + _context: PhantomData, +} + +impl Default for TeyrchainConfigBuilder { + fn default() -> Self { + Self { + config: TeyrchainConfig { + id: 100, + unique_id: String::from("100"), + chain: None, + registration_strategy: Some(RegistrationStrategy::InGenesis), + onboard_as_teyrchain: true, + initial_balance: 2_000_000_000_000.into(), + default_command: None, + default_image: None, + default_resources: None, + default_db_snapshot: None, + default_args: vec![], + genesis_wasm_path: None, + genesis_wasm_generator: None, + genesis_state_path: None, + genesis_state_generator: None, + genesis_overrides: None, + chain_spec_path: None, + chain_spec_runtime: None, + chain_spec_command: None, + chain_spec_command_output_path: None, + wasm_override: None, + chain_spec_command_is_local: false, // remote by default + is_cumulus_based: true, + is_evm_based: false, + bootnodes_addresses: vec![], + no_default_bootnodes: false, + collators: vec![], + collator: None, + collator_groups: vec![], + raw_spec_override: None, + }, + validation_context: Default::default(), + errors: vec![], + _state: PhantomData, + _context: PhantomData, + } + } +} + +impl TeyrchainConfigBuilder { + fn transition( + config: TeyrchainConfig, + validation_context: Rc>, + errors: Vec, + ) -> TeyrchainConfigBuilder { + TeyrchainConfigBuilder { + config, + validation_context, + errors, + _state: PhantomData, + _context: PhantomData, + } + } + + fn default_chain_context(&self) -> ChainDefaultContext { + ChainDefaultContext { + default_command: self.config.default_command.clone(), + default_image: self.config.default_image.clone(), + default_resources: self.config.default_resources.clone(), + default_db_snapshot: self.config.default_db_snapshot.clone(), + default_args: self.config.default_args.clone(), + } + } + + fn create_node_builder(&self, f: F) -> NodeConfigBuilder + where + F: 
FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + { + f(NodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + } +} + +impl TeyrchainConfigBuilder { + /// Instantiate a new builder that can be used to build a [`TeyrchainConfig`] during the bootstrap phase. + pub fn new( + validation_context: Rc>, + ) -> TeyrchainConfigBuilder { + Self { + validation_context, + ..Self::default() + } + } +} + +impl TeyrchainConfigBuilder { + /// Set the registration strategy for the teyrchain, could be Manual (no registered by zombienet) or automatic + /// using an extrinsic or in genesis. + pub fn with_registration_strategy(self, strategy: RegistrationStrategy) -> Self { + Self::transition( + TeyrchainConfig { + registration_strategy: Some(strategy), + ..self.config + }, + self.validation_context, + self.errors, + ) + } +} + +impl TeyrchainConfigBuilder { + /// Set the registration strategy for the teyrchain, could be Manual (no registered by zombienet) or automatic + /// Using an extrinsic. Genesis option is not allowed in `Running` context. 
+ pub fn with_registration_strategy(self, strategy: RegistrationStrategy) -> Self { + match strategy { + RegistrationStrategy::InGenesis => Self::transition( + self.config, + self.validation_context, + merge_errors( + self.errors, + FieldError::RegistrationStrategy(anyhow!( + "Can be set to InGenesis in Running context" + )) + .into(), + ), + ), + RegistrationStrategy::Manual | RegistrationStrategy::UsingExtrinsic => { + Self::transition( + TeyrchainConfig { + registration_strategy: Some(strategy), + ..self.config + }, + self.validation_context, + self.errors, + ) + }, + } + } +} + +impl TeyrchainConfigBuilder { + /// Start a new builder in the context of a running network + pub fn new_with_running( + validation_context: Rc>, + ) -> TeyrchainConfigBuilder { + let mut builder = Self { + validation_context, + ..Self::default() + }; + + // override the registration strategy + builder.config.registration_strategy = Some(RegistrationStrategy::UsingExtrinsic); + builder + } +} + +impl TeyrchainConfigBuilder { + /// Set the teyrchain ID and the unique_id (with the suffix `-x` if the id is already used) + pub fn with_id(self, id: u32) -> TeyrchainConfigBuilder { + let unique_id = generate_unique_para_id(id, self.validation_context.clone()); + Self::transition( + TeyrchainConfig { + id, + unique_id, + ..self.config + }, + self.validation_context, + self.errors, + ) + } +} + +impl TeyrchainConfigBuilder { + /// Set the chain name (e.g. rococo-local). + /// Use [`None`], if you are running adder-collator or undying-collator). 
+ pub fn with_chain(self, chain: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match chain.try_into() { + Ok(chain) => Self::transition( + TeyrchainConfig { + chain: Some(chain), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::Chain(error.into()).into()), + ), + } + } + + /// Set whether the teyrchain should be onboarded or stay a parathread. Default is ```true```. + pub fn onboard_as_teyrchain(self, choice: bool) -> Self { + Self::transition( + TeyrchainConfig { + onboard_as_teyrchain: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Backward compatibility alias for onboard_as_teyrchain(). + pub fn onboard_as_parachain(self, choice: bool) -> Self { + self.onboard_as_teyrchain(choice) + } + + /// Set the initial balance of the teyrchain account. + pub fn with_initial_balance(self, initial_balance: u128) -> Self { + Self::transition( + TeyrchainConfig { + initial_balance: initial_balance.into(), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the default command used for collators. Can be overridden. + pub fn with_default_command(self, command: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match command.try_into() { + Ok(command) => Self::transition( + TeyrchainConfig { + default_command: Some(command), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::DefaultCommand(error.into()).into()), + ), + } + } + + /// Set the default container image used for collators. Can be overridden. 
+ pub fn with_default_image(self, image: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match image.try_into() { + Ok(image) => Self::transition( + TeyrchainConfig { + default_image: Some(image), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors(self.errors, FieldError::DefaultImage(error.into()).into()), + ), + } + } + + /// Set the default resources limits used for collators. Can be overridden. + pub fn with_default_resources( + self, + f: impl FnOnce(ResourcesBuilder) -> ResourcesBuilder, + ) -> Self { + match f(ResourcesBuilder::new()).build() { + Ok(default_resources) => Self::transition( + TeyrchainConfig { + default_resources: Some(default_resources), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(errors) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| FieldError::DefaultResources(error).into()) + .collect::>(), + ), + ), + } + } + + /// Set the default database snapshot location that will be used for state. Can be overridden. + pub fn with_default_db_snapshot(self, location: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + default_db_snapshot: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the default arguments that will be used to execute the collator command. Can be overridden. + pub fn with_default_args(self, args: Vec) -> Self { + Self::transition( + TeyrchainConfig { + default_args: args, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the location of a pre-existing genesis WASM runtime blob of the teyrchain. 
+ pub fn with_genesis_wasm_path(self, location: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + genesis_wasm_path: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the generator command used to create the genesis WASM runtime blob of the teyrchain. + pub fn with_genesis_wasm_generator(self, command: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match command.try_into() { + Ok(command) => Self::transition( + TeyrchainConfig { + genesis_wasm_generator: Some(command), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors( + self.errors, + FieldError::GenesisWasmGenerator(error.into()).into(), + ), + ), + } + } + + /// Set the location of a pre-existing genesis state of the teyrchain. + pub fn with_genesis_state_path(self, location: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + genesis_state_path: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the generator command used to create the genesis state of the teyrchain. + pub fn with_genesis_state_generator(self, command: T) -> Self + where + T: TryInto, + T::Error: Error + Send + Sync + 'static, + { + match command.try_into() { + Ok(command) => Self::transition( + TeyrchainConfig { + genesis_state_generator: Some(command), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err(error) => Self::transition( + self.config, + self.validation_context, + merge_errors( + self.errors, + FieldError::GenesisStateGenerator(error.into()).into(), + ), + ), + } + } + + /// Set the genesis overrides as a JSON object. 
+ pub fn with_genesis_overrides(self, genesis_overrides: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + genesis_overrides: Some(genesis_overrides.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the location of a pre-existing chain specification for the teyrchain. + pub fn with_chain_spec_path(self, location: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + chain_spec_path: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the chain-spec command _template_ for the relay chain. + pub fn with_chain_spec_command(self, cmd_template: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + chain_spec_command: Some(cmd_template.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the runtime path to use for generating the chain-spec and an optiona preset. + /// If the preset is not set, we will try to match [`local_testnet`, `development`, `dev`] + /// with the available ones and fallback to the default configuration as last option. + pub fn with_chain_spec_runtime( + self, + location: impl Into, + preset: Option<&str>, + ) -> Self { + let chain_spec_runtime = if let Some(preset) = preset { + ChainSpecRuntime::with_preset(location.into(), preset.to_string()) + } else { + ChainSpecRuntime::new(location.into()) + }; + Self::transition( + TeyrchainConfig { + chain_spec_runtime: Some(chain_spec_runtime), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the location of a wasm to override the chain-spec. 
+ pub fn with_wasm_override(self, location: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + wasm_override: Some(location.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set if the chain-spec command needs to be run locally or not (false by default) + pub fn chain_spec_command_is_local(self, choice: bool) -> Self { + Self::transition( + TeyrchainConfig { + chain_spec_command_is_local: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the output path for the chain-spec command. + pub fn with_chain_spec_command_output_path(self, output_path: &str) -> Self { + Self::transition( + TeyrchainConfig { + chain_spec_command_output_path: Some(output_path.to_string()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set whether the teyrchain is based on cumulus (true in a majority of case, except adder or undying collators). + pub fn cumulus_based(self, choice: bool) -> Self { + Self::transition( + TeyrchainConfig { + is_cumulus_based: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Alias for cumulus_based - Set whether the teyrchain is based on pezcumulus/cumulus. + pub fn pezcumulus_based(self, choice: bool) -> Self { + self.cumulus_based(choice) + } + + /// Set whether the teyrchain is evm based (e.g frontier /evm template) + pub fn evm_based(self, choice: bool) -> Self { + Self::transition( + TeyrchainConfig { + is_evm_based: choice, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Set the bootnodes addresses the collators will connect to. + /// + /// Note: Bootnode address replacements are NOT supported here. + /// Only arguments (`args`) support dynamic replacements. Bootnode addresses must be a valid address. 
+ pub fn with_raw_bootnodes_addresses(self, bootnodes_addresses: Vec) -> Self + where + T: TryInto + Display + Copy, + T::Error: Error + Send + Sync + 'static, + { + let mut addrs = vec![]; + let mut errors = vec![]; + + for (index, addr) in bootnodes_addresses.into_iter().enumerate() { + match addr.try_into() { + Ok(addr) => addrs.push(addr), + Err(error) => errors.push( + FieldError::BootnodesAddress(index, addr.to_string(), error.into()).into(), + ), + } + } + + Self::transition( + TeyrchainConfig { + bootnodes_addresses: addrs, + ..self.config + }, + self.validation_context, + merge_errors_vecs(self.errors, errors), + ) + } + + /// Do not assign a bootnode role automatically if no nodes are marked as bootnodes. + pub fn without_default_bootnodes(self) -> Self { + Self::transition( + TeyrchainConfig { + no_default_bootnodes: true, + ..self.config + }, + self.validation_context, + self.errors, + ) + } + + /// Add a new collator using a nested [`NodeConfigBuilder`]. + pub fn with_collator( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> TeyrchainConfigBuilder { + match self.create_node_builder(f).validator(true).build() { + Ok(collator) => Self::transition( + TeyrchainConfig { + collators: vec![collator], + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new full node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a full node (non-validator). 
+ pub fn with_fullnode( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> TeyrchainConfigBuilder { + match self.create_node_builder(f).validator(false).build() { + Ok(node) => Self::transition( + TeyrchainConfig { + collators: vec![node], + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new node using a nested [`NodeConfigBuilder`]. + /// + /// **Deprecated**: Use [`with_collator`] for collator nodes or [`with_fullnode`] for full nodes instead. + #[deprecated( + since = "0.4.0", + note = "Use `with_collator()` for collator nodes or `with_fullnode()` for full nodes instead" + )] + pub fn with_node( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> TeyrchainConfigBuilder { + match self.create_node_builder(f).build() { + Ok(node) => Self::transition( + TeyrchainConfig { + collators: vec![node], + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new collator group using a nested [`GroupNodeConfigBuilder`]. 
+ pub fn with_collator_group( + self, + f: impl FnOnce(GroupNodeConfigBuilder) -> GroupNodeConfigBuilder, + ) -> TeyrchainConfigBuilder { + match f(GroupNodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + .build() + { + Ok(group) => Self::transition( + TeyrchainConfig { + collator_groups: [self.config.collator_groups, vec![group]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Set the location or inline value of json to override the raw chain-spec. + pub fn with_raw_spec_override(self, overrides: impl Into) -> Self { + Self::transition( + TeyrchainConfig { + raw_spec_override: Some(overrides.into()), + ..self.config + }, + self.validation_context, + self.errors, + ) + } +} + +impl TeyrchainConfigBuilder { + /// Add a new collator using a nested [`NodeConfigBuilder`]. + pub fn with_collator( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> Self { + match self.create_node_builder(f).validator(true).build() { + Ok(collator) => Self::transition( + TeyrchainConfig { + collators: [self.config.collators, vec![collator]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new full node using a nested [`NodeConfigBuilder`]. + /// The node will be configured as a full node (non-validator). 
+ pub fn with_fullnode( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> Self { + match self.create_node_builder(f).validator(false).build() { + Ok(node) => Self::transition( + TeyrchainConfig { + collators: [self.config.collators, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new node using a nested [`NodeConfigBuilder`]. + /// + /// **Deprecated**: Use [`with_collator`] for collator nodes or [`with_fullnode`] for full nodes instead. + #[deprecated( + since = "0.4.0", + note = "Use `with_collator()` for collator nodes or `with_fullnode()` for full nodes instead" + )] + pub fn with_node( + self, + f: impl FnOnce(NodeConfigBuilder) -> NodeConfigBuilder, + ) -> Self { + match self.create_node_builder(f).build() { + Ok(node) => Self::transition( + TeyrchainConfig { + collators: [self.config.collators, vec![node]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Add a new collator group using a nested [`GroupNodeConfigBuilder`]. 
+ pub fn with_collator_group( + self, + f: impl FnOnce(GroupNodeConfigBuilder) -> GroupNodeConfigBuilder, + ) -> Self { + match f(GroupNodeConfigBuilder::new( + self.default_chain_context(), + self.validation_context.clone(), + )) + .build() + { + Ok(group) => Self::transition( + TeyrchainConfig { + collator_groups: [self.config.collator_groups, vec![group]].concat(), + ..self.config + }, + self.validation_context, + self.errors, + ), + Err((name, errors)) => Self::transition( + self.config, + self.validation_context, + merge_errors_vecs( + self.errors, + errors + .into_iter() + .map(|error| ConfigError::Collator(name.clone(), error).into()) + .collect::>(), + ), + ), + } + } + + /// Seals the builder and returns a [`TeyrchainConfig`] if there are no validation errors, else returns errors. + pub fn build(self) -> Result> { + if !self.errors.is_empty() { + return Err(self + .errors + .into_iter() + .map(|error| ConfigError::Teyrchain(self.config.id, error).into()) + .collect::>()); + } + + Ok(self.config) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::NetworkConfig; + + #[test] + fn teyrchain_config_builder_should_succeeds_and_returns_a_new_teyrchain_config() { + let teyrchain_config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("mychainname") + .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .onboard_as_teyrchain(false) + .with_initial_balance(100_000_042) + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_default_resources(|resources| { + resources + .with_limit_cpu("500M") + .with_limit_memory("1G") + .with_request_cpu("250M") + }) + .with_default_db_snapshot("https://www.urltomysnapshot.com/file.tgz") + .with_default_args(vec![("--arg1", "value1").into(), "--option2".into()]) + .with_genesis_wasm_path("https://www.backupsite.com/my/wasm/file.tgz") + .with_genesis_wasm_generator("generator_wasm") + .with_genesis_state_path("./path/to/genesis/state") + 
.with_genesis_state_generator( + "undying-collator export-genesis-state --pov-size=10000 --pvf-complexity=1", + ) + .with_chain_spec_path("./path/to/chain/spec.json") + .with_chain_spec_runtime("./path/to/runtime.wasm", Some("dev")) + .with_wasm_override("./path/to/override/runtime.wasm") + .with_raw_spec_override("./path/to/override/rawspec.json") + .cumulus_based(false) + .evm_based(false) + .with_raw_bootnodes_addresses(vec![ + "/ip4/10.41.122.55/tcp/45421", + "/ip4/51.144.222.10/tcp/2333", + ]) + .without_default_bootnodes() + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .bootnode(true) + }) + .with_collator(|collator| { + collator + .with_name("collator2") + .with_command("command2") + .validator(true) + }) + .build() + .unwrap(); + + assert_eq!(teyrchain_config.id(), 1000); + assert_eq!(teyrchain_config.collators().len(), 2); + let &collator1 = teyrchain_config.collators().first().unwrap(); + assert_eq!(collator1.name(), "collator1"); + assert_eq!(collator1.command().unwrap().as_str(), "command1"); + assert!(collator1.is_bootnode()); + let &collator2 = teyrchain_config.collators().last().unwrap(); + assert_eq!(collator2.name(), "collator2"); + assert_eq!(collator2.command().unwrap().as_str(), "command2"); + assert!(collator2.is_validator()); + assert_eq!(teyrchain_config.chain().unwrap().as_str(), "mychainname"); + + assert_eq!( + teyrchain_config.registration_strategy().unwrap(), + &RegistrationStrategy::UsingExtrinsic + ); + assert!(!teyrchain_config.onboard_as_teyrchain()); + assert_eq!(teyrchain_config.initial_balance(), 100_000_042); + assert_eq!( + teyrchain_config.default_command().unwrap().as_str(), + "default_command" + ); + assert_eq!( + teyrchain_config.default_image().unwrap().as_str(), + "myrepo:myimage" + ); + let default_resources = teyrchain_config.default_resources().unwrap(); + assert_eq!(default_resources.limit_cpu().unwrap().as_str(), "500M"); + 
assert_eq!(default_resources.limit_memory().unwrap().as_str(), "1G"); + assert_eq!(default_resources.request_cpu().unwrap().as_str(), "250M"); + assert!(matches!( + teyrchain_config.default_db_snapshot().unwrap(), + AssetLocation::Url(value) if value.as_str() == "https://www.urltomysnapshot.com/file.tgz", + )); + assert!(matches!( + teyrchain_config.chain_spec_path().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/chain/spec.json" + )); + assert!(matches!( + teyrchain_config.wasm_override().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/override/runtime.wasm" + )); + assert!(matches!( + &teyrchain_config.chain_spec_runtime().unwrap().location, + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/runtime.wasm" + )); + assert_eq!( + teyrchain_config + .chain_spec_runtime() + .unwrap() + .preset + .as_deref(), + Some("dev") + ); + + let args: Vec = vec![("--arg1", "value1").into(), "--option2".into()]; + assert_eq!( + teyrchain_config.default_args(), + args.iter().collect::>() + ); + assert!(matches!( + teyrchain_config.genesis_wasm_path().unwrap(), + AssetLocation::Url(value) if value.as_str() == "https://www.backupsite.com/my/wasm/file.tgz" + )); + assert_eq!( + teyrchain_config.genesis_wasm_generator().unwrap().as_str(), + "generator_wasm" + ); + assert!(matches!( + teyrchain_config.genesis_state_path().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == "./path/to/genesis/state" + )); + assert_eq!( + teyrchain_config + .genesis_state_generator() + .unwrap() + .cmd() + .as_str(), + "undying-collator" + ); + + assert_eq!( + teyrchain_config.genesis_state_generator().unwrap().args(), + &vec![ + "export-genesis-state".into(), + ("--pov-size", "10000").into(), + ("--pvf-complexity", "1").into() + ] + ); + + assert!(matches!( + teyrchain_config.chain_spec_path().unwrap(), + AssetLocation::FilePath(value) if value.to_str().unwrap() == 
"./path/to/chain/spec.json" + )); + assert!(!teyrchain_config.is_cumulus_based()); + let bootnodes_addresses: Vec = vec![ + "/ip4/10.41.122.55/tcp/45421".try_into().unwrap(), + "/ip4/51.144.222.10/tcp/2333".try_into().unwrap(), + ]; + assert!(teyrchain_config.no_default_bootnodes()); + assert_eq!( + teyrchain_config.bootnodes_addresses(), + bootnodes_addresses.iter().collect::>() + ); + assert!(!teyrchain_config.is_evm_based()); + assert!(matches!( + teyrchain_config.raw_spec_override().unwrap(), + JsonOverrides::Location(AssetLocation::FilePath(value)) if value.to_str().unwrap() == "./path/to/override/rawspec.json" + )); + } + + #[test] + fn teyrchain_config_builder_should_works_when_genesis_state_generator_contains_args() { + let teyrchain_config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("myteyrchain") + .with_genesis_state_generator("generator_state --simple-flag --flag=value") + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .validator(true) + }) + .build() + .unwrap(); + + assert_eq!( + teyrchain_config + .genesis_state_generator() + .unwrap() + .cmd() + .as_str(), + "generator_state" + ); + + assert_eq!( + teyrchain_config + .genesis_state_generator() + .unwrap() + .args() + .len(), + 2 + ); + + let args = teyrchain_config.genesis_state_generator().unwrap().args(); + + assert_eq!( + args, + &vec![ + Arg::Flag("--simple-flag".into()), + Arg::Option("--flag".into(), "value".into()) + ] + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_chain_is_invalid() { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("invalid chain") + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].chain: 'invalid chain' 
shouldn't contains whitespace" + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_default_command_is_invalid() { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("chain") + .with_default_command("invalid command") + .with_collator(|collator| { + collator + .with_name("node") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].default_command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_default_image_is_invalid() { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("chain") + .with_default_image("invalid image") + .with_collator(|collator| { + collator + .with_name("node") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"teyrchain[1000].default_image: 'invalid image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_default_resources_are_invalid() + { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("chain") + .with_default_resources(|default_resources| { + default_resources + .with_limit_memory("100m") + .with_request_cpu("invalid") + }) + .with_collator(|collator| { + collator + .with_name("node") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + r"teyrchain[1000].default_resources.request_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'" + ); + } + + #[test] + fn 
teyrchain_config_builder_should_fails_and_returns_an_error_if_genesis_wasm_generator_is_invalid( + ) { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_genesis_wasm_generator("invalid command") + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[2000].genesis_wasm_generator: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_bootnodes_addresses_are_invalid( + ) { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"]) + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("command") + .validator(true) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 2); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[2000].bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "teyrchain[2000].bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: " + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_an_error_if_first_collator_is_invalid() { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("myteyrchain") + .with_collator(|collator| { + collator + .with_name("collator") + .with_command("invalid command") + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].collators['collator'].command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn 
teyrchain_config_builder_with_at_least_one_collator_should_fails_and_returns_an_error_if_second_collator_is_invalid( + ) { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .invulnerable(true) + .bootnode(true) + }) + .with_collator(|collator| { + collator + .with_name("collator2") + .with_command("invalid command") + .with_initial_balance(20000000) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[2000].collators['collator2'].command: 'invalid command' shouldn't contains whitespace" + ); + } + + #[test] + fn teyrchain_config_builder_should_fails_and_returns_multiple_errors_if_multiple_fields_are_invalid( + ) { + let errors = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"]) + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("invalid command") + .invulnerable(true) + .bootnode(true) + .with_resources(|resources| { + resources + .with_limit_cpu("invalid") + .with_request_memory("1G") + }) + }) + .with_collator(|collator| { + collator + .with_name("collator2") + .with_command("command2") + .with_image("invalid.image") + .with_initial_balance(20000000) + }) + .build() + .unwrap_err(); + + assert_eq!(errors.len(), 5); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[2000].bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax" + ); + assert_eq!( + errors.get(1).unwrap().to_string(), + "teyrchain[2000].bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: " + ); + assert_eq!( + errors.get(2).unwrap().to_string(), + "teyrchain[2000].collators['collator1'].command: 'invalid command' shouldn't contains 
whitespace" + ); + assert_eq!( + errors.get(3).unwrap().to_string(), + r"teyrchain[2000].collators['collator1'].resources.limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'", + ); + assert_eq!( + errors.get(4).unwrap().to_string(), + "teyrchain[2000].collators['collator2'].image: 'invalid.image' doesn't match regex '^([ip]|[hostname]/)?[tag_name]:[tag_version]?$'" + ); + } + + #[test] + fn import_toml_registration_strategy_should_deserialize() { + let load_from_toml = + NetworkConfig::load_from_toml("./testing/snapshots/0001-big-network.toml").unwrap(); + + for teyrchain in load_from_toml.teyrchains().iter() { + if teyrchain.id() == 1000 { + assert_eq!( + teyrchain.registration_strategy(), + Some(&RegistrationStrategy::UsingExtrinsic) + ); + } + if teyrchain.id() == 2000 { + assert_eq!( + teyrchain.registration_strategy(), + Some(&RegistrationStrategy::InGenesis) + ); + } + } + + let load_from_toml_small = NetworkConfig::load_from_toml( + "./testing/snapshots/0003-small-network_w_teyrchain.toml", + ) + .unwrap(); + + let teyrchain = load_from_toml_small.teyrchains()[0]; + let teyrchain_evm = load_from_toml_small.teyrchains()[1]; + + assert_eq!(teyrchain.registration_strategy(), None); + assert!(!teyrchain.is_evm_based()); + assert_eq!(teyrchain.collators().len(), 1); + assert!(teyrchain_evm.is_evm_based()); + } + + #[test] + fn onboard_as_teyrchain_should_default_to_true() { + let config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert!(config.onboard_as_teyrchain()); + } + + #[test] + fn evm_based_default_to_false() { + let config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert!(!config.is_evm_based()); + } + + #[test] + fn evm_based() { + let 
config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .evm_based(true) + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert!(config.is_evm_based()); + } + + #[test] + fn build_config_in_running_context() { + let config = TeyrchainConfigBuilder::new_with_running(Default::default()) + .with_id(2000) + .with_chain("myteyrchain") + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert_eq!( + config.registration_strategy(), + Some(&RegistrationStrategy::UsingExtrinsic) + ); + } + + #[test] + fn teyrchain_config_builder_should_works_with_chain_spec_command() { + const CMD_TPL: &str = "./bin/chain-spec-generator {% raw %} {{chainName}} {% endraw %}"; + let config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("some-chain") + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_chain_spec_command(CMD_TPL) + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert_eq!(config.chain_spec_command(), Some(CMD_TPL)); + assert!(!config.chain_spec_command_is_local()); + } + + #[test] + fn teyrchain_config_builder_should_works_with_chain_spec_command_and_local() { + const CMD_TPL: &str = "./bin/chain-spec-generator {% raw %} {{chainName}} {% endraw %}"; + let config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(2000) + .with_chain("some-chain") + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .with_chain_spec_command(CMD_TPL) + .chain_spec_command_is_local(true) + .with_collator(|collator| collator.with_name("collator")) + .build() + .unwrap(); + + assert_eq!(config.chain_spec_command(), Some(CMD_TPL)); + assert!(config.chain_spec_command_is_local()); + } + + #[test] + fn teyrchain_with_group_config_builder_should_succeeds_and_returns_a_new_teyrchain_config() { + let teyrchain_config = 
TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("mychainname") + .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .onboard_as_teyrchain(false) + .with_initial_balance(100_000_042) + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .without_default_bootnodes() + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .bootnode(true) + }) + .with_collator_group(|group| { + group.with_count(2).with_base_node(|base| { + base.with_name("collator_group1") + .with_command("group_command1") + .bootnode(true) + }) + }) + .with_collator_group(|group| { + group.with_count(3).with_base_node(|base| { + base.with_name("collator_group2") + .with_command("group_command2") + .bootnode(false) + }) + }) + .build() + .unwrap(); + + assert_eq!(teyrchain_config.id(), 1000); + assert_eq!(teyrchain_config.collators().len(), 1); + assert_eq!(teyrchain_config.group_collators_configs().len(), 2); + + let group_collator1 = teyrchain_config.group_collators_configs()[0].clone(); + assert_eq!(group_collator1.count, 2); + let base_config1 = group_collator1.base_config; + assert_eq!(base_config1.name(), "collator_group1"); + assert_eq!(base_config1.command().unwrap().as_str(), "group_command1"); + assert!(base_config1.is_bootnode()); + + let group_collator2 = teyrchain_config.group_collators_configs()[1].clone(); + assert_eq!(group_collator2.count, 3); + let base_config2 = group_collator2.base_config; + assert_eq!(base_config2.name(), "collator_group2"); + assert_eq!(base_config2.command().unwrap().as_str(), "group_command2"); + assert!(!base_config2.is_bootnode()); + } + + #[test] + fn teyrchain_with_group_count_0_config_builder_should_fail() { + let teyrchain_config = TeyrchainConfigBuilder::new(Default::default()) + .with_id(1000) + .with_chain("mychainname") + .with_registration_strategy(RegistrationStrategy::UsingExtrinsic) + .onboard_as_teyrchain(false) + 
.with_initial_balance(100_000_042) + .with_default_image("myrepo:myimage") + .with_default_command("default_command") + .without_default_bootnodes() + .with_collator(|collator| { + collator + .with_name("collator1") + .with_command("command1") + .bootnode(true) + }) + .with_collator_group(|group| { + group.with_count(2).with_base_node(|base| { + base.with_name("collator_group1") + .with_command("group_command1") + .bootnode(true) + }) + }) + .with_collator_group(|group| { + group.with_count(0).with_base_node(|base| { + base.with_name("collator_group2") + .with_command("group_command2") + .bootnode(false) + }) + }) + .build(); + + let errors: Vec = match teyrchain_config { + Ok(_) => vec![], + Err(errs) => errs, + }; + + assert_eq!(errors.len(), 1); + assert_eq!( + errors.first().unwrap().to_string(), + "teyrchain[1000].collators['collator_group2'].Count cannot be zero" + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/utils.rs b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/utils.rs new file mode 100644 index 00000000..676c4738 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/src/utils.rs @@ -0,0 +1,65 @@ +use std::env; + +use support::constants::ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS; + +use crate::types::{Chain, Command, Duration}; + +pub(crate) fn is_true(value: &bool) -> bool { + *value +} + +pub(crate) fn is_false(value: &bool) -> bool { + !(*value) +} + +pub(crate) fn default_as_true() -> bool { + true +} + +pub(crate) fn default_as_false() -> bool { + false +} + +pub(crate) fn default_initial_balance() -> crate::types::U128 { + 2_000_000_000_000.into() +} + +/// Default timeout for spawning a node (10mins) +pub(crate) fn default_node_spawn_timeout() -> Duration { + env::var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS) + .ok() + .and_then(|s| s.parse::().ok()) + .unwrap_or(600) +} + +/// Default timeout for spawning the whole network (1hr) +pub(crate) fn default_timeout() -> Duration { + 3600 +} + +pub(crate) fn 
default_command_polkadot() -> Option { + TryInto::::try_into("polkadot").ok() +} + +pub(crate) fn default_relaychain_chain() -> Chain { + TryInto::::try_into("rococo-local").expect("'rococo-local' should be a valid chain") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_node_spawn_timeout_works_before_and_after_env_is_set() { + // The default should be 600 seconds if the env var is not set + assert_eq!(default_node_spawn_timeout(), 600); + + // If env var is set to a valid number, it should return that number + env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "123"); + assert_eq!(default_node_spawn_timeout(), 123); + + // If env var is set to a NOT valid number, it should return 600 + env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "NOT_A_NUMBER"); + assert_eq!(default_node_spawn_timeout(), 600); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0000-small-network.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0000-small-network.toml new file mode 100644 index 00000000..eb3ddd3f --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0000-small-network.toml @@ -0,0 +1,25 @@ +[settings] +timeout = 3600 +node_spawn_timeout = 600 +tear_down_on_failure = true + +[relaychain] +chain = "rococo-local" +default_command = "polkadot" +default_image = "docker.io/parity/polkadot:latest" +default_args = ["-lparachain=debug"] + +[[relaychain.nodes]] +name = "alice" +validator = true +invulnerable = true +bootnode = false +balance = 2000000000000 + +[[relaychain.nodes]] +name = "bob" +args = ["--database=paritydb-experimental"] +validator = true +invulnerable = false +bootnode = true +balance = 2000000000000 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0001-big-network.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0001-big-network.toml new file mode 100644 index 00000000..7f7772a1 --- /dev/null 
+++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0001-big-network.toml @@ -0,0 +1,105 @@ +[settings] +timeout = 3600 +node_spawn_timeout = 600 +tear_down_on_failure = true + +[relaychain] +chain = "polkadot" +default_command = "polkadot" +default_image = "docker.io/parity/polkadot:latest" + +[relaychain.default_resources.requests] +memory = "500M" +cpu = "100000" + +[relaychain.default_resources.limits] +memory = "4000M" +cpu = "10Gi" + +[[relaychain.nodes]] +name = "alice" +validator = true +invulnerable = true +bootnode = true +balance = 1000000000 + +[[relaychain.nodes]] +name = "bob" +validator = true +invulnerable = true +bootnode = true +balance = 2000000000000 + +[[teyrchains]] +id = 1000 +chain = "myparachain" +register_para = true +onboard_as_teyrchain = false +balance = 2000000000000 +default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" +chain_spec_path = "/path/to/my/chain/spec.json" +cumulus_based = true +evm_based = false + +[[teyrchains.collators]] +name = "john" +validator = true +invulnerable = true +bootnode = true +balance = 5000000000 + +[[teyrchains.collators]] +name = "charles" +validator = false +invulnerable = true +bootnode = true +balance = 0 + +[[teyrchains.collators]] +name = "frank" +validator = true +invulnerable = false +bootnode = true +balance = 1000000000 + +[[teyrchains]] +id = 2000 +chain = "myotherparachain" +add_to_genesis = true +balance = 2000000000000 +chain_spec_path = "/path/to/my/other/chain/spec.json" +cumulus_based = true +evm_based = false + +[[teyrchains.collators]] +name = "mike" +validator = true +invulnerable = true +bootnode = true +balance = 5000000000 + +[[teyrchains.collators]] +name = "georges" +validator = false +invulnerable = true +bootnode = true +balance = 0 + +[[teyrchains.collators]] +name = "victor" +validator = true +invulnerable = false +bootnode = true +balance = 1000000000 + +[[hrmp_channels]] +sender = 1000 +recipient = 2000 +max_capacity = 150 
+max_message_size = 5000 + +[[hrmp_channels]] +sender = 2000 +recipient = 1000 +max_capacity = 200 +max_message_size = 8000 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0002-overridden-defaults.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0002-overridden-defaults.toml new file mode 100644 index 00000000..a634cd33 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0002-overridden-defaults.toml @@ -0,0 +1,76 @@ +[settings] +timeout = 3600 +node_spawn_timeout = 600 +tear_down_on_failure = true + +[relaychain] +chain = "polkadot" +default_command = "polkadot" +default_image = "docker.io/parity/polkadot:latest" +default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" +default_args = [ + "-name=value", + "--flag", +] + +[relaychain.default_resources.requests] +memory = "500M" +cpu = "100000" + +[relaychain.default_resources.limits] +memory = "4000M" +cpu = "10Gi" + +[[relaychain.nodes]] +name = "alice" +validator = true +invulnerable = true +bootnode = true +balance = 1000000000 + +[[relaychain.nodes]] +name = "bob" +image = "mycustomimage:latest" +command = "my-custom-command" +args = ["-myothername=value"] +validator = true +invulnerable = true +bootnode = true +balance = 2000000000000 +db_snapshot = "https://storage.com/path/to/other/db_snapshot.tgz" + +[relaychain.nodes.resources.requests] +memory = "250Mi" +cpu = "1000" + +[relaychain.nodes.resources.limits] +memory = "2Gi" +cpu = "5Gi" + +[[teyrchains]] +id = 1000 +chain = "myparachain" +add_to_genesis = true +balance = 2000000000000 +default_command = "my-default-command" +default_image = "mydefaultimage:latest" +default_db_snapshot = "https://storage.com/path/to/other_snapshot.tgz" +chain_spec_path = "/path/to/my/chain/spec.json" +cumulus_based = true +evm_based = false + +[[teyrchains.collators]] +name = "john" +image = "anotherimage:latest" +command = "my-non-default-command" +validator = true 
+invulnerable = true +bootnode = true +balance = 5000000000 + +[[teyrchains.collators]] +name = "charles" +validator = false +invulnerable = true +bootnode = true +balance = 0 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0003-small-network_w_teyrchain.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0003-small-network_w_teyrchain.toml new file mode 100644 index 00000000..9d89e1d3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0003-small-network_w_teyrchain.toml @@ -0,0 +1,57 @@ +[settings] +timeout = 3600 +node_spawn_timeout = 600 + +[relaychain] +chain = "rococo-local" +default_command = "polkadot" +default_image = "docker.io/parity/polkadot:latest" +default_args = ["-lparachain=debug"] + +[[relaychain.nodes]] +name = "alice" +validator = true +invulnerable = true +bootnode = false +balance = 2000000000000 + +[[relaychain.nodes]] +name = "bob" +args = ["--database=paritydb-experimental"] +validator = true +invulnerable = false +bootnode = true +balance = 2000000000000 + +[[teyrchains]] +id = 1000 +chain = "myparachain" +onboard_as_teyrchain = false +balance = 2000000000000 +default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" +chain_spec_path = "/path/to/my/chain/spec.json" +cumulus_based = true + +[teyrchains.collator] +name = "john" +validator = true +invulnerable = true +bootnode = true +balance = 5000000000 + +[[teyrchains]] +id = 1000 +chain = "myparachain" +onboard_as_teyrchain = false +balance = 2000000000000 +default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" +chain_spec_path = "/path/to/my/chain/spec.json" +cumulus_based = true +evm_based = true + +[[teyrchains.collators]] +name = "john" +validator = true +invulnerable = true +bootnode = true +balance = 5000000000 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0004-small-network-without-settings.toml 
b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0004-small-network-without-settings.toml new file mode 100644 index 00000000..ae56b56c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0004-small-network-without-settings.toml @@ -0,0 +1,9 @@ +[relaychain] +chain = "rococo-local" +default_command = "polkadot" + +[[relaychain.nodes]] +name = "alice" + +[[relaychain.nodes]] +name = "bob" \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0005-small-networl-with-wasm-override.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0005-small-networl-with-wasm-override.toml new file mode 100644 index 00000000..4981a832 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0005-small-networl-with-wasm-override.toml @@ -0,0 +1,17 @@ +[relaychain] +chain = "rococo-local" +default_command = "polkadot" +wasm_override = "/some/path/runtime.wasm" + +[[relaychain.nodes]] +name = "alice" + +[[relaychain.nodes]] +name = "bob" + +[[teyrchains]] +id = 1000 +wasm_override = "https://some.com/runtime.wasm" + +[teyrchains.collator] +name = "john" diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0006-without-rc-chain-name.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0006-without-rc-chain-name.toml new file mode 100644 index 00000000..8b23d7f6 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0006-without-rc-chain-name.toml @@ -0,0 +1,27 @@ +[relaychain] +default_command = "polkadot" +chain_spec_path = "./rc.json" + +[[relaychain.nodes]] +name = "alice" +validator = true +rpc_port = 9944 + +[[relaychain.nodes]] +name = "bob" +validator = true +rpc_port = 9945 +args = [ + "-lruntime::system=debug,runtime::session=trace,runtime::staking::ah-client=trace,runtime::ah-client=debug", +] + +[[teyrchains]] +id = 1100 
+chain_spec_path = "./parachain.json" + +[teyrchains.collator] +name = "charlie" +rpc_port = 9946 +args = [ + "-lruntime::system=debug,runtime::multiblock-election=trace,runtime::staking=debug,runtime::staking::rc-client=trace,runtime::rc-client=debug", +] \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0007-small-network_w_teyrchain_w_duplicate_node_names.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0007-small-network_w_teyrchain_w_duplicate_node_names.toml new file mode 100644 index 00000000..7b69052c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0007-small-network_w_teyrchain_w_duplicate_node_names.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 3600 +node_spawn_timeout = 600 + +[relaychain] +chain = "rococo-local" +default_command = "polkadot" +default_image = "docker.io/parity/polkadot:latest" +default_args = ["-lparachain=debug"] + +[[relaychain.nodes]] +name = "alice" +validator = true +invulnerable = true +bootnode = false +balance = 2000000000000 + +[[relaychain.nodes]] +name = "bob" +args = ["--database=paritydb-experimental"] +validator = true +invulnerable = false +bootnode = true +balance = 2000000000000 + +[[teyrchains]] +id = 1000 +chain = "myparachain" +onboard_as_teyrchain = false +balance = 2000000000000 +default_db_snapshot = "https://storage.com/path/to/db_snapshot.tgz" +chain_spec_path = "/path/to/my/chain/spec.json" +cumulus_based = true + +[teyrchains.collator] +name = "alice" +validator = true +invulnerable = true +bootnode = true +balance = 5000000000 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0008-small-network-with-raw-spec-override.toml b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0008-small-network-with-raw-spec-override.toml new file mode 100644 index 00000000..149a2b48 --- /dev/null +++ 
b/vendor/pezkuwi-zombienet-sdk/crates/configuration/testing/snapshots/0008-small-network-with-raw-spec-override.toml @@ -0,0 +1,17 @@ +[relaychain] +chain = "rococo-local" +default_command = "polkadot" +raw_spec_override = "/some/path/raw_spec_override.json" + +[[relaychain.nodes]] +name = "alice" + +[[relaychain.nodes]] +name = "bob" + +[[teyrchains]] +id = 1000 +raw_spec_override = "https://some.com/raw_spec_override.json" + +[teyrchains.collator] +name = "john" diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/.gitignore b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/.gitignore new file mode 100644 index 00000000..4fffb2f8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/Cargo.toml new file mode 100644 index 00000000..36943aff --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "zombienet-orchestrator" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Zombienet Orchestrator, drive network spwan through providers" +keywords = ["zombienet", "orchestrator", "sdk"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { workspace = true, features = ["time"] } +thiserror = { workspace = true } +multiaddr = { workspace = true } +serde_json = { workspace = true, features = ["arbitrary_precision"] } +futures = { workspace = true } +anyhow = { workspace = true } +rand = { workspace = true } +sha2 = { workspace = true, default-features = false } +hex = { workspace = true } +sp-core = { workspace = true } +libp2p = { workspace = true } +pezkuwi-subxt = { workspace = true } 
+pezkuwi-subxt-signer = { workspace = true } +reqwest = { workspace = true } +tracing = { workspace = true } +uuid = { workspace = true } +regex = { workspace = true } +glob-match = { workspace = true } +async-trait = { workspace = true } +serde = { workspace = true, features = ["derive"] } +libsecp256k1 = { workspace = true } +fancy-regex = { workspace = true } +# staging-chain-spec-builder = { workspace = true } +# parity-scale-codec = { version = "3.7.5", features = ["derive"] } +# sc-chain-spec = {workspace = true, default-features = false} +sc-chain-spec = { workspace = true } +erased-serde = { workspace = true } + +# Zombienet deps +configuration = { workspace = true } +support = { workspace = true } +provider = { workspace = true } +prom-metrics-parser = { workspace = true } + +[dev-dependencies] +toml = { workspace = true } +async-trait = { workspace = true } +lazy_static = { workspace = true } + diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/errors.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/errors.rs new file mode 100644 index 00000000..46f49c31 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/errors.rs @@ -0,0 +1,31 @@ +//! Zombienet Orchestrator error definitions. 
+ +use provider::ProviderError; +use support::fs::FileSystemError; + +use crate::generators; + +#[derive(Debug, thiserror::Error)] +pub enum OrchestratorError { + // TODO: improve invalid config reporting + #[error("Invalid network configuration: {0}")] + InvalidConfig(String), + #[error("Invalid network config to use provider {0}: {1}")] + InvalidConfigForProvider(String, String), + #[error("Invalid configuration for node: {0}, field: {1}")] + InvalidNodeConfig(String, String), + #[error("Invariant not fulfilled {0}")] + InvariantError(&'static str), + #[error("Global network spawn timeout: {0} secs")] + GlobalTimeOut(u32), + #[error("Generator error: {0}")] + GeneratorError(#[from] generators::errors::GeneratorError), + #[error("Provider error")] + ProviderError(#[from] ProviderError), + #[error("FileSystem error")] + FileSystemError(#[from] FileSystemError), + #[error("Serialization error")] + SerializationError(#[from] serde_json::Error), + #[error(transparent)] + SpawnerError(#[from] anyhow::Error), +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators.rs new file mode 100644 index 00000000..e7913eab --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators.rs @@ -0,0 +1,22 @@ +pub mod chain_spec; +pub mod errors; +pub mod key; +pub mod para_artifact; + +mod arg_filter; +mod bootnode_addr; +mod command; +mod identity; +mod keystore; +mod keystore_key_types; +mod port; + +pub use bootnode_addr::generate as generate_node_bootnode_addr; +pub use command::{ + generate_for_cumulus_node as generate_node_command_cumulus, + generate_for_node as generate_node_command, GenCmdOptions, +}; +pub use identity::generate as generate_node_identity; +pub use key::generate as generate_node_keys; +pub use keystore::generate as generate_node_keystore; +pub use port::generate as generate_node_port; diff --git 
a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/arg_filter.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/arg_filter.rs new file mode 100644 index 00000000..ad619959 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/arg_filter.rs @@ -0,0 +1,138 @@ +use configuration::types::Arg; + +/// Parse args to extract those marked for removal (with `-:` prefix). +/// Returns a set of arg names/flags that should be removed from the final command. +/// +/// # Examples +/// - `-:--insecure-validator-i-know-what-i-do` -> removes `--insecure-validator-i-know-what-i-do` +/// - `-:insecure-validator` -> removes `--insecure-validator` (normalized) +/// - `-:--prometheus-port` -> removes `--prometheus-port` +pub fn parse_removal_args(args: &[Arg]) -> Vec { + args.iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) if flag.starts_with("-:") => { + let mut flag_to_exclude = flag[2..].to_string(); + + // Normalize flag format - ensure it starts with -- + if !flag_to_exclude.starts_with("--") { + flag_to_exclude = format!("--{flag_to_exclude}"); + } + + Some(flag_to_exclude) + }, + _ => None, + }) + .collect() +} + +/// Apply arg removals to a vector of string arguments. +/// This filters out any args that match the removal list. 
+/// +/// # Arguments +/// * `args` - The command arguments to filter +/// * `removals` - List of arg names/flags to remove +/// +/// # Returns +/// Filtered vector with specified args removed +pub fn apply_arg_removals(args: Vec, removals: &[String]) -> Vec { + if removals.is_empty() { + return args; + } + + let mut res = Vec::new(); + let mut skip_next = false; + + for (i, arg) in args.iter().enumerate() { + if skip_next { + skip_next = false; + continue; + } + + let should_remove = removals + .iter() + .any(|removal| arg == removal || arg.starts_with(&format!("{removal}="))); + + if should_remove { + // Only skip next if this looks like an option (starts with --) and next arg doesn't start with -- + if !arg.contains("=") && i + 1 < args.len() { + let next_arg = &args[i + 1]; + if !next_arg.starts_with("-") { + skip_next = true; + } + } + continue; + } + + res.push(arg.clone()); + } + + res +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_removal_args() { + let args = vec![ + Arg::Flag("-:--insecure-validator-i-know-what-i-do".to_string()), + Arg::Flag("--validator".to_string()), + Arg::Flag("-:--no-telemetry".to_string()), + ]; + + let removals = parse_removal_args(&args); + assert_eq!(removals.len(), 2); + assert!(removals.contains(&"--insecure-validator-i-know-what-i-do".to_string())); + assert!(removals.contains(&"--no-telemetry".to_string())); + } + + #[test] + fn test_apply_arg_removals_flag() { + let args = vec![ + "--validator".to_string(), + "--insecure-validator-i-know-what-i-do".to_string(), + "--no-telemetry".to_string(), + ]; + let removals = vec!["--insecure-validator-i-know-what-i-do".to_string()]; + let res = apply_arg_removals(args, &removals); + assert_eq!(res.len(), 2); + assert!(res.contains(&"--validator".to_string())); + assert!(res.contains(&"--no-telemetry".to_string())); + assert!(!res.contains(&"--insecure-validator-i-know-what-i-do".to_string())); + } + + #[test] + fn 
test_apply_arg_removals_option_with_equals() { + let args = vec!["--name=alice".to_string(), "--port=30333".to_string()]; + let removals = vec!["--port".to_string()]; + let res = apply_arg_removals(args, &removals); + assert_eq!(res.len(), 1); + assert_eq!(res[0], "--name=alice"); + } + + #[test] + fn test_apply_arg_removals_option_with_space() { + let args = vec![ + "--name".to_string(), + "alice".to_string(), + "--port".to_string(), + "30333".to_string(), + ]; + let removals = vec!["--port".to_string()]; + + let res = apply_arg_removals(args, &removals); + assert_eq!(res.len(), 2); + assert_eq!(res[0], "--name"); + assert_eq!(res[1], "alice"); + } + + #[test] + fn test_apply_arg_removals_empty() { + let args = vec!["--validator".to_string()]; + let removals = vec![]; + + let res = apply_arg_removals(args, &removals); + assert_eq!(res, vec!["--validator".to_string()]); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/bootnode_addr.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/bootnode_addr.rs new file mode 100644 index 00000000..3efa1eac --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/bootnode_addr.rs @@ -0,0 +1,111 @@ +use std::{fmt::Display, net::IpAddr}; + +use super::errors::GeneratorError; + +pub fn generate + Display>( + peer_id: &str, + ip: &IpAddr, + port: u16, + args: &[T], + p2p_cert: &Option, +) -> Result { + let addr = if let Some(index) = args.iter().position(|arg| arg.as_ref().eq("--listen-addr")) { + let listen_value = args + .as_ref() + .get(index + 1) + .ok_or(GeneratorError::BootnodeAddrGeneration( + "can not generate bootnode address from args".into(), + ))? 
+ .to_string(); + + let ip_str = ip.to_string(); + let port_str = port.to_string(); + let mut parts = listen_value.split('/').collect::>(); + parts[2] = &ip_str; + parts[4] = port_str.as_str(); + parts.join("/") + } else { + format!("/ip4/{ip}/tcp/{port}/ws") + }; + + let mut addr_with_peer = format!("{addr}/p2p/{peer_id}"); + if let Some(p2p_cert) = p2p_cert { + addr_with_peer.push_str("/certhash/"); + addr_with_peer.push_str(p2p_cert) + } + Ok(addr_with_peer) +} + +#[cfg(test)] +mod tests { + + use provider::constants::LOCALHOST; + + use super::*; + #[test] + fn generate_for_alice_without_args() { + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec<&str> = vec![]; + let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, &args, &None).unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } + + #[test] + fn generate_for_alice_with_listen_addr() { + // Should override the ip/port + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec = [ + "--some", + "other", + "--listen-addr", + "/ip4/192.168.100.1/tcp/30333/ws", + ] + .iter() + .map(|x| x.to_string()) + .collect(); + let bootnode_addr = + generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None).unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } + + #[test] + fn generate_for_alice_with_listen_addr_without_value_must_fail() { + // Should override the ip/port + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec = ["--some", "other", "--listen-addr"] + .iter() + .map(|x| x.to_string()) + .collect(); + let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None); + + assert!(bootnode_addr.is_err()); + assert!(matches!( + bootnode_addr, + 
Err(GeneratorError::BootnodeAddrGeneration(_)) + )); + } + + #[test] + fn generate_for_alice_withcert() { + let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed + let args: Vec<&str> = vec![]; + let bootnode_addr = generate( + peer_id, + &LOCALHOST, + 5678, + &args, + &Some(String::from("data")), + ) + .unwrap(); + assert_eq!( + &bootnode_addr, + "/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm/certhash/data" + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/chain_spec.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/chain_spec.rs new file mode 100644 index 00000000..d7e374f7 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/chain_spec.rs @@ -0,0 +1,2073 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; + +use anyhow::anyhow; +use configuration::{ + types::{AssetLocation, Chain, ChainSpecRuntime, JsonOverrides, ParaId}, + HrmpChannelConfig, +}; +use provider::{ + constants::NODE_CONFIG_DIR, + types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile}, + DynNamespace, ProviderError, +}; +use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem, replacer::apply_replacements}; +use tokio::process::Command; +use tracing::{debug, info, trace, warn}; + +use super::errors::GeneratorError; +use crate::{ + network_spec::{node::NodeSpec, relaychain::RelaychainSpec, teyrchain::TeyrchainSpec}, + ScopedFilesystem, +}; + +// TODO: (javier) move to state +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum Context { + Relay, + Para { relay_chain: Chain, para_id: ParaId }, +} + +/// Posible chain-spec formats +#[derive(Debug, Clone, Copy)] +enum ChainSpecFormat { + Plain, + Raw, +} +/// Key types to replace in spec +#[derive(Debug, Clone, 
Copy)] +enum KeyType { + Session, + Aura, + Grandpa, +} + +#[derive(Debug, Clone, Copy, Default)] +enum SessionKeyType { + // Default derivarion (e.g `//`) + #[default] + Default, + // Stash detivarion (e.g `///stash`) + Stash, + // EVM session type + Evm, +} + +type MaybeExpectedPath = Option; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum CommandInContext { + Local(String, MaybeExpectedPath), + Remote(String, MaybeExpectedPath), +} + +impl CommandInContext { + fn cmd(&self) -> &str { + match self { + CommandInContext::Local(cmd, _) | CommandInContext::Remote(cmd, _) => cmd.as_ref(), + } + } +} + +#[derive(Debug)] +pub struct ParaGenesisConfig> { + pub(crate) state_path: T, + pub(crate) wasm_path: T, + pub(crate) id: u32, + pub(crate) as_parachain: bool, +} + +/// Presets to check if is not set by the user. +/// We check if the preset is valid for the runtime in order +/// and if non of them are preset we fallback to the `default config`. +const DEFAULT_PRESETS_TO_CHECK: [&str; 3] = ["local_testnet", "development", "dev"]; + +/// Chain-spec builder representation +/// +/// Multiple options are supported, and the current order is: +/// IF [`asset_location`] is _some_ -> Use this chain_spec by copying the file from [`AssetLocation`] +/// ELSE IF [`runtime_location`] is _some_ -> generate the chain-spec using the sc-chain-spec builder. +/// ELSE -> Fallback to use the `default` or customized cmd. + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ChainSpec { + // Name of the spec file, most of the times could be the same as the chain_name. 
(e.g rococo-local) + chain_spec_name: String, + // Location of the chain-spec to use + asset_location: Option, + // Location of the runtime to use + runtime: Option, + maybe_plain_path: Option, + chain_name: Option, + raw_path: Option, + // The binary to build the chain-spec + command: Option, + // Imgae to use for build the chain-spec + image: Option, + // Contex of the network (e.g relay or para) + context: Context, +} + +impl ChainSpec { + pub(crate) fn new(chain_spec_name: impl Into, context: Context) -> Self { + Self { + chain_spec_name: chain_spec_name.into(), + chain_name: None, + maybe_plain_path: None, + asset_location: None, + runtime: None, + raw_path: None, + command: None, + image: None, + context, + } + } + + pub(crate) fn chain_spec_name(&self) -> &str { + self.chain_spec_name.as_ref() + } + + pub(crate) fn chain_name(&self) -> Option<&str> { + self.chain_name.as_deref() + } + + pub(crate) fn set_chain_name(mut self, chain_name: impl Into) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + pub(crate) fn asset_location(mut self, location: AssetLocation) -> Self { + self.asset_location = Some(location); + self + } + + pub(crate) fn runtime(mut self, chain_spec_runtime: ChainSpecRuntime) -> Self { + self.runtime = Some(chain_spec_runtime); + self + } + + pub(crate) fn command( + mut self, + command: impl Into, + is_local: bool, + expected_path: Option<&str>, + ) -> Self { + let maybe_expected_path = expected_path.map(PathBuf::from); + let cmd = if is_local { + CommandInContext::Local(command.into(), maybe_expected_path) + } else { + CommandInContext::Remote(command.into(), maybe_expected_path) + }; + self.command = Some(cmd); + self + } + + pub(crate) fn image(mut self, image: Option) -> Self { + self.image = image; + self + } + + /// Build the chain-spec + /// + /// Chain spec generation flow: + /// if chain_spec_path is set -> use this chain_spec + /// else if runtime_path is set and cmd is compatible with chain-spec-builder -> use 
the chain-spec-builder + /// else if chain_spec_command is set -> use this cmd for generate the chain_spec + /// else -> use the default command. + pub async fn build<'a, T>( + &mut self, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + if self.asset_location.is_none() && self.command.is_none() && self.runtime.is_none() { + return Err(GeneratorError::ChainSpecGeneration( + "Can not build the chain spec without set the command, asset_location or runtime" + .to_string(), + )); + } + + let maybe_plain_spec_path = PathBuf::from(format!("{}-plain.json", self.chain_spec_name)); + + // if asset_location is some, then copy the asset to the `base_dir` of the ns with the name `-plain.json` + if let Some(location) = self.asset_location.as_ref() { + let maybe_plain_spec_full_path = scoped_fs.full_path(maybe_plain_spec_path.as_path()); + location + .dump_asset(maybe_plain_spec_full_path) + .await + .map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "Error {e} dumping location {location:?}" + )) + })?; + } else if let Some(runtime) = self.runtime.as_ref() { + trace!( + "Creating chain-spec with runtime from localtion: {}", + runtime.location + ); + // First dump the runtime into the ns scoped fs, since we want to easily reproduce + let runtime_file_name = PathBuf::from(format!("{}-runtime.wasm", self.chain_spec_name)); + let runtime_path_ns = scoped_fs.full_path(runtime_file_name.as_path()); + runtime + .location + .dump_asset(runtime_path_ns) + .await + .map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "Error {e} dumping location {:?}", + runtime.location + )) + })?; + + // list the presets to check if match with the supplied one or one of the defaults + let runtime_code = scoped_fs.read(runtime_file_name.as_path()).await?; + + let caller: GenesisConfigBuilderRuntimeCaller = + GenesisConfigBuilderRuntimeCaller::new(&runtime_code[..]); + let presets = 
caller.preset_names().map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting default config from runtime should work: {e}" + )) + })?; + + // check the preset to use with this priorities: + // - IF user provide a preset (and if present) use it + // - else (user don't provide preset or the provided one isn't preset) + // check the [`DEFAULT_PRESETS_TO_CHECK`] in order to find one valid + // - If we can't find any valid preset use the `default config` from the runtime + + let preset_to_check = if let Some(preset) = &runtime.preset { + [vec![preset.as_str()], DEFAULT_PRESETS_TO_CHECK.to_vec()].concat() + } else { + DEFAULT_PRESETS_TO_CHECK.to_vec() + }; + let preset = preset_to_check + .iter() + .find(|preset| presets.iter().any(|item| item == *preset)); + + trace!("presets: {:?} - preset to use: {:?}", presets, preset); + let builder = if let Some(preset) = preset { + GenericChainSpec::<()>::builder(&runtime_code[..], ()) + .with_genesis_config_preset_name(preset) + } else { + // default config + let default_config = caller.get_default_config().map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting default config from runtime should work: {e}" + )) + })?; + + GenericChainSpec::<()>::builder(&runtime_code[..], ()) + .with_genesis_config(default_config) + }; + + let builder = if let Context::Para { + relay_chain: _, + para_id: _, + } = &self.context + { + builder.with_id(self.chain_spec_name()) + } else { + builder + }; + + let builder = if let Some(chain_name) = self.chain_name.as_ref() { + builder.with_name(chain_name) + } else { + builder + }; + + let chain_spec = builder.build(); + + let contents = chain_spec.as_json(false).map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting chain-spec as json should work, err: {e}" + )) + })?; + + scoped_fs.write(&maybe_plain_spec_path, contents).await?; + } else { + trace!("Creating chain-spec with command"); + // we should create the chain-spec using command. 
+ let mut replacement_value = String::default(); + if let Some(chain_name) = self.chain_name.as_ref() { + if !chain_name.is_empty() { + replacement_value.clone_from(chain_name); + } + }; + + // SAFETY: we ensure that command is some with the first check of the fn + // default as empty + let sanitized_cmd = if replacement_value.is_empty() { + // we need to remove the `--chain` flag + self.command.as_ref().unwrap().cmd().replace("--chain", "") + } else { + self.command.as_ref().unwrap().cmd().to_owned() + }; + + let full_cmd = apply_replacements( + &sanitized_cmd, + &HashMap::from([("chainName", replacement_value.as_str())]), + ); + trace!("full_cmd: {:?}", full_cmd); + + let parts: Vec<&str> = full_cmd.split_whitespace().collect(); + let Some((cmd, args)) = parts.split_first() else { + return Err(GeneratorError::ChainSpecGeneration(format!( + "Invalid generator command: {full_cmd}" + ))); + }; + trace!("cmd: {:?} - args: {:?}", cmd, args); + + let generate_command = + GenerateFileCommand::new(cmd, maybe_plain_spec_path.clone()).args(args); + if let Some(cmd) = &self.command { + match cmd { + CommandInContext::Local(_, expected_path) => { + build_locally(generate_command, scoped_fs, expected_path.as_deref()).await? + }, + CommandInContext::Remote(_, expected_path) => { + let options = GenerateFilesOptions::new( + vec![generate_command], + self.image.clone(), + expected_path.clone(), + ); + ns.generate_files(options).await?; + }, + } + } + } + + // check if the _generated_ spec is in raw mode. + if is_raw(maybe_plain_spec_path.clone(), scoped_fs).await? 
{ + let spec_path = PathBuf::from(format!("{}.json", self.chain_spec_name)); + let tf_file = TransferedFile::new( + &PathBuf::from_iter([ns.base_dir(), &maybe_plain_spec_path]), + &spec_path, + ); + scoped_fs.copy_files(vec![&tf_file]).await.map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "Error copying file: {tf_file}, err: {e}" + )) + })?; + + self.raw_path = Some(spec_path); + } else { + self.maybe_plain_path = Some(maybe_plain_spec_path); + } + Ok(()) + } + + pub async fn build_raw<'a, T>( + &mut self, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + relay_chain_id: Option, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + warn!("Building raw version from {:?}", self); + // raw path already set, no more work to do here... + let None = self.raw_path else { + return Ok(()); + }; + + // expected raw path + let raw_spec_path = PathBuf::from(format!("{}.json", self.chain_spec_name)); + + match self + .try_build_raw_with_generic(scoped_fs, relay_chain_id.clone(), raw_spec_path.as_path()) + .await + { + Ok(_) => return Ok(()), + Err(err) => { + if Self::should_retry_with_command(&err) && self.command.is_some() { + warn!( + "GenericChainSpec raw generation failed ({}). 
Falling back to command execution.", + err + ); + } else { + return Err(err); + } + }, + } + + self.build_raw_with_command(ns, scoped_fs, raw_spec_path, relay_chain_id) + .await?; + + Ok(()) + } + + async fn try_build_raw_with_generic<'a, T>( + &mut self, + scoped_fs: &ScopedFilesystem<'a, T>, + relay_chain_id: Option, + raw_spec_path: &Path, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + // `build_raw` is always called after `build`, so `maybe_plain_path` must be set at this point + let (json_content, _) = self.read_spec(scoped_fs).await?; + let json_bytes: Vec = json_content.as_bytes().into(); + let chain_spec = GenericChainSpec::<()>::from_json_bytes(json_bytes).map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "Error loading chain-spec from json_bytes, err: {e}" + )) + })?; + + self.raw_path = Some(raw_spec_path.to_path_buf()); + let contents = chain_spec.as_json(true).map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting chain-spec as json should work, err: {e}" + )) + })?; + let contents = self + .ensure_para_fields_in_raw(&contents, relay_chain_id) + .await?; + self.write_spec(scoped_fs, contents).await?; + + Ok(()) + } + + async fn build_raw_with_command<'a, T>( + &mut self, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + raw_spec_path: PathBuf, + relay_chain_id: Option, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + // fallback to use _cmd_ for raw creation + let temp_name = format!( + "temp-build-raw-{}-{}", + self.chain_spec_name, + rand::random::() + ); + + let cmd = self + .command + .as_ref() + .ok_or(GeneratorError::ChainSpecGeneration( + "Invalid command".into(), + ))?; + let maybe_plain_path = + self.maybe_plain_path + .as_ref() + .ok_or(GeneratorError::ChainSpecGeneration( + "Invalid plain path".into(), + ))?; + + // TODO: we should get the full path from the scoped filesystem + let chain_spec_path_local = format!( + "{}/{}", + ns.base_dir().to_string_lossy(), + 
maybe_plain_path.display() + ); + // Remote path to be injected + let chain_spec_path_in_pod = format!("{}/{}", NODE_CONFIG_DIR, maybe_plain_path.display()); + // Path in the context of the node, this can be different in the context of the providers (e.g native) + let chain_spec_path_in_args = if matches!(self.command, Some(CommandInContext::Local(_, _))) + { + chain_spec_path_local.clone() + } else if ns.capabilities().prefix_with_full_path { + // In native + format!( + "{}/{}{}", + ns.base_dir().to_string_lossy(), + &temp_name, + &chain_spec_path_in_pod + ) + } else { + chain_spec_path_in_pod.clone() + }; + + let mut full_cmd = apply_replacements( + cmd.cmd(), + &HashMap::from([("chainName", chain_spec_path_in_args.as_str())]), + ); + + if !full_cmd.contains("--raw") { + full_cmd = format!("{full_cmd} --raw"); + } + trace!("full_cmd: {:?}", full_cmd); + + let parts: Vec<&str> = full_cmd.split_whitespace().collect(); + let Some((cmd, args)) = parts.split_first() else { + return Err(GeneratorError::ChainSpecGeneration(format!( + "Invalid generator command: {full_cmd}" + ))); + }; + trace!("cmd: {:?} - args: {:?}", cmd, args); + + let generate_command = GenerateFileCommand::new(cmd, raw_spec_path.clone()).args(args); + + if let Some(cmd) = &self.command { + match cmd { + CommandInContext::Local(_, expected_path) => { + build_locally(generate_command, scoped_fs, expected_path.as_deref()).await? 
+ }, + CommandInContext::Remote(_, expected_path) => { + let options = GenerateFilesOptions::with_files( + vec![generate_command], + self.image.clone(), + &[TransferedFile::new( + chain_spec_path_local, + chain_spec_path_in_pod, + )], + expected_path.clone(), + ) + .temp_name(temp_name); + trace!("calling generate_files with options: {:#?}", options); + ns.generate_files(options).await?; + }, + } + } + + self.raw_path = Some(raw_spec_path.clone()); + let (content, _) = self.read_spec(scoped_fs).await?; + let content = self + .ensure_para_fields_in_raw(&content, relay_chain_id) + .await?; + self.write_spec(scoped_fs, content).await?; + + Ok(()) + } + + async fn ensure_para_fields_in_raw( + &mut self, + content: &str, + relay_chain_id: Option, + ) -> Result { + if let Context::Para { + relay_chain: _, + para_id, + } = &self.context + { + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(content).map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting chain-spec as json should work, err: {e}" + )) + })?; + + let mut needs_write = false; + + if chain_spec_json["relay_chain"].is_null() { + chain_spec_json["relay_chain"] = json!(relay_chain_id); + needs_write = true; + } + + if chain_spec_json["para_id"].is_null() { + chain_spec_json["para_id"] = json!(para_id); + needs_write = true; + } + + if needs_write { + let contents = serde_json::to_string_pretty(&chain_spec_json).map_err(|e| { + GeneratorError::ChainSpecGeneration(format!( + "getting chain-spec json as pretty string should work, err: {e}" + )) + })?; + return Ok(contents); + } + } + + Ok(content.to_string()) + } + + fn should_retry_with_command(err: &GeneratorError) -> bool { + match err { + GeneratorError::ChainSpecGeneration(msg) => { + let msg_lower = msg.to_lowercase(); + msg_lower.contains("genesisbuilder_get_preset") || msg_lower.contains("_get_preset") + }, + _ => false, + } + } + + /// Override the :code in chain-spec raw version + pub async fn override_code<'a, T>( + 
&mut self, + scoped_fs: &ScopedFilesystem<'a, T>, + wasm_override: &AssetLocation, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + // first ensure we have the raw version of the chain-spec + let Some(_) = self.raw_path else { + return Err(GeneratorError::OverridingWasm(String::from( + "Raw path should be set at this point.", + ))); + }; + let (content, _) = self.read_spec(scoped_fs).await?; + // read override wasm + let override_content = wasm_override.get_asset().await.map_err(|_| { + GeneratorError::OverridingWasm(format!( + "Can not get asset to override wasm, asset: {wasm_override}" + )) + })?; + + // read spec to json value + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + // override :code + let Some(code) = chain_spec_json.pointer_mut("/genesis/raw/top/0x3a636f6465") else { + return Err(GeneratorError::OverridingWasm(String::from( + "Pointer '/genesis/raw/top/0x3a636f6465' should be valid in the raw spec.", + ))); + }; + + info!( + "🖋 Overriding ':code' (0x3a636f6465) in raw chain-spec with content of {}", + wasm_override + ); + *code = json!(format!("0x{}", hex::encode(override_content))); + + let overrided_content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + // save it + self.write_spec(scoped_fs, overrided_content).await?; + + Ok(()) + } + + pub async fn override_raw_spec<'a, T>( + &mut self, + scoped_fs: &ScopedFilesystem<'a, T>, + raw_spec_overrides: &JsonOverrides, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + // first ensure we have the raw version of the chain-spec + let Some(_) = self.raw_path else { + return Err(GeneratorError::OverridingRawSpec(String::from( + "Raw path should be set at this point.", + ))); + }; + + let (content, _) = 
self.read_spec(scoped_fs).await?; + + // read overrides to json value + let override_content: serde_json::Value = raw_spec_overrides.get().await.map_err(|_| { + GeneratorError::OverridingRawSpec(format!( + "Can not parse raw_spec_override contents as json: {raw_spec_overrides}" + )) + })?; + + // read spec to json value + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + // merge overrides with existing spec + merge(&mut chain_spec_json, &override_content); + + // save changes + let overrided_content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, overrided_content).await?; + + Ok(()) + } + + pub fn raw_path(&self) -> Option<&Path> { + self.raw_path.as_deref() + } + + pub fn set_asset_location(&mut self, location: AssetLocation) { + self.asset_location = Some(location) + } + + pub async fn read_chain_id<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result + where + T: FileSystem, + { + let (content, _) = self.read_spec(scoped_fs).await?; + ChainSpec::chain_id_from_spec(&content) + } + + async fn read_spec<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(String, ChainSpecFormat), GeneratorError> + where + T: FileSystem, + { + let (path, format) = match (self.maybe_plain_path.as_ref(), self.raw_path.as_ref()) { + (Some(path), None) => (path, ChainSpecFormat::Plain), + (None, Some(path)) => (path, ChainSpecFormat::Raw), + (Some(_), Some(path)) => { + // if we have both paths return the raw + (path, ChainSpecFormat::Raw) + }, + (None, None) => unreachable!(), + }; + + let content = scoped_fs.read_to_string(path.clone()).await.map_err(|_| { + GeneratorError::ChainSpecGeneration(format!( + "Can not read chain-spec from {}", + path.to_string_lossy() + )) + })?; + + 
Ok((content, format)) + } + + async fn write_spec<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + content: impl Into, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (path, _format) = match (self.maybe_plain_path.as_ref(), self.raw_path.as_ref()) { + (Some(path), None) => (path, ChainSpecFormat::Plain), + (None, Some(path)) => (path, ChainSpecFormat::Raw), + (Some(_), Some(path)) => { + // if we have both paths return the raw + (path, ChainSpecFormat::Raw) + }, + (None, None) => unreachable!(), + }; + + scoped_fs.write(path, content.into()).await.map_err(|_| { + GeneratorError::ChainSpecGeneration(format!( + "Can not write chain-spec from {}", + path.to_string_lossy() + )) + })?; + + Ok(()) + } + + // TODO: (javier) move this fns to state aware + pub async fn customize_para<'a, T>( + &self, + para: &TeyrchainSpec, + relay_chain_id: &str, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (content, format) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let Some(para_id) = chain_spec_json.get_mut("para_id") { + *para_id = json!(para.id); + }; + if let Some(para_id) = chain_spec_json.get_mut("paraId") { + *para_id = json!(para.id); + }; + + if let Some(relay_chain_id_field) = chain_spec_json.get_mut("relay_chain") { + *relay_chain_id_field = json!(relay_chain_id); + }; + + if let ChainSpecFormat::Plain = format { + let pointer = get_runtime_config_pointer(&chain_spec_json) + .map_err(GeneratorError::ChainSpecGeneration)?; + + // make genesis overrides first. 
+ if let Some(overrides) = ¶.genesis_overrides { + let percolated_overrides = percolate_overrides(&pointer, overrides) + .map_err(|e| GeneratorError::ChainSpecGeneration(e.to_string()))?; + if let Some(genesis) = chain_spec_json.pointer_mut(&pointer) { + merge(genesis, percolated_overrides); + } + } + + clear_authorities(&pointer, &mut chain_spec_json, &self.context); + + let key_type_to_use = if para.is_evm_based { + SessionKeyType::Evm + } else { + SessionKeyType::Default + }; + + // Get validators to add as authorities + let validators: Vec<&NodeSpec> = para + .collators + .iter() + .filter(|node| node.is_validator) + .collect(); + + // check chain key types + if chain_spec_json + .pointer(&format!("{pointer}/session")) + .is_some() + { + add_authorities(&pointer, &mut chain_spec_json, &validators, key_type_to_use); + } else if chain_spec_json + .pointer(&format!("{pointer}/aura")) + .is_some() + { + add_aura_authorities(&pointer, &mut chain_spec_json, &validators, KeyType::Aura); + } else { + warn!("Can't customize keys, not `session` or `aura` find in the chain-spec file"); + }; + + // Add nodes to collator + let invulnerables: Vec<&NodeSpec> = para + .collators + .iter() + .filter(|node| node.is_invulnerable) + .collect(); + + add_collator_selection( + &pointer, + &mut chain_spec_json, + &invulnerables, + key_type_to_use, + ); + + // override `parachainInfo/parachainId` + override_parachain_info(&pointer, &mut chain_spec_json, para.id); + + // write spec + let content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + } else { + warn!("⚠️ Chain spec for para_id: {} is in raw mode", para.id); + } + Ok(()) + } + + pub async fn customize_relay<'a, T, U>( + &self, + relaychain: &RelaychainSpec, + hrmp_channels: &[HrmpChannelConfig], + para_artifacts: Vec>, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result<(), 
GeneratorError> + where + T: FileSystem, + U: AsRef, + { + let (content, format) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let ChainSpecFormat::Plain = format { + // get the tokenDecimals property or set the default (12) + let token_decimals = + if let Some(val) = chain_spec_json.pointer("/properties/tokenDecimals") { + let val = val.as_u64().unwrap_or(12); + if val > u8::MAX as u64 { + 12 + } else { + val as u8 + } + } else { + 12 + }; + // get the config pointer + let pointer = get_runtime_config_pointer(&chain_spec_json) + .map_err(GeneratorError::ChainSpecGeneration)?; + + // make genesis overrides first. + if let Some(overrides) = &relaychain.runtime_genesis_patch { + let percolated_overrides = percolate_overrides(&pointer, overrides) + .map_err(|e| GeneratorError::ChainSpecGeneration(e.to_string()))?; + if let Some(patch_section) = chain_spec_json.pointer_mut(&pointer) { + merge(patch_section, percolated_overrides); + } + } + + // get min stake (to store if neede later) + let staking_min = get_staking_min(&pointer, &mut chain_spec_json); + + // Clear authorities + clear_authorities(&pointer, &mut chain_spec_json, &self.context); + + // add balances + add_balances( + &pointer, + &mut chain_spec_json, + &relaychain.nodes, + token_decimals, + staking_min, + ); + + // add staking + add_staking( + &pointer, + &mut chain_spec_json, + &relaychain.nodes, + staking_min, + ); + + // Get validators to add as authorities + let validators: Vec<&NodeSpec> = relaychain + .nodes + .iter() + .filter(|node| node.is_validator) + .collect(); + + // check chain key types + if chain_spec_json + .pointer(&format!("{pointer}/session")) + .is_some() + { + add_authorities( + &pointer, + &mut chain_spec_json, + &validators, + SessionKeyType::Stash, + ); + } else { + add_aura_authorities(&pointer, &mut 
chain_spec_json, &validators, KeyType::Aura); + add_grandpa_authorities(&pointer, &mut chain_spec_json, &validators, KeyType::Aura); + } + + // staking && nominators + + if !hrmp_channels.is_empty() { + add_hrmp_channels(&pointer, &mut chain_spec_json, hrmp_channels); + } + + // paras + for para_genesis_config in para_artifacts.iter() { + add_parachain_to_genesis( + &pointer, + &mut chain_spec_json, + para_genesis_config, + scoped_fs, + ) + .await + .map_err(|e| GeneratorError::ChainSpecGeneration(e.to_string()))?; + } + + // TODO: + // - staking + // - nominators + + // write spec + let content = serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + } else { + warn!( + "⚠️ Chain Spec for chain {} is in raw mode, can't customize.", + self.chain_spec_name + ); + } + Ok(()) + } + + pub async fn add_bootnodes<'a, T>( + &self, + scoped_fs: &ScopedFilesystem<'a, T>, + bootnodes: &[String], + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (content, _) = self.read_spec(scoped_fs).await?; + let mut chain_spec_json: serde_json::Value = + serde_json::from_str(&content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + + if let Some(bootnodes_on_file) = chain_spec_json.get_mut("bootNodes") { + if let Some(bootnodes_on_file) = bootnodes_on_file.as_array_mut() { + let mut bootnodes_to_add = + bootnodes.iter().map(|bootnode| json!(bootnode)).collect(); + bootnodes_on_file.append(&mut bootnodes_to_add); + } else { + return Err(GeneratorError::ChainSpecGeneration( + "id should be an string in the chain-spec, this is a bug".into(), + )); + }; + } else { + return Err(GeneratorError::ChainSpecGeneration( + "'bootNodes' should be a fields in the chain-spec of the relaychain".into(), + )); + }; + + // write spec + let content = 
serde_json::to_string_pretty(&chain_spec_json).map_err(|_| { + GeneratorError::ChainSpecGeneration("can not parse chain-spec value as json".into()) + })?; + self.write_spec(scoped_fs, content).await?; + + Ok(()) + } + + /// Get the chain_is from the json content of a chain-spec file. + pub fn chain_id_from_spec(spec_content: &str) -> Result { + let chain_spec_json: serde_json::Value = + serde_json::from_str(spec_content).map_err(|_| { + GeneratorError::ChainSpecGeneration("Can not parse chain-spec as json".into()) + })?; + if let Some(chain_id) = chain_spec_json.get("id") { + if let Some(chain_id) = chain_id.as_str() { + Ok(chain_id.to_string()) + } else { + Err(GeneratorError::ChainSpecGeneration( + "id should be an string in the chain-spec, this is a bug".into(), + )) + } + } else { + Err(GeneratorError::ChainSpecGeneration( + "'id' should be a fields in the chain-spec of the relaychain".into(), + )) + } + } +} + +type GenesisNodeKey = (String, String, HashMap); + +async fn build_locally<'a, T>( + generate_command: GenerateFileCommand, + scoped_fs: &ScopedFilesystem<'a, T>, + maybe_output: Option<&Path>, +) -> Result<(), GeneratorError> +where + T: FileSystem, +{ + // generate_command. + + let result = Command::new(generate_command.program.clone()) + .args(generate_command.args.clone()) + .output() + .await + .map_err(|err| { + GeneratorError::ChainSpecGeneration(format!( + "Error running cmd: {} args: {}, err: {}", + &generate_command.program, + &generate_command.args.join(" "), + err + )) + })?; + + if result.status.success() { + let raw_output = if let Some(output_path) = maybe_output { + tokio::fs::read(output_path).await.map_err(|err| { + GeneratorError::ChainSpecGeneration(format!( + "Error reading output file at {}: {}", + output_path.display(), + err + )) + })? 
+ } else { + result.stdout + }; + scoped_fs + .write( + generate_command.local_output_path, + String::from_utf8_lossy(&raw_output).to_string(), + ) + .await?; + Ok(()) + } else { + Err(GeneratorError::ChainSpecGeneration(format!( + "Error running cmd: {} args: {}, err: {}", + &generate_command.program, + &generate_command.args.join(" "), + String::from_utf8_lossy(&result.stderr) + ))) + } +} + +async fn is_raw<'a, T>( + file: PathBuf, + scoped_fs: &ScopedFilesystem<'a, T>, +) -> Result +where + T: FileSystem, +{ + let content = scoped_fs.read_to_string(file).await?; + let chain_spec_json: serde_json::Value = serde_json::from_str(&content).unwrap(); + + Ok(chain_spec_json.pointer("/genesis/raw/top").is_some()) +} + +// Internal Chain-spec customizations + +async fn add_parachain_to_genesis<'a, T, U>( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + para_genesis_config: &ParaGenesisConfig, + scoped_fs: &ScopedFilesystem<'a, T>, +) -> Result<(), anyhow::Error> +where + T: FileSystem, + U: AsRef, +{ + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let paras_pointer = if val.get("paras").is_some() { + "/paras/paras" + } else if val.get("parachainsParas").is_some() { + // For retro-compatibility with substrate pre Polkadot 0.9.5 + "/parachainsParas/paras" + } else { + // The config may not contain paras. Since chainspec allows to contain the RuntimeGenesisConfig patch we can inject it. 
+ val["paras"] = json!({ "paras": [] }); + "/paras/paras" + }; + + let paras = val + .pointer_mut(paras_pointer) + .ok_or(anyhow!("paras pointer should be valid {paras_pointer:?} "))?; + let paras_vec = paras + .as_array_mut() + .ok_or(anyhow!("paras should be an array"))?; + + let head = scoped_fs + .read_to_string(para_genesis_config.state_path.as_ref()) + .await?; + let wasm = scoped_fs + .read_to_string(para_genesis_config.wasm_path.as_ref()) + .await?; + + paras_vec.push(json!([ + para_genesis_config.id, + [head.trim(), wasm.trim(), para_genesis_config.as_parachain] + ])); + + Ok(()) + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn get_runtime_config_pointer(chain_spec_json: &serde_json::Value) -> Result { + // runtime_genesis_config is no longer in ChainSpec after rococo runtime rework (refer to: https://github.com/paritytech/polkadot-sdk/pull/1256) + // ChainSpec may contain a RuntimeGenesisConfigPatch + let pointers = [ + "/genesis/runtimeGenesis/config", + "/genesis/runtimeGenesis/patch", + "/genesis/runtimeGenesisConfigPatch", + "/genesis/runtime/runtime_genesis_config", + "/genesis/runtime", + ]; + + for pointer in pointers { + if chain_spec_json.pointer(pointer).is_some() { + return Ok(pointer.to_string()); + } + } + + Err("Can not find the runtime pointer".into()) +} + +fn percolate_overrides<'a>( + pointer: &str, + overrides: &'a serde_json::Value, +) -> Result<&'a serde_json::Value, anyhow::Error> { + let pointer_parts = pointer.split('/').collect::>(); + trace!("pointer_parts: {pointer_parts:?}"); + + let top_level = overrides + .as_object() + .ok_or_else(|| anyhow!("Overrides must be an object"))?; + let top_level_key = top_level + .keys() + .next() + .ok_or_else(|| anyhow!("Invalid override value: {overrides:?}"))?; + trace!("top_level_key: {top_level_key}"); + let index = pointer_parts.iter().position(|x| *x == top_level_key); + let Some(i) = index else { + warn!("Top level key '{top_level_key}' isn't part of 
the pointer ({pointer}), returning without percolating"); + return Ok(overrides); + }; + + let p = if i == pointer_parts.len() - 1 { + // top level key is at end of the pointer + let p = format!("/{}", pointer_parts[i]); + trace!("overrides pointer {p}"); + p + } else { + // example: pointer is `/genesis/runtimeGenesis/patch` and the overrides start at `runtimeGenesis` + let p = format!("/{}", pointer_parts[i..].join("/")); + trace!("overrides pointer {p}"); + p + }; + let overrides_to_use = overrides + .pointer(&p) + .ok_or_else(|| anyhow!("Invalid override value: {overrides:?}"))?; + Ok(overrides_to_use) +} + +#[allow(dead_code)] +fn construct_runtime_pointer_from_overrides( + overrides: &serde_json::Value, +) -> Result { + if overrides.get("genesis").is_some() { + // overrides already start with /genesis + return Ok("/genesis".into()); + } else { + // check if we are one level inner + if let Some(top_level) = overrides.as_object() { + let k = top_level + .keys() + .next() + .ok_or_else(|| anyhow!("Invalid override value: {overrides:?}"))?; + match k.as_str() { + "runtimeGenesisConfigPatch" | "runtime" | "runtimeGenesis" => { + return Ok(("/genesis").into()) + }, + "config" | "path" => { + return Ok(("/genesis/runtimeGenesis").into()); + }, + "runtime_genesis_config" => { + return Ok(("/genesis/runtime").into()); + }, + _ => {}, + } + } + } + + Err(anyhow!("Can not find the runtime pointer")) +} + +// Merge `patch_section` with `overrides`. 
+fn merge(patch_section: &mut serde_json::Value, overrides: &serde_json::Value) { + trace!("patch: {:?}", patch_section); + trace!("overrides: {:?}", overrides); + if let (Some(genesis_obj), Some(overrides_obj)) = + (patch_section.as_object_mut(), overrides.as_object()) + { + for overrides_key in overrides_obj.keys() { + trace!("overrides_key: {:?}", overrides_key); + // we only want to override keys present in the genesis object + if let Some(genesis_value) = genesis_obj.get_mut(overrides_key) { + match (&genesis_value, overrides_obj.get(overrides_key)) { + // recurse if genesis value is an object + (serde_json::Value::Object(_), Some(overrides_value)) + if overrides_value.is_object() => + { + merge(genesis_value, overrides_value); + }, + // override if genesis value not an object + (_, Some(overrides_value)) => { + trace!("overriding: {:?} / {:?}", genesis_value, overrides_value); + *genesis_value = overrides_value.clone(); + }, + _ => { + trace!("not match!"); + }, + } + } else { + // Allow to add keys, see (https://github.com/paritytech/zombienet/issues/1614) + warn!( + "key: {overrides_key} not present in genesis_obj: {:?} (adding key)", + genesis_obj + ); + let overrides_value = overrides_obj.get(overrides_key).expect(&format!( + "overrides_key {overrides_key} should be present in the overrides obj. 
qed" + )); + genesis_obj.insert(overrides_key.clone(), overrides_value.clone()); + } + } + } +} + +fn clear_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + ctx: &Context, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + // clear keys (session, aura, grandpa) + if val.get("session").is_some() { + val["session"]["keys"] = json!([]); + } + + if val.get("aura").is_some() { + val["aura"]["authorities"] = json!([]); + } + + if val.get("grandpa").is_some() { + val["grandpa"]["authorities"] = json!([]); + } + + // clear collatorSelector + if val.get("collatorSelection").is_some() { + val["collatorSelection"]["invulnerables"] = json!([]); + } + + // clear staking but not `validatorCount` if `devStakers` is set + if val.get("staking").is_some() && ctx == &Context::Relay { + val["staking"]["invulnerables"] = json!([]); + val["staking"]["stakers"] = json!([]); + + if val["staking"]["devStakers"] == json!(null) { + val["staking"]["validatorCount"] = json!(0); + } + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn get_staking_min(runtime_config_ptr: &str, chain_spec_json: &mut serde_json::Value) -> u128 { + // get min staking + let staking_ptr = format!("{runtime_config_ptr}/staking/stakers"); + if let Some(stakers) = chain_spec_json.pointer(&staking_ptr) { + // stakers should be an array + let min = stakers[0][2].clone(); + min.as_u64().unwrap_or(0).into() + } else { + 0 + } +} + +fn add_balances( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &Vec, + token_decimals: u8, + staking_min: u128, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let Some(balances) = val.pointer("/balances/balances") else { + // should be a info log + warn!("NO 'balances' key in runtime config, skipping..."); + return; + }; + + // create a balance map + let mut balances_map = generate_balance_map(balances); + for node in nodes { + if 
node.initial_balance.eq(&0) { + continue; + }; + + // TODO: handle error here and check the `accounts.accounts` design + // Double down the minimal stake defined + let balance = std::cmp::max(node.initial_balance, staking_min * 2); + for k in ["sr", "sr_stash"] { + let account = node.accounts.accounts.get(k).unwrap(); + balances_map.insert(account.address.clone(), balance); + } + } + + // ensure zombie account (//Zombie) have funds + // we will use for internal usage (e.g new validators) + balances_map.insert( + "5FTcLfwFc7ctvqp3RhbEig6UuHLHcHVRujuUm8r21wy4dAR8".to_string(), + 1000 * 10_u128.pow(token_decimals as u32), + ); + + // convert the map and store again + let new_balances: Vec<(&String, &u128)> = + balances_map.iter().collect::>(); + + val["balances"]["balances"] = json!(new_balances); + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn get_node_keys( + node: &NodeSpec, + session_key: SessionKeyType, + asset_hub_polkadot: bool, +) -> GenesisNodeKey { + let sr_account = node.accounts.accounts.get("sr").unwrap(); + let sr_stash = node.accounts.accounts.get("sr_stash").unwrap(); + let ed_account = node.accounts.accounts.get("ed").unwrap(); + let ec_account = node.accounts.accounts.get("ec").unwrap(); + let eth_account = node.accounts.accounts.get("eth").unwrap(); + let mut keys = HashMap::new(); + for k in [ + "babe", + "im_online", + "parachain_validator", + "authority_discovery", + "para_validator", + "para_assignment", + "aura", + "nimbus", + "vrf", + ] { + if k == "aura" && asset_hub_polkadot { + keys.insert(k.to_string(), ed_account.address.clone()); + continue; + } + keys.insert(k.to_string(), sr_account.address.clone()); + } + + keys.insert("grandpa".to_string(), ed_account.address.clone()); + keys.insert("beefy".to_string(), ec_account.address.clone()); + keys.insert("eth".to_string(), eth_account.public_key.clone()); + + let account_to_use = match session_key { + SessionKeyType::Default => sr_account.address.clone(), 
+ SessionKeyType::Stash => sr_stash.address.clone(), + SessionKeyType::Evm => format!("0x{}", eth_account.public_key), + }; + + (account_to_use.clone(), account_to_use, keys) +} +fn add_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + session_key: SessionKeyType, +) { + let asset_hub_polkadot = chain_spec_json + .get("id") + .and_then(|v| v.as_str()) + .map(|id| id.starts_with("asset-hub-polkadot")) + .unwrap_or_default(); + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(session_keys) = val.pointer_mut("/session/keys") { + let keys: Vec = nodes + .iter() + .map(|node| get_node_keys(node, session_key, asset_hub_polkadot)) + .collect(); + *session_keys = json!(keys); + } else { + warn!("⚠️ 'session/keys' key not present in runtime config."); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +fn add_hrmp_channels( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + hrmp_channels: &[HrmpChannelConfig], +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(preopen_hrmp_channels) = val.pointer_mut("/hrmp/preopenHrmpChannels") { + let hrmp_channels = hrmp_channels + .iter() + .map(|c| { + ( + c.sender(), + c.recipient(), + c.max_capacity(), + c.max_message_size(), + ) + }) + .collect::>(); + *preopen_hrmp_channels = json!(hrmp_channels); + } else { + warn!("⚠️ 'hrmp/preopenHrmpChannels' key not present in runtime config."); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn add_aura_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + _key_type: KeyType, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(aura_authorities) = val.pointer_mut("/aura/authorities") { + let keys: Vec = nodes + .iter() + .map(|node| { + node.accounts + .accounts + .get("sr") + 
.expect(&format!( + "'sr' account should be set at spec computation {THIS_IS_A_BUG}" + )) + .address + .clone() + }) + .collect(); + *aura_authorities = json!(keys); + } else { + warn!("⚠️ 'aura/authorities' key not present in runtime config."); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn add_grandpa_authorities( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + _key_type: KeyType, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(grandpa_authorities) = val.pointer_mut("/grandpa/authorities") { + let keys: Vec<(String, usize)> = nodes + .iter() + .map(|node| { + ( + node.accounts + .accounts + .get("ed") + .expect(&format!( + "'ed' account should be set at spec computation {THIS_IS_A_BUG}" + )) + .address + .clone(), + 1, + ) + }) + .collect(); + *grandpa_authorities = json!(keys); + } else { + warn!("⚠️ 'grandpa/authorities' key not present in runtime config."); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +fn add_staking( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &Vec, + staking_min: u128, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let Some(_) = val.pointer("/staking") else { + // should be a info log + warn!("NO 'staking' key in runtime config, skipping..."); + return; + }; + + let mut stakers = vec![]; + let mut invulnerables = vec![]; + for node in nodes { + let sr_stash_addr = &node + .accounts + .accounts + .get("sr_stash") + .expect("'sr_stash account should be defined for the node. 
qed") + .address; + stakers.push(json!([ + sr_stash_addr, + sr_stash_addr, + staking_min, + "Validator" + ])); + + if node.is_invulnerable { + invulnerables.push(sr_stash_addr); + } + } + + val["staking"]["validatorCount"] = json!(stakers.len()); + val["staking"]["stakers"] = json!(stakers); + val["staking"]["invulnerables"] = json!(invulnerables); + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +// TODO: (team) +// fn add_nominators() {} + +// // TODO: (team) we should think a better way to use the decorators from +// // current version (ts). +// fn para_custom() { todo!() } +fn override_parachain_info( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + para_id: u32, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + if let Some(parachain_id) = val.pointer_mut("/parachainInfo/parachainId") { + *parachain_id = json!(para_id) + } else { + // Add warning here! + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} +fn add_collator_selection( + runtime_config_ptr: &str, + chain_spec_json: &mut serde_json::Value, + nodes: &[&NodeSpec], + session_key: SessionKeyType, +) { + if let Some(val) = chain_spec_json.pointer_mut(runtime_config_ptr) { + let key_type = if let SessionKeyType::Evm = session_key { + "eth" + } else { + "sr" + }; + let keys: Vec = nodes + .iter() + .map(|node| { + node.accounts + .accounts + .get(key_type) + .expect(&format!( + "'sr' account should be set at spec computation {THIS_IS_A_BUG}" + )) + .address + .clone() + }) + .collect(); + + // collatorSelection.invulnerables + if let Some(invulnerables) = val.pointer_mut("/collatorSelection/invulnerables") { + *invulnerables = json!(keys); + } else { + // TODO: add a nice warning here. 
+ debug!("⚠️ 'invulnerables' not present in spec, will not be customized"); + } + } else { + unreachable!("pointer to runtime config should be valid!") + } +} + +// Helpers +fn generate_balance_map(balances: &serde_json::Value) -> HashMap { + // SAFETY: balances is always an array in chain-spec with items [k,v] + let balances_map: HashMap = + serde_json::from_value::>(balances.to_owned()) + .unwrap() + .iter() + .fold(HashMap::new(), |mut memo, balance| { + memo.insert(balance.0.clone(), balance.1); + memo + }); + balances_map +} + +#[cfg(test)] +mod tests { + use std::fs; + + use configuration::HrmpChannelConfigBuilder; + + use super::*; + use crate::{generators, shared::types::NodeAccounts}; + + const ROCOCO_LOCAL_PLAIN_TESTING: &str = "./testing/rococo-local-plain.json"; + + fn chain_spec_test(file: &str) -> serde_json::Value { + let content = fs::read_to_string(file).unwrap(); + serde_json::from_str(&content).unwrap() + } + + fn chain_spec_with_stake() -> serde_json::Value { + json!({"genesis": { + "runtimeGenesis" : { + "patch": { + "staking": { + "forceEra": "NotForcing", + "invulnerables": [ + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc" + ], + "minimumValidatorCount": 1, + "slashRewardFraction": 100000000, + "stakers": [ + [ + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + 100000000000001_u128, + "Validator" + ], + [ + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc", + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc", + 100000000000000_u128, + "Validator" + ] + ], + "validatorCount": 2 + }, + } + } + }}) + } + + fn chain_spec_with_dev_stakers() -> serde_json::Value { + json!({"genesis": { + "runtimeGenesis" : { + "patch": { + "staking": { + "activeEra": [ + 0, + 0, + 0 + ], + "canceledPayout": 0, + "devStakers": [ + 2000, + 25000 + ], + "forceEra": "NotForcing", + "invulnerables": [], + "maxNominatorCount": null, + 
"maxValidatorCount": null, + "minNominatorBond": 0, + "minValidatorBond": 0, + "slashRewardFraction": 0, + "stakers": [], + "validatorCount": 500 + }, + } + } + }}) + } + + #[test] + fn get_min_stake_works() { + let mut chain_spec_json = chain_spec_with_stake(); + + let pointer = get_runtime_config_pointer(&chain_spec_json).unwrap(); + let min = get_staking_min(&pointer, &mut chain_spec_json); + + assert_eq!(100000000000001, min); + } + + #[test] + fn dev_stakers_not_override_count_works() { + let mut chain_spec_json = chain_spec_with_dev_stakers(); + + let pointer = get_runtime_config_pointer(&chain_spec_json).unwrap(); + clear_authorities(&pointer, &mut chain_spec_json, &Context::Relay); + + let validator_count = chain_spec_json + .pointer(&format!("{pointer}/staking/validatorCount")) + .unwrap(); + assert_eq!(validator_count, &json!(500)); + } + + #[test] + fn dev_stakers_override_count_works() { + let mut chain_spec_json = chain_spec_with_stake(); + + let pointer = get_runtime_config_pointer(&chain_spec_json).unwrap(); + clear_authorities(&pointer, &mut chain_spec_json, &Context::Relay); + + let validator_count = chain_spec_json + .pointer(&format!("{pointer}/staking/validatorCount")) + .unwrap(); + assert_eq!(validator_count, &json!(0)); + } + + #[test] + fn overrides_from_toml_works() { + use serde::{Deserialize, Serialize}; + + #[derive(Debug, Serialize, Deserialize)] + struct MockConfig { + #[serde(rename = "genesis", skip_serializing_if = "Option::is_none")] + genesis_overrides: Option, + } + + let mut chain_spec_json = chain_spec_test(ROCOCO_LOCAL_PLAIN_TESTING); + // Could also be something like [genesis.runtimeGenesis.patch.balances] + const TOML: &str = "[genesis.runtime.balances] + devAccounts = [ + 20000, + 1000000000000000000, + \"//Sender//{}\" + ]"; + let override_toml: MockConfig = toml::from_str(TOML).unwrap(); + let overrides = override_toml.genesis_overrides.unwrap(); + let pointer = get_runtime_config_pointer(&chain_spec_json).unwrap(); + + 
let percolated_overrides = percolate_overrides(&pointer, &overrides) + .map_err(|e| GeneratorError::ChainSpecGeneration(e.to_string())) + .unwrap(); + trace!("percolated_overrides: {:#?}", percolated_overrides); + if let Some(genesis) = chain_spec_json.pointer_mut(&pointer) { + merge(genesis, percolated_overrides); + } + + trace!("chain spec: {chain_spec_json:#?}"); + assert!(chain_spec_json + .pointer("/genesis/runtime/balances/devAccounts") + .is_some()); + } + + #[test] + fn add_balances_works() { + let mut spec_plain = chain_spec_test(ROCOCO_LOCAL_PLAIN_TESTING); + let mut name = String::from("luca"); + let initial_balance = 1_000_000_000_000_u128; + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let node = NodeSpec { + name, + accounts, + initial_balance, + ..Default::default() + }; + + let nodes = vec![node]; + add_balances("/genesis/runtime", &mut spec_plain, &nodes, 12, 0); + + let new_balances = spec_plain + .pointer("/genesis/runtime/balances/balances") + .unwrap(); + + let balances_map = generate_balance_map(new_balances); + + // sr and sr_stash keys exists + let sr = nodes[0].accounts.accounts.get("sr").unwrap(); + let sr_stash = nodes[0].accounts.accounts.get("sr_stash").unwrap(); + assert_eq!(balances_map.get(&sr.address).unwrap(), &initial_balance); + assert_eq!( + balances_map.get(&sr_stash.address).unwrap(), + &initial_balance + ); + } + + #[test] + fn add_balances_ensure_zombie_account() { + let mut spec_plain = chain_spec_test(ROCOCO_LOCAL_PLAIN_TESTING); + + let balances = spec_plain + .pointer("/genesis/runtime/balances/balances") + .unwrap(); + let balances_map = generate_balance_map(balances); + + let nodes: Vec = vec![]; + add_balances("/genesis/runtime", &mut spec_plain, &nodes, 12, 0); + + let new_balances = spec_plain + .pointer("/genesis/runtime/balances/balances") + .unwrap(); + + let new_balances_map = 
generate_balance_map(new_balances); + + // sr and sr_stash keys exists + assert!(new_balances_map.contains_key("5FTcLfwFc7ctvqp3RhbEig6UuHLHcHVRujuUm8r21wy4dAR8")); + assert_eq!(new_balances_map.len(), balances_map.len() + 1); + } + + #[test] + fn add_balances_spec_without_balances() { + let mut spec_plain = chain_spec_test(ROCOCO_LOCAL_PLAIN_TESTING); + + { + let balances = spec_plain.pointer_mut("/genesis/runtime/balances").unwrap(); + *balances = json!(serde_json::Value::Null); + } + + let mut name = String::from("luca"); + let initial_balance = 1_000_000_000_000_u128; + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let node = NodeSpec { + name, + accounts, + initial_balance, + ..Default::default() + }; + + let nodes = vec![node]; + add_balances("/genesis/runtime", &mut spec_plain, &nodes, 12, 0); + + let new_balances = spec_plain.pointer("/genesis/runtime/balances/balances"); + + // assert 'balances' is not created + assert_eq!(new_balances, None); + } + + #[test] + fn add_staking_works() { + let mut chain_spec_json = chain_spec_with_stake(); + let mut name = String::from("luca"); + let initial_balance = 1_000_000_000_000_u128; + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let node = NodeSpec { + name, + accounts, + initial_balance, + ..Default::default() + }; + + let pointer = get_runtime_config_pointer(&chain_spec_json).unwrap(); + let min = get_staking_min(&pointer, &mut chain_spec_json); + + let nodes = vec![node]; + add_staking(&pointer, &mut chain_spec_json, &nodes, min); + + let new_staking = chain_spec_json + .pointer("/genesis/runtimeGenesis/patch/staking") + .unwrap(); + + // stakers should be one (with the luca sr_stash accounts) + let sr_stash = nodes[0].accounts.accounts.get("sr_stash").unwrap(); + 
assert_eq!(new_staking["stakers"][0][0], json!(sr_stash.address)); + // with the calculated minimal bound + assert_eq!(new_staking["stakers"][0][2], json!(min)); + // and only one + assert_eq!(new_staking["stakers"].as_array().unwrap().len(), 1); + } + + #[test] + fn adding_hrmp_channels_works() { + let mut spec_plain = chain_spec_test(ROCOCO_LOCAL_PLAIN_TESTING); + + { + let current_hrmp_channels = spec_plain + .pointer("/genesis/runtime/hrmp/preopenHrmpChannels") + .unwrap(); + // assert should be empty + assert_eq!(current_hrmp_channels, &json!([])); + } + + let para_100_101 = HrmpChannelConfigBuilder::new() + .with_sender(100) + .with_recipient(101) + .build(); + let para_101_100 = HrmpChannelConfigBuilder::new() + .with_sender(101) + .with_recipient(100) + .build(); + let channels = vec![para_100_101, para_101_100]; + + add_hrmp_channels("/genesis/runtime", &mut spec_plain, &channels); + let new_hrmp_channels = spec_plain + .pointer("/genesis/runtime/hrmp/preopenHrmpChannels") + .unwrap() + .as_array() + .unwrap(); + + assert_eq!(new_hrmp_channels.len(), 2); + assert_eq!(new_hrmp_channels.first().unwrap()[0], 100); + assert_eq!(new_hrmp_channels.first().unwrap()[1], 101); + assert_eq!(new_hrmp_channels.last().unwrap()[0], 101); + assert_eq!(new_hrmp_channels.last().unwrap()[1], 100); + } + + #[test] + fn adding_hrmp_channels_to_an_spec_without_channels() { + let mut spec_plain = chain_spec_test("./testing/rococo-local-plain.json"); + + { + let hrmp = spec_plain.pointer_mut("/genesis/runtime/hrmp").unwrap(); + *hrmp = json!(serde_json::Value::Null); + } + + let para_100_101 = HrmpChannelConfigBuilder::new() + .with_sender(100) + .with_recipient(101) + .build(); + let para_101_100 = HrmpChannelConfigBuilder::new() + .with_sender(101) + .with_recipient(100) + .build(); + let channels = vec![para_100_101, para_101_100]; + + add_hrmp_channels("/genesis/runtime", &mut spec_plain, &channels); + let new_hrmp_channels = 
spec_plain.pointer("/genesis/runtime/hrmp/preopenHrmpChannels"); + + // assert 'preopenHrmpChannels' is not created + assert_eq!(new_hrmp_channels, None); + } + + #[test] + fn get_node_keys_works() { + let mut name = String::from("luca"); + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let node = NodeSpec { + name, + accounts, + ..Default::default() + }; + + let sr = &node.accounts.accounts["sr"]; + let keys = [ + ("babe".into(), sr.address.clone()), + ("im_online".into(), sr.address.clone()), + ("parachain_validator".into(), sr.address.clone()), + ("authority_discovery".into(), sr.address.clone()), + ("para_validator".into(), sr.address.clone()), + ("para_assignment".into(), sr.address.clone()), + ("aura".into(), sr.address.clone()), + ("nimbus".into(), sr.address.clone()), + ("vrf".into(), sr.address.clone()), + ( + "grandpa".into(), + node.accounts.accounts["ed"].address.clone(), + ), + ("beefy".into(), node.accounts.accounts["ec"].address.clone()), + ("eth".into(), node.accounts.accounts["eth"].address.clone()), + ] + .into(); + + // Stash + let sr_stash = &node.accounts.accounts["sr_stash"]; + let node_key = get_node_keys(&node, SessionKeyType::Stash, false); + assert_eq!(node_key.0, sr_stash.address); + assert_eq!(node_key.1, sr_stash.address); + assert_eq!(node_key.2, keys); + // Non-stash + let node_key = get_node_keys(&node, SessionKeyType::Default, false); + assert_eq!(node_key.0, sr.address); + assert_eq!(node_key.1, sr.address); + assert_eq!(node_key.2, keys); + } + + #[test] + fn get_node_keys_supports_asset_hub_polkadot() { + let mut name = String::from("luca"); + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let node = NodeSpec { + name, + accounts, + ..Default::default() + }; + + let node_key = 
get_node_keys(&node, SessionKeyType::default(), false); + assert_eq!(node_key.2["aura"], node.accounts.accounts["sr"].address); + + let node_key = get_node_keys(&node, SessionKeyType::default(), true); + assert_eq!(node_key.2["aura"], node.accounts.accounts["ed"].address); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/command.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/command.rs new file mode 100644 index 00000000..32a60cc3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/command.rs @@ -0,0 +1,634 @@ +use configuration::types::Arg; +use support::constants::THIS_IS_A_BUG; + +use super::arg_filter::{apply_arg_removals, parse_removal_args}; +use crate::{network_spec::node::NodeSpec, shared::constants::*}; + +pub struct GenCmdOptions<'a> { + pub relay_chain_name: &'a str, + pub cfg_path: &'a str, + pub data_path: &'a str, + pub relay_data_path: &'a str, + pub use_wrapper: bool, + pub bootnode_addr: Vec, + pub use_default_ports_in_cmd: bool, + pub is_native: bool, +} + +impl Default for GenCmdOptions<'_> { + fn default() -> Self { + Self { + relay_chain_name: "rococo-local", + cfg_path: "/cfg", + data_path: "/data", + relay_data_path: "/relay-data", + use_wrapper: true, + bootnode_addr: vec![], + use_default_ports_in_cmd: false, + is_native: true, + } + } +} + +const FLAGS_ADDED_BY_US: [&str; 3] = ["--no-telemetry", "--collator", "--"]; +const OPS_ADDED_BY_US: [&str; 6] = [ + "--chain", + "--name", + "--rpc-cors", + "--rpc-methods", + "--parachain-id", + "--node-key", +]; + +// TODO: can we abstract this and use only one fn (or at least split and reuse in small fns) +pub fn generate_for_cumulus_node( + node: &NodeSpec, + options: GenCmdOptions, + para_id: u32, +) -> (String, Vec) { + let NodeSpec { + key, + args, + is_validator, + bootnodes_addresses, + .. 
+ } = node; + + let mut tmp_args: Vec = vec!["--node-key".into(), key.clone()]; + + if !args.contains(&Arg::Flag("--prometheus-external".into())) { + tmp_args.push("--prometheus-external".into()) + } + + if *is_validator && !args.contains(&Arg::Flag("--validator".into())) { + tmp_args.push("--collator".into()) + } + + if !bootnodes_addresses.is_empty() { + tmp_args.push("--bootnodes".into()); + let bootnodes = bootnodes_addresses + .iter() + .map(|m| m.to_string()) + .collect::>() + .join(" "); + tmp_args.push(bootnodes) + } + + // ports + let (prometheus_port, rpc_port, p2p_port) = + resolve_ports(node, options.use_default_ports_in_cmd); + + tmp_args.push("--prometheus-port".into()); + tmp_args.push(prometheus_port.to_string()); + + tmp_args.push("--rpc-port".into()); + tmp_args.push(rpc_port.to_string()); + + tmp_args.push("--listen-addr".into()); + tmp_args.push(format!("/ip4/0.0.0.0/tcp/{p2p_port}/ws")); + + let mut collator_args: &[Arg] = &[]; + let mut full_node_args: &[Arg] = &[]; + if !args.is_empty() { + if let Some(index) = args.iter().position(|arg| match arg { + Arg::Flag(flag) => flag.eq("--"), + Arg::Option(..) => false, + Arg::Array(..) 
=> false, + }) { + (collator_args, full_node_args) = args.split_at(index); + } else { + // Assume args are those specified for collator only + collator_args = args; + } + } + + // set our base path + tmp_args.push("--base-path".into()); + tmp_args.push(options.data_path.into()); + + let node_specific_bootnodes: Vec = node + .bootnodes_addresses + .iter() + .map(|b| b.to_string()) + .collect(); + let full_bootnodes = [node_specific_bootnodes, options.bootnode_addr].concat(); + if !full_bootnodes.is_empty() { + tmp_args.push("--bootnodes".into()); + tmp_args.push(full_bootnodes.join(" ")); + } + + let mut full_node_p2p_needs_to_be_injected = true; + let mut full_node_prometheus_needs_to_be_injected = true; + let mut full_node_args_filtered = full_node_args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if flag.starts_with("-:") || FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(vec![flag.to_owned()]) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else if k.eq(&"port") { + if v.eq(&"30333") { + full_node_p2p_needs_to_be_injected = true; + None + } else { + // non default + full_node_p2p_needs_to_be_injected = false; + Some(vec![k.to_owned(), v.to_owned()]) + } + } else if k.eq(&"--prometheus-port") { + if v.eq(&"9616") { + full_node_prometheus_needs_to_be_injected = true; + None + } else { + // non default + full_node_prometheus_needs_to_be_injected = false; + Some(vec![k.to_owned(), v.to_owned()]) + } + } else { + Some(vec![k.to_owned(), v.to_owned()]) + } + }, + Arg::Array(k, v) => { + let mut args = vec![k.to_owned()]; + args.extend(v.to_owned()); + Some(args) + }, + }) + .flatten() + .collect::>(); + + let full_p2p_port = node + .full_node_p2p_port + .as_ref() + .expect(&format!( + "full node p2p_port should be specifed: {THIS_IS_A_BUG}" + )) + .0; + let full_prometheus_port = node + .full_node_prometheus_port + .as_ref() + .expect(&format!( + "full node prometheus_port should be 
specifed: {THIS_IS_A_BUG}" + )) + .0; + + // full_node: change p2p port if is the default + if full_node_p2p_needs_to_be_injected { + full_node_args_filtered.push("--port".into()); + full_node_args_filtered.push(full_p2p_port.to_string()); + } + + // full_node: change prometheus port if is the default + if full_node_prometheus_needs_to_be_injected { + full_node_args_filtered.push("--prometheus-port".into()); + full_node_args_filtered.push(full_prometheus_port.to_string()); + } + + let mut args_filtered = collator_args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if flag.starts_with("-:") || FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(vec![flag.to_owned()]) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else { + Some(vec![k.to_owned(), v.to_owned()]) + } + }, + Arg::Array(k, v) => { + let mut args = vec![k.to_owned()]; + args.extend(v.to_owned()); + Some(args) + }, + }) + .flatten() + .collect::>(); + + tmp_args.append(&mut args_filtered); + + let parachain_spec_path = format!("{}/{}.json", options.cfg_path, para_id); + let mut final_args = vec![ + node.command.as_str().to_string(), + "--chain".into(), + parachain_spec_path, + "--name".into(), + node.name.clone(), + "--rpc-cors".into(), + "all".into(), + "--rpc-methods".into(), + "unsafe".into(), + ]; + + // The `--unsafe-rpc-external` option spawns an additional RPC server on a random port, + // which can conflict with reserved ports, causing an "Address already in use" error + // when using the `native` provider. Since this option isn't needed for `native`, + // it should be omitted in that case. 
+ if !options.is_native { + final_args.push("--unsafe-rpc-external".into()); + } + + final_args.append(&mut tmp_args); + + let relaychain_spec_path = format!("{}/{}.json", options.cfg_path, options.relay_chain_name); + let mut full_node_injected: Vec = vec![ + "--".into(), + "--base-path".into(), + options.relay_data_path.into(), + "--chain".into(), + relaychain_spec_path, + "--execution".into(), + "wasm".into(), + ]; + + final_args.append(&mut full_node_injected); + final_args.append(&mut full_node_args_filtered); + + let removals = parse_removal_args(args); + final_args = apply_arg_removals(final_args, &removals); + + if options.use_wrapper { + ("/cfg/zombie-wrapper.sh".to_string(), final_args) + } else { + (final_args.remove(0), final_args) + } +} + +pub fn generate_for_node( + node: &NodeSpec, + options: GenCmdOptions, + para_id: Option, +) -> (String, Vec) { + let NodeSpec { + key, + args, + is_validator, + bootnodes_addresses, + .. + } = node; + let mut tmp_args: Vec = vec![ + "--node-key".into(), + key.clone(), + // TODO:(team) we should allow to set the telemetry url from config + "--no-telemetry".into(), + ]; + + if !args.contains(&Arg::Flag("--prometheus-external".into())) { + tmp_args.push("--prometheus-external".into()) + } + + if let Some(para_id) = para_id { + tmp_args.push("--parachain-id".into()); + tmp_args.push(para_id.to_string()); + } + + if *is_validator && !args.contains(&Arg::Flag("--validator".into())) { + tmp_args.push("--validator".into()); + if node.supports_arg("--insecure-validator-i-know-what-i-do") { + tmp_args.push("--insecure-validator-i-know-what-i-do".into()); + } + } + + if !bootnodes_addresses.is_empty() { + tmp_args.push("--bootnodes".into()); + let bootnodes = bootnodes_addresses + .iter() + .map(|m| m.to_string()) + .collect::>() + .join(" "); + tmp_args.push(bootnodes) + } + + // ports + let (prometheus_port, rpc_port, p2p_port) = + resolve_ports(node, options.use_default_ports_in_cmd); + + // Prometheus + 
tmp_args.push("--prometheus-port".into()); + tmp_args.push(prometheus_port.to_string()); + + // RPC + // TODO (team): do we want to support old --ws-port? + tmp_args.push("--rpc-port".into()); + tmp_args.push(rpc_port.to_string()); + + let listen_value = if let Some(listen_val) = args.iter().find_map(|arg| match arg { + Arg::Flag(_) => None, + Arg::Option(k, v) => { + if k.eq("--listen-addr") { + Some(v) + } else { + None + } + }, + Arg::Array(..) => None, + }) { + let mut parts = listen_val.split('/').collect::>(); + // TODO: move this to error + let port_part = parts + .get_mut(4) + .expect(&format!("should have at least 5 parts {THIS_IS_A_BUG}")); + let port_to_use = p2p_port.to_string(); + *port_part = port_to_use.as_str(); + parts.join("/") + } else { + format!("/ip4/0.0.0.0/tcp/{p2p_port}/ws") + }; + + tmp_args.push("--listen-addr".into()); + tmp_args.push(listen_value); + + // set our base path + tmp_args.push("--base-path".into()); + tmp_args.push(options.data_path.into()); + + let node_specific_bootnodes: Vec = node + .bootnodes_addresses + .iter() + .map(|b| b.to_string()) + .collect(); + let full_bootnodes = [node_specific_bootnodes, options.bootnode_addr].concat(); + if !full_bootnodes.is_empty() { + tmp_args.push("--bootnodes".into()); + tmp_args.push(full_bootnodes.join(" ")); + } + + // add the rest of the args + let mut args_filtered = args + .iter() + .filter_map(|arg| match arg { + Arg::Flag(flag) => { + if flag.starts_with("-:") || FLAGS_ADDED_BY_US.contains(&flag.as_str()) { + None + } else { + Some(vec![flag.to_owned()]) + } + }, + Arg::Option(k, v) => { + if OPS_ADDED_BY_US.contains(&k.as_str()) { + None + } else { + Some(vec![k.to_owned(), v.to_owned()]) + } + }, + Arg::Array(k, v) => { + let mut args = vec![k.to_owned()]; + args.extend(v.to_owned()); + Some(args) + }, + }) + .flatten() + .collect::>(); + + tmp_args.append(&mut args_filtered); + + let chain_spec_path = format!("{}/{}.json", options.cfg_path, options.relay_chain_name); + let 
mut final_args = vec![ + node.command.as_str().to_string(), + "--chain".into(), + chain_spec_path, + "--name".into(), + node.name.clone(), + "--rpc-cors".into(), + "all".into(), + "--rpc-methods".into(), + "unsafe".into(), + ]; + + // The `--unsafe-rpc-external` option spawns an additional RPC server on a random port, + // which can conflict with reserved ports, causing an "Address already in use" error + // when using the `native` provider. Since this option isn't needed for `native`, + // it should be omitted in that case. + if !options.is_native { + final_args.push("--unsafe-rpc-external".into()); + } + + final_args.append(&mut tmp_args); + + if let Some(ref subcommand) = node.subcommand { + final_args.insert(1, subcommand.as_str().to_string()); + } + + let removals = parse_removal_args(args); + final_args = apply_arg_removals(final_args, &removals); + + if options.use_wrapper { + ("/cfg/zombie-wrapper.sh".to_string(), final_args) + } else { + (final_args.remove(0), final_args) + } +} + +/// Returns (prometheus, rpc, p2p) ports to use in the command +fn resolve_ports(node: &NodeSpec, use_default_ports_in_cmd: bool) -> (u16, u16, u16) { + if use_default_ports_in_cmd { + (PROMETHEUS_PORT, RPC_PORT, P2P_PORT) + } else { + (node.prometheus_port.0, node.rpc_port.0, node.p2p_port.0) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{generators, shared::types::NodeAccounts}; + + fn get_node_spec(full_node_present: bool) -> NodeSpec { + let mut name = String::from("luca"); + let initial_balance = 1_000_000_000_000_u128; + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = NodeAccounts { + accounts: generators::generate_node_keys(&seed).unwrap(), + seed, + }; + let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present { + ( + Some(generators::generate_node_port(None).unwrap()), + Some(generators::generate_node_port(None).unwrap()), + ) + } else { + (None, None) + }; + NodeSpec { + name, + accounts, + 
initial_balance, + full_node_p2p_port, + full_node_prometheus_port, + ..Default::default() + } + } + + #[test] + fn generate_for_native_cumulus_node_works() { + let node = get_node_spec(true); + let opts = GenCmdOptions { + use_wrapper: false, + is_native: true, + ..GenCmdOptions::default() + }; + + let (program, args) = generate_for_cumulus_node(&node, opts, 1000); + assert_eq!(program.as_str(), "polkadot"); + + let divider_flag = args.iter().position(|x| x == "--").unwrap(); + + // ensure full node ports + let i = args[divider_flag..] + .iter() + .position(|x| { + x == node + .full_node_p2p_port + .as_ref() + .unwrap() + .0 + .to_string() + .as_str() + }) + .unwrap(); + assert_eq!(&args[divider_flag + i - 1], "--port"); + + let i = args[divider_flag..] + .iter() + .position(|x| { + x == node + .full_node_prometheus_port + .as_ref() + .unwrap() + .0 + .to_string() + .as_str() + }) + .unwrap(); + assert_eq!(&args[divider_flag + i - 1], "--prometheus-port"); + + assert!(!args.iter().any(|arg| arg == "--unsafe-rpc-external")); + } + + #[test] + fn generate_for_native_cumulus_node_rpc_external_is_not_removed_if_is_set_by_user() { + let mut node = get_node_spec(true); + node.args.push("--unsafe-rpc-external".into()); + let opts = GenCmdOptions { + use_wrapper: false, + is_native: true, + ..GenCmdOptions::default() + }; + + let (_, args) = generate_for_cumulus_node(&node, opts, 1000); + + assert!(args.iter().any(|arg| arg == "--unsafe-rpc-external")); + } + + #[test] + fn generate_for_non_native_cumulus_node_works() { + let node = get_node_spec(true); + let opts = GenCmdOptions { + use_wrapper: false, + is_native: false, + ..GenCmdOptions::default() + }; + + let (program, args) = generate_for_cumulus_node(&node, opts, 1000); + assert_eq!(program.as_str(), "polkadot"); + + let divider_flag = args.iter().position(|x| x == "--").unwrap(); + + // ensure full node ports + let i = args[divider_flag..] 
+ .iter() + .position(|x| { + x == node + .full_node_p2p_port + .as_ref() + .unwrap() + .0 + .to_string() + .as_str() + }) + .unwrap(); + assert_eq!(&args[divider_flag + i - 1], "--port"); + + let i = args[divider_flag..] + .iter() + .position(|x| { + x == node + .full_node_prometheus_port + .as_ref() + .unwrap() + .0 + .to_string() + .as_str() + }) + .unwrap(); + assert_eq!(&args[divider_flag + i - 1], "--prometheus-port"); + + // we expect to find this arg in collator node part + assert!(&args[0..divider_flag] + .iter() + .any(|arg| arg == "--unsafe-rpc-external")); + } + + #[test] + fn generate_for_native_node_rpc_external_works() { + let node = get_node_spec(false); + let opts = GenCmdOptions { + use_wrapper: false, + is_native: true, + ..GenCmdOptions::default() + }; + + let (program, args) = generate_for_node(&node, opts, Some(1000)); + assert_eq!(program.as_str(), "polkadot"); + + assert!(!args.iter().any(|arg| arg == "--unsafe-rpc-external")); + } + + #[test] + fn generate_for_non_native_node_rpc_external_works() { + let node = get_node_spec(false); + let opts = GenCmdOptions { + use_wrapper: false, + is_native: false, + ..GenCmdOptions::default() + }; + + let (program, args) = generate_for_node(&node, opts, Some(1000)); + assert_eq!(program.as_str(), "polkadot"); + + assert!(args.iter().any(|arg| arg == "--unsafe-rpc-external")); + } + + #[test] + fn test_arg_removal_removes_insecure_validator_flag() { + let mut node = get_node_spec(false); + node.args + .push(Arg::Flag("-:--insecure-validator-i-know-what-i-do".into())); + node.is_validator = true; + node.available_args_output = Some("--insecure-validator-i-know-what-i-do".to_string()); + + let opts = GenCmdOptions { + use_wrapper: false, + is_native: true, + ..GenCmdOptions::default() + }; + + let (program, args) = generate_for_node(&node, opts, Some(1000)); + assert_eq!(program.as_str(), "polkadot"); + assert!(args.iter().any(|arg| arg == "--validator")); + assert!(!args + .iter() + .any(|arg| arg == 
"--insecure-validator-i-know-what-i-do")); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/errors.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/errors.rs new file mode 100644 index 00000000..5828cbe4 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/errors.rs @@ -0,0 +1,24 @@ +use provider::ProviderError; +use support::fs::FileSystemError; + +#[derive(Debug, thiserror::Error)] +pub enum GeneratorError { + #[error("Generating key {0} with input {1}")] + KeyGeneration(String, String), + #[error("Generating port {0}, err {1}")] + PortGeneration(u16, String), + #[error("Chain-spec build error: {0}")] + ChainSpecGeneration(String), + #[error("Provider error: {0}")] + ProviderError(#[from] ProviderError), + #[error("FileSystem error")] + FileSystemError(#[from] FileSystemError), + #[error("Generating identity, err {0}")] + IdentityGeneration(String), + #[error("Generating bootnode address, err {0}")] + BootnodeAddrGeneration(String), + #[error("Error overriding wasm on raw chain-spec, err {0}")] + OverridingWasm(String), + #[error("Error overriding raw chain-spec, err {0}")] + OverridingRawSpec(String), +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/identity.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/identity.rs new file mode 100644 index 00000000..62ac5f36 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/identity.rs @@ -0,0 +1,41 @@ +use hex::FromHex; +use libp2p::identity::{ed25519, Keypair}; +use sha2::digest::Digest; + +use super::errors::GeneratorError; + +// Generate p2p identity for node +// return `node-key` and `peerId` +pub fn generate(node_name: &str) -> Result<(String, String), GeneratorError> { + let key = hex::encode(sha2::Sha256::digest(node_name)); + + let bytes = <[u8; 32]>::from_hex(key.clone()).map_err(|_| { + GeneratorError::IdentityGeneration("can not transform hex 
to [u8;32]".into()) + })?; + let sk = ed25519::SecretKey::try_from_bytes(bytes) + .map_err(|_| GeneratorError::IdentityGeneration("can not create sk from bytes".into()))?; + let local_identity: Keypair = ed25519::Keypair::from(sk).into(); + let local_public = local_identity.public(); + let local_peer_id = local_public.to_peer_id(); + + Ok((key, local_peer_id.to_base58())) +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn generate_for_alice() { + let s = "alice"; + let (key, peer_id) = generate(s).unwrap(); + assert_eq!( + &key, + "2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90" + ); + assert_eq!( + &peer_id, + "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm" + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/key.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/key.rs new file mode 100644 index 00000000..ffda34cc --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/key.rs @@ -0,0 +1,151 @@ +use sp_core::{crypto::SecretStringError, ecdsa, ed25519, keccak_256, sr25519, Pair, H160, H256}; + +use super::errors::GeneratorError; +use crate::shared::types::{Accounts, NodeAccount}; +const KEYS: [&str; 5] = ["sr", "sr_stash", "ed", "ec", "eth"]; + +pub fn generate_pair(seed: &str) -> Result { + let pair = T::Pair::from_string(seed, None)?; + Ok(pair) +} + +pub fn generate(seed: &str) -> Result { + let mut accounts: Accounts = Default::default(); + for k in KEYS { + let (address, public_key) = match k { + "sr" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "sr_stash" => { + let pair = generate_pair::(&format!("{seed}//stash")) + .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "ed" => { + let pair = generate_pair::(seed) + .map_err(|_| 
GeneratorError::KeyGeneration(k.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "ec" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?; + (pair.public().to_string(), hex::encode(pair.public())) + }, + "eth" => { + let pair = generate_pair::(seed) + .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?; + + let decompressed = libsecp256k1::PublicKey::parse_compressed(&pair.public().0) + .map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))? + .serialize(); + let mut m = [0u8; 64]; + m.copy_from_slice(&decompressed[1..65]); + let account = H160::from(H256::from(keccak_256(&m))); + + (hex::encode(account), hex::encode(account)) + }, + _ => unreachable!(), + }; + accounts.insert(k.into(), NodeAccount::new(address, public_key)); + } + Ok(accounts) +} + +#[cfg(test)] +mod tests { + + use super::*; + #[test] + fn generate_for_alice() { + use sp_core::crypto::Ss58Codec; + let s = "Alice"; + let seed = format!("//{s}"); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + pair.public().to_ss58check() + ); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1", + format!("0x{}", hex::encode(pair.public())) + ); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", + pair.public().to_ss58check() + ); + } + + #[test] + fn generate_for_zombie() { + use sp_core::crypto::Ss58Codec; + let s = "Zombie"; + let seed = format!("//{s}"); + + let pair = generate_pair::(&seed).unwrap(); + assert_eq!( + "5FTcLfwFc7ctvqp3RhbEig6UuHLHcHVRujuUm8r21wy4dAR8", + pair.public().to_ss58check() + ); + } + + #[test] + fn generate_pair_invalid_should_fail() { + let s = "Alice"; + let seed = s.to_string(); + + let pair = generate_pair::(&seed); + assert!(pair.is_err()); + 
} + + #[test] + fn generate_invalid_should_fail() { + let s = "Alice"; + let seed = s.to_string(); + + let pair = generate(&seed); + assert!(pair.is_err()); + assert!(matches!(pair, Err(GeneratorError::KeyGeneration(_, _)))); + } + + #[test] + fn generate_work() { + let s = "Alice"; + let seed = format!("//{s}"); + + let pair = generate(&seed).unwrap(); + let sr = pair.get("sr").unwrap(); + let sr_stash = pair.get("sr_stash").unwrap(); + let ed = pair.get("ed").unwrap(); + let ec = pair.get("ec").unwrap(); + let eth = pair.get("eth").unwrap(); + + assert_eq!( + sr.address, + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + ); + assert_eq!( + sr_stash.address, + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY" + ); + assert_eq!( + ed.address, + "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu" + ); + assert_eq!( + format!("0x{}", ec.public_key), + "0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1" + ); + + assert_eq!( + format!("0x{}", eth.public_key), + "0xe04cc55ebee1cbce552f250e85c57b70b2e2625b" + ) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore.rs new file mode 100644 index 00000000..2a04d56b --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore.rs @@ -0,0 +1,290 @@ +use std::{ + path::{Path, PathBuf}, + vec, +}; + +use hex::encode; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; + +use super::errors::GeneratorError; +use crate::{ + generators::keystore_key_types::{parse_keystore_key_types, KeystoreKeyType}, + shared::types::NodeAccounts, + ScopedFilesystem, +}; + +/// Generates keystore files for a node. 
+/// +/// # Arguments +/// * `acc` - The node accounts containing the seed and public keys +/// * `node_files_path` - The path where keystore files will be created +/// * `scoped_fs` - The scoped filesystem for file operations +/// * `asset_hub_polkadot` - Whether this is for asset-hub-polkadot (affects aura key scheme) +/// * `keystore_key_types` - Optional list of key type specifications +/// +/// If `keystore_key_types` is empty, all default key types will be generated. +/// Otherwise, only the specified key types will be generated. +pub async fn generate<'a, T>( + acc: &NodeAccounts, + node_files_path: impl AsRef, + scoped_fs: &ScopedFilesystem<'a, T>, + asset_hub_polkadot: bool, + keystore_key_types: Vec<&str>, +) -> Result, GeneratorError> +where + T: FileSystem, +{ + // Create local keystore + scoped_fs.create_dir_all(node_files_path.as_ref()).await?; + let mut filenames = vec![]; + + // Parse the key type specifications + let key_types = parse_keystore_key_types(&keystore_key_types, asset_hub_polkadot); + + let futures: Vec<_> = key_types + .iter() + .map(|key_type| { + let filename = generate_keystore_filename(key_type, acc); + let file_path = PathBuf::from(format!( + "{}/{}", + node_files_path.as_ref().to_string_lossy(), + filename + )); + let content = format!("\"{}\"", acc.seed); + (filename, scoped_fs.write(file_path, content)) + }) + .collect(); + + for (filename, future) in futures { + future.await?; + filenames.push(PathBuf::from(filename)); + } + + Ok(filenames) +} + +/// Generates the keystore filename for a given key type. 
+/// +/// The filename format is: `{hex_encoded_key_type}{public_key}` +fn generate_keystore_filename(key_type: &KeystoreKeyType, acc: &NodeAccounts) -> String { + let account_key = key_type.scheme.account_key(); + let pk = acc + .accounts + .get(account_key) + .expect(&format!( + "Key '{}' should be set for node {THIS_IS_A_BUG}", + account_key + )) + .public_key + .as_str(); + + format!("{}{}", encode(&key_type.key_type), pk) +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, ffi::OsString, str::FromStr}; + + use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem}; + + use super::*; + use crate::shared::types::{NodeAccount, NodeAccounts}; + + fn create_test_accounts() -> NodeAccounts { + let mut accounts = HashMap::new(); + accounts.insert( + "sr".to_string(), + NodeAccount::new("sr_address", "sr_public_key"), + ); + accounts.insert( + "ed".to_string(), + NodeAccount::new("ed_address", "ed_public_key"), + ); + accounts.insert( + "ec".to_string(), + NodeAccount::new("ec_address", "ec_public_key"), + ); + NodeAccounts { + seed: "//Alice".to_string(), + accounts, + } + } + + fn create_test_fs() -> InMemoryFileSystem { + InMemoryFileSystem::new(HashMap::from([( + OsString::from_str("/").unwrap(), + InMemoryFile::dir(), + )])) + } + + #[tokio::test] + async fn generate_creates_default_keystore_files_when_no_key_types_specified() { + let accounts = create_test_accounts(); + let fs = create_test_fs(); + let base_dir = "/tmp/test"; + + let scoped_fs = ScopedFilesystem { fs: &fs, base_dir }; + let key_types: Vec<&str> = vec![]; + + let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await; + assert!(res.is_ok()); + + let filenames = res.unwrap(); + + assert!(filenames.len() > 10); + + let filename_strs: Vec = filenames + .iter() + .map(|p| p.to_string_lossy().to_string()) + .collect(); + + // Check that aura key is generated (hex of "aura" is 61757261) + assert!(filename_strs.iter().any(|f| f.starts_with("61757261"))); + // Check that 
babe key is generated (hex of "babe" is 62616265) + assert!(filename_strs.iter().any(|f| f.starts_with("62616265"))); + // Check that gran key is generated (hex of "gran" is 6772616e) + assert!(filename_strs.iter().any(|f| f.starts_with("6772616e"))); + } + + #[tokio::test] + async fn generate_creates_only_specified_keystore_files() { + let accounts = create_test_accounts(); + let fs = create_test_fs(); + let base_dir = "/tmp/test"; + + let scoped_fs = ScopedFilesystem { fs: &fs, base_dir }; + let key_types = vec!["audi", "gran"]; + + let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await; + + assert!(res.is_ok()); + + let filenames = res.unwrap(); + assert_eq!(filenames.len(), 2); + + let filename_strs: Vec = filenames + .iter() + .map(|p| p.to_string_lossy().to_string()) + .collect(); + + // audi uses sr scheme by default + assert!(filename_strs + .iter() + .any(|f| f.starts_with("61756469") && f.contains("sr_public_key"))); + // gran uses ed scheme by default + assert!(filename_strs + .iter() + .any(|f| f.starts_with("6772616e") && f.contains("ed_public_key"))); + } + + #[tokio::test] + async fn generate_produces_correct_keystore_files() { + struct TestCase { + name: &'static str, + key_types: Vec<&'static str>, + asset_hub_polkadot: bool, + expected_prefix: &'static str, + expected_public_key: &'static str, + } + + let test_cases = vec![ + TestCase { + name: "explicit scheme override (gran_sr)", + key_types: vec!["gran_sr"], + asset_hub_polkadot: false, + expected_prefix: "6772616e", // "gran" in hex + expected_public_key: "sr_public_key", + }, + TestCase { + name: "aura with asset_hub_polkadot uses ed", + key_types: vec!["aura"], + asset_hub_polkadot: true, + expected_prefix: "61757261", // "aura" in hex + expected_public_key: "ed_public_key", + }, + TestCase { + name: "aura without asset_hub_polkadot uses sr", + key_types: vec!["aura"], + asset_hub_polkadot: false, + expected_prefix: "61757261", // "aura" in hex + expected_public_key: 
"sr_public_key", + }, + TestCase { + name: "custom key type with explicit ec scheme", + key_types: vec!["cust_ec"], + asset_hub_polkadot: false, + expected_prefix: "63757374", // "cust" in hex + expected_public_key: "ec_public_key", + }, + ]; + + for tc in test_cases { + let accounts = create_test_accounts(); + let fs = create_test_fs(); + let scoped_fs = ScopedFilesystem { + fs: &fs, + base_dir: "/tmp/test", + }; + + let key_types: Vec<&str> = tc.key_types.clone(); + let res = generate( + &accounts, + "node1", + &scoped_fs, + tc.asset_hub_polkadot, + key_types, + ) + .await; + + assert!( + res.is_ok(), + "[{}] Expected Ok but got: {:?}", + tc.name, + res.err() + ); + let filenames = res.unwrap(); + + assert_eq!(filenames.len(), 1, "[{}] Expected 1 file", tc.name); + + let filename = filenames[0].to_string_lossy().to_string(); + assert!( + filename.starts_with(tc.expected_prefix), + "[{}] Expected prefix '{}', got '{}'", + tc.name, + tc.expected_prefix, + filename + ); + assert!( + filename.contains(tc.expected_public_key), + "[{}] Expected public key '{}' in '{}'", + tc.name, + tc.expected_public_key, + filename + ); + } + } + + #[tokio::test] + async fn generate_ignores_invalid_key_specs_and_uses_defaults() { + let accounts = create_test_accounts(); + let fs = create_test_fs(); + let scoped_fs = ScopedFilesystem { + fs: &fs, + base_dir: "/tmp/test", + }; + + let key_types = vec![ + "invalid", // Too long + "xxx", // Too short + "audi_xx", // Invalid sceme + ]; + + let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await; + + assert!(res.is_ok()); + let filenames = res.unwrap(); + + // Should fall back to defaults since all specs are invalid + assert!(filenames.len() > 10); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore_key_types.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore_key_types.rs new file mode 100644 index 00000000..324f6a50 --- /dev/null +++ 
b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/keystore_key_types.rs @@ -0,0 +1,282 @@ +use std::{collections::HashMap, fmt::Formatter}; + +use serde::{Deserialize, Serialize}; + +/// Supported cryptographic schemes for keystore keys. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum KeyScheme { + /// Sr25519 scheme + Sr, + /// Ed25519 scheme + Ed, + /// ECDSA scheme + Ec, +} + +impl KeyScheme { + /// Returns the account key suffix used in `NodeAccounts` for this scheme. + pub fn account_key(&self) -> &'static str { + match self { + KeyScheme::Sr => "sr", + KeyScheme::Ed => "ed", + KeyScheme::Ec => "ec", + } + } +} + +impl std::fmt::Display for KeyScheme { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + KeyScheme::Sr => write!(f, "sr"), + KeyScheme::Ed => write!(f, "ed"), + KeyScheme::Ec => write!(f, "ec"), + } + } +} + +impl TryFrom<&str> for KeyScheme { + type Error = String; + + fn try_from(value: &str) -> Result { + match value.to_lowercase().as_str() { + "sr" => Ok(KeyScheme::Sr), + "ed" => Ok(KeyScheme::Ed), + "ec" => Ok(KeyScheme::Ec), + _ => Err(format!("Unsupported key scheme: {}", value)), + } + } +} + +/// A parsed keystore key type. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct KeystoreKeyType { + /// The 4-character key type identifier (e.g., "aura", "babe", "gran"). + pub key_type: String, + /// The cryptographic scheme to use for this key type. + pub scheme: KeyScheme, +} + +impl KeystoreKeyType { + pub fn new(key_type: impl Into, scheme: KeyScheme) -> Self { + Self { + key_type: key_type.into(), + scheme, + } + } +} + +/// Returns the default predefined key schemes for known key types. +/// Special handling for `aura` when `is_asset_hub_polkadot` is true. 
+fn get_predefined_schemes(is_asset_hub_polkadot: bool) -> HashMap<&'static str, KeyScheme> { + let mut schemes = HashMap::new(); + + // aura has special handling for asset-hub-polkadot + if is_asset_hub_polkadot { + schemes.insert("aura", KeyScheme::Ed); + } else { + schemes.insert("aura", KeyScheme::Sr); + } + + schemes.insert("babe", KeyScheme::Sr); + schemes.insert("imon", KeyScheme::Sr); + schemes.insert("gran", KeyScheme::Ed); + schemes.insert("audi", KeyScheme::Sr); + schemes.insert("asgn", KeyScheme::Sr); + schemes.insert("para", KeyScheme::Sr); + schemes.insert("beef", KeyScheme::Ec); + schemes.insert("nmbs", KeyScheme::Sr); // Nimbus + schemes.insert("rand", KeyScheme::Sr); // Randomness (Moonbeam) + schemes.insert("rate", KeyScheme::Ed); // Equilibrium rate module + schemes.insert("acco", KeyScheme::Sr); + schemes.insert("bcsv", KeyScheme::Sr); // BlockchainSrvc (StorageHub) + schemes.insert("ftsv", KeyScheme::Ed); // FileTransferSrvc (StorageHub) + schemes.insert("mixn", KeyScheme::Sr); // Mixnet + + schemes +} + +/// Parses a single keystore key type specification string. +/// +/// Supports two formats: +/// - Short: `audi` - creates key type with predefined default scheme (defaults to `sr` if not predefined) +/// - Long: `audi_sr` - creates key type with explicit scheme +/// +/// Returns `None` if the spec is invalid or doesn't match the expected format. 
+fn parse_key_spec(spec: &str, predefined: &HashMap<&str, KeyScheme>) -> Option { + let spec = spec.trim(); + + // Try parsing as long form first: key_type_scheme (e.g., "audi_sr") + if let Some((key_type, scheme_str)) = spec.split_once('_') { + if key_type.len() != 4 { + return None; + } + + let scheme = KeyScheme::try_from(scheme_str).ok()?; + return Some(KeystoreKeyType::new(key_type, scheme)); + } + + // Try parsing as short form: key_type only (e.g., "audi") + if spec.len() == 4 { + // Look up predefined scheme; default to Sr if not found + let scheme = predefined.get(spec).copied().unwrap_or(KeyScheme::Sr); + return Some(KeystoreKeyType::new(spec, scheme)); + } + + None +} + +/// Parses a list of keystore key type specifications. +/// +/// Each spec can be in short form (`audi`) or long form (`audi_sr`). +/// Invalid specs are silently ignored. +/// +/// If the resulting list is empty, returns the default keystore key types. +pub fn parse_keystore_key_types>( + specs: &[T], + is_asset_hub_polkadot: bool, +) -> Vec { + let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot); + + let parsed: Vec = specs + .iter() + .filter_map(|spec| parse_key_spec(spec.as_ref(), &predefined_schemes)) + .collect(); + + if parsed.is_empty() { + get_default_keystore_key_types(is_asset_hub_polkadot) + } else { + parsed + } +} + +/// Returns the default keystore key types when none are specified. 
+pub fn get_default_keystore_key_types(is_asset_hub_polkadot: bool) -> Vec { + let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot); + let default_keys = [ + "aura", "babe", "imon", "gran", "audi", "asgn", "para", "beef", "nmbs", "rand", "rate", + "mixn", "bcsv", "ftsv", + ]; + + default_keys + .iter() + .filter_map(|key_type| { + predefined_schemes + .get(*key_type) + .map(|scheme| KeystoreKeyType::new(*key_type, *scheme)) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_keystore_key_types_ignores_invalid_specs() { + let specs = vec![ + "audi".to_string(), + "invalid".to_string(), // Too long - ignored + "xxx".to_string(), // Too short - ignored + "xxxx".to_string(), // Unknown key - defaults to sr + "audi_xx".to_string(), // Invalid scheme - ignored + "gran".to_string(), + ]; + + let result = parse_keystore_key_types(&specs, false); + assert_eq!(result.len(), 3); + assert_eq!(result[1], KeystoreKeyType::new("xxxx", KeyScheme::Sr)); // Unknown defaults to sr + assert_eq!(result[2], KeystoreKeyType::new("gran", KeyScheme::Ed)); + } + + #[test] + fn parse_keystore_key_types_returns_specified_keys() { + let specs = vec!["audi".to_string(), "gran".to_string()]; + let res = parse_keystore_key_types(&specs, false); + + assert_eq!(res.len(), 2); + assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr)); + assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Ed)); + } + + #[test] + fn parse_keystore_key_types_mixed_short_and_long_forms() { + let specs = vec![ + "audi".to_string(), + "gran_sr".to_string(), // Override gran's default ed to sr + "gran".to_string(), + "beef".to_string(), + ]; + let res = parse_keystore_key_types(&specs, false); + + assert_eq!(res.len(), 4); + assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr)); + assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Sr)); // Overridden + assert_eq!(res[2], KeystoreKeyType::new("gran", KeyScheme::Ed)); + assert_eq!(res[3], 
KeystoreKeyType::new("beef", KeyScheme::Ec)); + } + + #[test] + fn parse_keystore_key_types_returns_defaults_when_empty() { + let specs: Vec = vec![]; + let res = parse_keystore_key_types(&specs, false); + + // Should return all default keys + assert!(!res.is_empty()); + assert!(res.iter().any(|k| k.key_type == "aura")); + assert!(res.iter().any(|k| k.key_type == "babe")); + assert!(res.iter().any(|k| k.key_type == "gran")); + } + + #[test] + fn parse_keystore_key_types_allows_custom_key_with_explicit_scheme() { + let specs = vec![ + "cust_sr".to_string(), // Custom key with explicit scheme + "audi".to_string(), + ]; + let result = parse_keystore_key_types(&specs, false); + + assert_eq!(result.len(), 2); + assert_eq!(result[0], KeystoreKeyType::new("cust", KeyScheme::Sr)); + assert_eq!(result[1], KeystoreKeyType::new("audi", KeyScheme::Sr)); + } + + #[test] + fn full_workflow_asset_hub_polkadot() { + // For asset-hub-polkadot, aura should default to ed + let specs = vec!["aura".to_string(), "babe".to_string()]; + + let res = parse_keystore_key_types(&specs, true); + + assert_eq!(res.len(), 2); + assert_eq!(res[0].key_type, "aura"); + assert_eq!(res[0].scheme, KeyScheme::Ed); // ed for asset-hub-polkadot + + assert_eq!(res[1].key_type, "babe"); + assert_eq!(res[1].scheme, KeyScheme::Sr); + } + + #[test] + fn full_workflow_custom_key_types() { + let specs = vec![ + "aura".to_string(), // Use default scheme + "gran_sr".to_string(), // Override gran to use sr instead of ed + "cust_ec".to_string(), // Custom key type with ecdsa + ]; + + let res = parse_keystore_key_types(&specs, false); + + assert_eq!(res.len(), 3); + + // aura uses default sr + assert_eq!(res[0].key_type, "aura"); + assert_eq!(res[0].scheme, KeyScheme::Sr); + + // gran overridden to sr + assert_eq!(res[1].key_type, "gran"); + assert_eq!(res[1].scheme, KeyScheme::Sr); + + // custom key with ec + assert_eq!(res[2].key_type, "cust"); + assert_eq!(res[2].scheme, KeyScheme::Ec); + } +} diff --git 
a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/para_artifact.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/para_artifact.rs new file mode 100644 index 00000000..d82695b5 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/para_artifact.rs @@ -0,0 +1,165 @@ +use std::path::{Path, PathBuf}; + +use configuration::types::CommandWithCustomArgs; +use provider::{ + constants::NODE_CONFIG_DIR, + types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile}, + DynNamespace, +}; +use serde::{Deserialize, Serialize}; +use support::fs::FileSystem; +use uuid::Uuid; + +use super::errors::GeneratorError; +use crate::ScopedFilesystem; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) enum ParaArtifactType { + Wasm, + State, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) enum ParaArtifactBuildOption { + Path(String), + Command(String), + CommandWithCustomArgs(CommandWithCustomArgs), +} + +/// Parachain artifact (could be either the genesis state or genesis wasm) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ParaArtifact { + artifact_type: ParaArtifactType, + build_option: ParaArtifactBuildOption, + artifact_path: Option, + // image to use for building the para artifact + image: Option, +} + +impl ParaArtifact { + pub(crate) fn new( + artifact_type: ParaArtifactType, + build_option: ParaArtifactBuildOption, + ) -> Self { + Self { + artifact_type, + build_option, + artifact_path: None, + image: None, + } + } + + pub(crate) fn image(mut self, image: Option) -> Self { + self.image = image; + self + } + + pub(crate) fn artifact_path(&self) -> Option<&PathBuf> { + self.artifact_path.as_ref() + } + + pub(crate) async fn build<'a, T>( + &mut self, + chain_spec_path: Option>, + artifact_path: impl AsRef, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + maybe_output_path: Option, + ) -> Result<(), GeneratorError> + where + T: FileSystem, + { + let (cmd, 
custom_args) = match &self.build_option { + ParaArtifactBuildOption::Path(path) => { + let t = TransferedFile::new(PathBuf::from(path), artifact_path.as_ref().into()); + scoped_fs.copy_files(vec![&t]).await?; + self.artifact_path = Some(artifact_path.as_ref().into()); + return Ok(()); // work done! + }, + ParaArtifactBuildOption::Command(cmd) => (cmd, &vec![]), + ParaArtifactBuildOption::CommandWithCustomArgs(cmd_with_custom_args) => { + ( + &cmd_with_custom_args.cmd().as_str().to_string(), + cmd_with_custom_args.args(), + ) + // (cmd.cmd_as_str().to_string(), cmd.1) + }, + }; + + let generate_subcmd = match self.artifact_type { + ParaArtifactType::Wasm => "export-genesis-wasm", + ParaArtifactType::State => "export-genesis-state", + }; + + // TODO: replace uuid with para_id-random + let temp_name = format!("temp-{}-{}", generate_subcmd, Uuid::new_v4()); + let mut args: Vec = vec![generate_subcmd.into()]; + + let files_to_inject = if let Some(chain_spec_path) = chain_spec_path { + // TODO: we should get the full path from the scoped filesystem + let chain_spec_path_local = format!( + "{}/{}", + ns.base_dir().to_string_lossy(), + chain_spec_path.as_ref().to_string_lossy() + ); + // Remote path to be injected + let chain_spec_path_in_pod = format!( + "{}/{}", + NODE_CONFIG_DIR, + chain_spec_path.as_ref().to_string_lossy() + ); + // Path in the context of the node, this can be different in the context of the providers (e.g native) + let chain_spec_path_in_args = if ns.capabilities().prefix_with_full_path { + // In native + format!( + "{}/{}{}", + ns.base_dir().to_string_lossy(), + &temp_name, + &chain_spec_path_in_pod + ) + } else { + chain_spec_path_in_pod.clone() + }; + + args.push("--chain".into()); + args.push(chain_spec_path_in_args); + + for custom_arg in custom_args { + match custom_arg { + configuration::types::Arg::Flag(flag) => { + args.push(flag.into()); + }, + configuration::types::Arg::Option(flag, flag_value) => { + args.push(flag.into()); + 
args.push(flag_value.into()); + }, + configuration::types::Arg::Array(flag, values) => { + args.push(flag.into()); + values.iter().for_each(|v| args.push(v.into())); + }, + } + } + + vec![TransferedFile::new( + chain_spec_path_local, + chain_spec_path_in_pod, + )] + } else { + vec![] + }; + + let artifact_path_ref = artifact_path.as_ref(); + let generate_command = GenerateFileCommand::new(cmd.as_str(), artifact_path_ref).args(args); + let options = GenerateFilesOptions::with_files( + vec![generate_command], + self.image.clone(), + &files_to_inject, + maybe_output_path, + ) + .temp_name(temp_name); + ns.generate_files(options).await?; + self.artifact_path = Some(artifact_path_ref.into()); + + Ok(()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/port.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/port.rs new file mode 100644 index 00000000..409061e0 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/generators/port.rs @@ -0,0 +1,48 @@ +use std::net::TcpListener; + +use configuration::shared::types::Port; +use support::constants::THIS_IS_A_BUG; + +use super::errors::GeneratorError; +use crate::shared::types::ParkedPort; + +// TODO: (team), we want to continue support ws_port? 
No +enum PortTypes { + Rpc, + P2P, + Prometheus, +} + +pub fn generate(port: Option) -> Result { + let port = port.unwrap_or(0); + let listener = TcpListener::bind(format!("0.0.0.0:{port}")) + .map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?; + let port = listener + .local_addr() + .expect(&format!( + "We should always get the local_addr from the listener {THIS_IS_A_BUG}" + )) + .port(); + Ok(ParkedPort::new(port, listener)) +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn generate_random() { + let port = generate(None).unwrap(); + let listener = port.1.write().unwrap(); + + assert!(listener.is_some()); + } + + #[test] + fn generate_fixed_port() { + let port = generate(Some(33056)).unwrap(); + let listener = port.1.write().unwrap(); + + assert!(listener.is_some()); + assert_eq!(port.0, 33056); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/lib.rs new file mode 100644 index 00000000..d548132c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/lib.rs @@ -0,0 +1,1352 @@ +// TODO(Javier): Remove when we implement the logic in the orchestrator to spawn with the provider. 
+#![allow(dead_code, clippy::expect_fun_call)] + +pub mod errors; +pub mod generators; +pub mod network; +pub mod network_helper; +pub mod tx_helper; + +mod network_spec; +pub mod shared; +mod spawner; +mod utils; + +use std::{ + collections::{HashMap, HashSet, VecDeque}, + env, + net::IpAddr, + path::{Path, PathBuf}, + time::{Duration, SystemTime}, +}; + +use configuration::{NetworkConfig, RegistrationStrategy}; +use errors::OrchestratorError; +use generators::errors::GeneratorError; +use network::{node::NetworkNode, relaychain::Relaychain, teyrchain::Parachain, Network}; +// re-exported +pub use network_spec::NetworkSpec; +use network_spec::{node::NodeSpec, teyrchain::TeyrchainSpec}; +use provider::{ + types::{ProviderCapabilities, TransferedFile}, + DynNamespace, DynProvider, +}; +use serde_json::json; +use support::{ + constants::{ + GRAPH_CONTAINS_DEP, GRAPH_CONTAINS_NAME, INDEGREE_CONTAINS_NAME, QUEUE_NOT_EMPTY, + THIS_IS_A_BUG, + }, + fs::{FileSystem, FileSystemError}, + replacer::{get_tokens_to_replace, has_tokens}, +}; +use tokio::time::timeout; +use tracing::{debug, info, trace, warn}; + +use crate::{ + network::{node::RawNetworkNode, relaychain::RawRelaychain, teyrchain::RawParachain}, + shared::types::RegisterParachainOptions, + spawner::SpawnNodeCtx, +}; +pub struct Orchestrator +where + T: FileSystem + Sync + Send, +{ + filesystem: T, + provider: DynProvider, +} + +impl Orchestrator +where + T: FileSystem + Sync + Send + Clone, +{ + pub fn new(filesystem: T, provider: DynProvider) -> Self { + Self { + filesystem, + provider, + } + } + + pub async fn spawn( + &self, + network_config: NetworkConfig, + ) -> Result, OrchestratorError> { + let global_timeout = network_config.global_settings().network_spawn_timeout(); + let network_spec = NetworkSpec::from_config(&network_config).await?; + + let res = timeout( + Duration::from_secs(global_timeout.into()), + self.spawn_inner(network_spec), + ) + .await + .map_err(|_| 
OrchestratorError::GlobalTimeOut(global_timeout)); + res? + } + + pub async fn spawn_from_spec( + &self, + network_spec: NetworkSpec, + ) -> Result, OrchestratorError> { + let global_timeout = network_spec.global_settings.network_spawn_timeout(); + let res = timeout( + Duration::from_secs(global_timeout as u64), + self.spawn_inner(network_spec), + ) + .await + .map_err(|_| OrchestratorError::GlobalTimeOut(global_timeout)); + res? + } + + pub async fn attach_to_live( + &self, + zombie_json_path: &Path, + ) -> Result, OrchestratorError> { + info!("attaching to live network..."); + info!("reading zombie.json from {:?}", zombie_json_path); + + let zombie_json_content = self.filesystem.read_to_string(zombie_json_path).await?; + let zombie_json: serde_json::Value = serde_json::from_str(&zombie_json_content)?; + + info!("recreating namespace..."); + let ns: DynNamespace = self + .provider + .create_namespace_from_json(&zombie_json) + .await?; + + info!("recreating relaychain..."); + let (relay, initial_spec) = + recreate_relaychain_from_json(&zombie_json, ns.clone(), self.provider.name()).await?; + let relay_nodes = relay.nodes.clone(); + + let mut network = + Network::new_with_relay(relay, ns.clone(), self.filesystem.clone(), initial_spec); + + for node in relay_nodes { + network.insert_node(node); + } + + info!("recreating parachains..."); + let parachains_map = + recreate_parachains_from_json(&zombie_json, ns.clone(), self.provider.name()).await?; + let para_nodes = parachains_map + .values() + .flat_map(|paras| paras.iter().flat_map(|para| para.collators.clone())) + .collect::>(); + + network.set_parachains(parachains_map); + for node in para_nodes { + network.insert_node(node); + } + + Ok(network) + } + + async fn spawn_inner( + &self, + mut network_spec: NetworkSpec, + ) -> Result, OrchestratorError> { + // main driver for spawn the network + debug!(network_spec = ?network_spec,"Network spec to spawn"); + + // TODO: move to Provider trait + 
validate_spec_with_provider_capabilities(&network_spec, self.provider.capabilities()) + .map_err(|err| { + OrchestratorError::InvalidConfigForProvider( + self.provider.name().into(), + err.to_string(), + ) + })?; + + // create namespace + let ns = if let Some(base_dir) = network_spec.global_settings.base_dir() { + self.provider + .create_namespace_with_base_dir(base_dir) + .await? + } else { + self.provider.create_namespace().await? + }; + + // set the spawn_concurrency + let (spawn_concurrency, limited_by_tokens) = calculate_concurrency(&network_spec)?; + + let start_time = SystemTime::now(); + info!("🧰 ns: {}", ns.name()); + info!("🧰 base_dir: {:?}", ns.base_dir()); + info!("🕰 start time: {:?}", start_time); + info!("⚙️ spawn concurrency: {spawn_concurrency} (limited by tokens: {limited_by_tokens})"); + + network_spec + .populate_nodes_available_args(ns.clone()) + .await?; + + let base_dir = ns.base_dir().to_string_lossy(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + // Create chain-spec for relaychain + network_spec + .relaychain + .chain_spec + .build(&ns, &scoped_fs) + .await?; + + debug!("relaychain spec built!"); + // Create parachain artifacts (chain-spec, wasm, state) + let relay_chain_id = network_spec + .relaychain + .chain_spec + .read_chain_id(&scoped_fs) + .await?; + + let relay_chain_name = network_spec.relaychain.chain.as_str().to_owned(); + let base_dir_exists = network_spec.global_settings.base_dir().is_some(); + network_spec + .build_parachain_artifacts(ns.clone(), &scoped_fs, &relay_chain_id, base_dir_exists) + .await?; + + // Gather the parachains to register in genesis and the ones to register with extrinsic + let (para_to_register_in_genesis, para_to_register_with_extrinsic): ( + Vec<&TeyrchainSpec>, + Vec<&TeyrchainSpec>, + ) = network_spec + .parachains + .iter() + .filter(|para| para.registration_strategy != RegistrationStrategy::Manual) + .partition(|para| { + matches!(para.registration_strategy, 
RegistrationStrategy::InGenesis) + }); + + let mut para_artifacts = vec![]; + for para in para_to_register_in_genesis { + let genesis_config = para.get_genesis_config()?; + para_artifacts.push(genesis_config) + } + + // Customize relaychain + network_spec + .relaychain + .chain_spec + .customize_relay( + &network_spec.relaychain, + &network_spec.hrmp_channels, + para_artifacts, + &scoped_fs, + ) + .await?; + + // Build raw version + network_spec + .relaychain + .chain_spec + .build_raw(&ns, &scoped_fs, None) + .await?; + + // override wasm if needed + if let Some(ref wasm_override) = network_spec.relaychain.wasm_override { + network_spec + .relaychain + .chain_spec + .override_code(&scoped_fs, wasm_override) + .await?; + } + + // override raw spec if needed + if let Some(ref raw_spec_override) = network_spec.relaychain.raw_spec_override { + network_spec + .relaychain + .chain_spec + .override_raw_spec(&scoped_fs, raw_spec_override) + .await?; + } + + let (bootnodes, relaynodes) = + split_nodes_by_bootnodes(&network_spec.relaychain.nodes, false); + + // TODO: we want to still supporting spawn a dedicated bootnode?? 
+ let mut ctx = SpawnNodeCtx { + chain_id: &relay_chain_id, + parachain_id: None, + chain: relay_chain_name.as_str(), + role: ZombieRole::Node, + ns: &ns, + scoped_fs: &scoped_fs, + parachain: None, + bootnodes_addr: &vec![], + wait_ready: false, + nodes_by_name: json!({}), + global_settings: &network_spec.global_settings, + }; + + let global_files_to_inject = vec![TransferedFile::new( + PathBuf::from(format!( + "{}/{relay_chain_name}.json", + ns.base_dir().to_string_lossy() + )), + PathBuf::from(format!("/cfg/{relay_chain_name}.json")), + )]; + + let r = Relaychain::new( + relay_chain_name.to_string(), + relay_chain_id.clone(), + PathBuf::from(network_spec.relaychain.chain_spec.raw_path().ok_or( + OrchestratorError::InvariantError("chain-spec raw path should be set now"), + )?), + ); + let mut network = + Network::new_with_relay(r, ns.clone(), self.filesystem.clone(), network_spec.clone()); + + // Initiate the node_ws_url which will be later used in the Parachain_with_extrinsic config + let mut node_ws_url: String = "".to_string(); + + // Calculate the bootnodes addr from the running nodes + let mut bootnodes_addr: Vec = vec![]; + + for level in dependency_levels_among(&bootnodes)? { + let mut running_nodes_per_level = vec![]; + for chunk in level.chunks(spawn_concurrency) { + let spawning_tasks = chunk + .iter() + .map(|node| spawner::spawn_node(node, global_files_to_inject.clone(), &ctx)); + + for node in futures::future::try_join_all(spawning_tasks).await? 
{ + let bootnode_multiaddr = node.multiaddr(); + + bootnodes_addr.push(bootnode_multiaddr.to_string()); + + // Is used in the register_para_options (We need to get this from the relay and not the collators) + if node_ws_url.is_empty() { + node_ws_url.clone_from(&node.ws_uri) + } + + running_nodes_per_level.push(node); + } + } + info!( + "🕰 waiting for level: {:?} to be up...", + level.iter().map(|n| n.name.clone()).collect::>() + ); + + // Wait for all nodes in the current level to be up + let waiting_tasks = running_nodes_per_level.iter().map(|node| { + node.wait_until_is_up(network_spec.global_settings.network_spawn_timeout()) + }); + + let _ = futures::future::try_join_all(waiting_tasks).await?; + + for node in running_nodes_per_level { + // Add the node to the context and `Network` instance + ctx.nodes_by_name[node.name().to_owned()] = serde_json::to_value(&node)?; + network.add_running_node(node, None).await; + } + } + + // Add the bootnodes to the relaychain spec file and ctx + network_spec + .relaychain + .chain_spec + .add_bootnodes(&scoped_fs, &bootnodes_addr) + .await?; + + ctx.bootnodes_addr = &bootnodes_addr; + + for level in dependency_levels_among(&relaynodes)? { + let mut running_nodes_per_level = vec![]; + for chunk in level.chunks(spawn_concurrency) { + let spawning_tasks = chunk + .iter() + .map(|node| spawner::spawn_node(node, global_files_to_inject.clone(), &ctx)); + + for node in futures::future::try_join_all(spawning_tasks).await? 
{ + running_nodes_per_level.push(node); + } + } + info!( + "🕰 waiting for level: {:?} to be up...", + level.iter().map(|n| n.name.clone()).collect::>() + ); + + // Wait for all nodes in the current level to be up + let waiting_tasks = running_nodes_per_level.iter().map(|node| { + node.wait_until_is_up(network_spec.global_settings.network_spawn_timeout()) + }); + + let _ = futures::future::try_join_all(waiting_tasks).await?; + + for node in running_nodes_per_level { + ctx.nodes_by_name[node.name().to_owned()] = serde_json::to_value(&node)?; + network.add_running_node(node, None).await; + } + } + + // spawn paras + for para in network_spec.parachains.iter() { + // Create parachain (in the context of the running network) + let parachain = Parachain::from_spec(para, &global_files_to_inject, &scoped_fs).await?; + let parachain_id = parachain.chain_id.clone(); + + let (bootnodes, collators) = + split_nodes_by_bootnodes(¶.collators, para.no_default_bootnodes); + + // Create `ctx` for spawn parachain nodes + let mut ctx_para = SpawnNodeCtx { + parachain: Some(para), + parachain_id: parachain_id.as_deref(), + role: if para.is_cumulus_based { + ZombieRole::CumulusCollator + } else { + ZombieRole::Collator + }, + bootnodes_addr: &vec![], + ..ctx.clone() + }; + + // Calculate the bootnodes addr from the running nodes + let mut bootnodes_addr: Vec = vec![]; + let mut running_nodes: Vec = vec![]; + + for level in dependency_levels_among(&bootnodes)? { + let mut running_nodes_per_level = vec![]; + for chunk in level.chunks(spawn_concurrency) { + let spawning_tasks = chunk.iter().map(|node| { + spawner::spawn_node(node, parachain.files_to_inject.clone(), &ctx_para) + }); + + for node in futures::future::try_join_all(spawning_tasks).await? 
{ + let bootnode_multiaddr = node.multiaddr(); + + bootnodes_addr.push(bootnode_multiaddr.to_string()); + + running_nodes_per_level.push(node); + } + } + info!( + "🕰 waiting for level: {:?} to be up...", + level.iter().map(|n| n.name.clone()).collect::>() + ); + + // Wait for all nodes in the current level to be up + let waiting_tasks = running_nodes_per_level.iter().map(|node| { + node.wait_until_is_up(network_spec.global_settings.network_spawn_timeout()) + }); + + let _ = futures::future::try_join_all(waiting_tasks).await?; + + for node in running_nodes_per_level { + ctx_para.nodes_by_name[node.name().to_owned()] = serde_json::to_value(&node)?; + running_nodes.push(node); + } + } + + if let Some(para_chain_spec) = para.chain_spec.as_ref() { + para_chain_spec + .add_bootnodes(&scoped_fs, &bootnodes_addr) + .await?; + } + + ctx_para.bootnodes_addr = &bootnodes_addr; + + // Spawn the rest of the nodes + for level in dependency_levels_among(&collators)? { + let mut running_nodes_per_level = vec![]; + for chunk in level.chunks(spawn_concurrency) { + let spawning_tasks = chunk.iter().map(|node| { + spawner::spawn_node(node, parachain.files_to_inject.clone(), &ctx_para) + }); + + for node in futures::future::try_join_all(spawning_tasks).await? 
{ + running_nodes_per_level.push(node); + } + } + info!( + "🕰 waiting for level: {:?} to be up...", + level.iter().map(|n| n.name.clone()).collect::>() + ); + + // Wait for all nodes in the current level to be up + let waiting_tasks = running_nodes_per_level.iter().map(|node| { + node.wait_until_is_up(network_spec.global_settings.network_spawn_timeout()) + }); + + let _ = futures::future::try_join_all(waiting_tasks).await?; + + for node in running_nodes_per_level { + ctx_para.nodes_by_name[node.name().to_owned()] = serde_json::to_value(&node)?; + running_nodes.push(node); + } + } + + let running_para_id = parachain.para_id; + network.add_para(parachain); + for node in running_nodes { + network.add_running_node(node, Some(running_para_id)).await; + } + } + + // TODO: + // - add-ons (introspector/tracing/etc) + + // verify nodes + // network_helper::verifier::verify_nodes(&network.nodes()).await?; + + // Now we need to register the paras with extrinsic from the Vec collected before; + for para in para_to_register_with_extrinsic { + let register_para_options: RegisterParachainOptions = RegisterParachainOptions { + id: para.id, + // This needs to resolve correctly + wasm_path: para + .genesis_wasm + .artifact_path() + .ok_or(OrchestratorError::InvariantError( + "artifact path for wasm must be set at this point", + ))? + .to_path_buf(), + state_path: para + .genesis_state + .artifact_path() + .ok_or(OrchestratorError::InvariantError( + "artifact path for state must be set at this point", + ))? + .to_path_buf(), + node_ws_url: node_ws_url.clone(), + onboard_as_para: para.onboard_as_parachain, + seed: None, // TODO: Seed is passed by? 
+ finalization: false, + }; + + Parachain::register(register_para_options, &scoped_fs).await?; + } + + // - write zombie.json state file + let mut zombie_json = serde_json::to_value(&network)?; + zombie_json["local_base_dir"] = serde_json::value::Value::String(base_dir.to_string()); + zombie_json["ns"] = serde_json::value::Value::String(ns.name().to_string()); + + if let Ok(start_time_ts) = start_time.duration_since(SystemTime::UNIX_EPOCH) { + zombie_json["start_time_ts"] = + serde_json::value::Value::String(start_time_ts.as_millis().to_string()); + } else { + // Just warn, do not propagate the err (this should not happens) + warn!("⚠️ Error getting start_time timestamp"); + } + + scoped_fs + .write("zombie.json", serde_json::to_string_pretty(&zombie_json)?) + .await?; + + if network_spec.global_settings.tear_down_on_failure() { + network.spawn_watching_task(); + } + + Ok(network) + } +} + +// Helpers + +async fn recreate_network_nodes_from_json( + nodes_json: &serde_json::Value, + ns: DynNamespace, + provider_name: &str, +) -> Result, OrchestratorError> { + let raw_nodes: Vec = serde_json::from_value(nodes_json.clone())?; + + let mut nodes = Vec::with_capacity(raw_nodes.len()); + for raw in raw_nodes { + // validate provider tag + let provider_tag = raw + .inner + .get("provider_tag") + .and_then(|v| v.as_str()) + .ok_or_else(|| { + OrchestratorError::InvalidConfig("Missing `provider_tag` in inner node JSON".into()) + })?; + + if provider_tag != provider_name { + return Err(OrchestratorError::InvalidConfigForProvider( + provider_name.to_string(), + provider_tag.to_string(), + )); + } + let inner = ns.spawn_node_from_json(&raw.inner).await?; + let relay_node = NetworkNode::new( + raw.name, + raw.ws_uri, + raw.prometheus_uri, + raw.multiaddr, + raw.spec, + inner, + ); + nodes.push(relay_node); + } + + Ok(nodes) +} + +async fn recreate_relaychain_from_json( + zombie_json: &serde_json::Value, + ns: DynNamespace, + provider_name: &str, +) -> Result<(Relaychain, 
NetworkSpec), OrchestratorError> { + let relay_json = zombie_json + .get("relay") + .ok_or(OrchestratorError::InvalidConfig( + "Missing `relay` field in zombie.json".into(), + ))? + .clone(); + + let mut relay_raw: RawRelaychain = serde_json::from_value(relay_json)?; + + let initial_spec: NetworkSpec = serde_json::from_value( + zombie_json + .get("initial_spec") + .ok_or(OrchestratorError::InvalidConfig( + "Missing `initial_spec` field in zombie.json".into(), + ))? + .clone(), + )?; + + // Populate relay nodes + let nodes = + recreate_network_nodes_from_json(&relay_raw.nodes, ns.clone(), provider_name).await?; + relay_raw.inner.nodes = nodes; + + Ok((relay_raw.inner, initial_spec)) +} + +async fn recreate_parachains_from_json( + zombie_json: &serde_json::Value, + ns: DynNamespace, + provider_name: &str, +) -> Result>, OrchestratorError> { + let paras_json = zombie_json + .get("parachains") + .ok_or(OrchestratorError::InvalidConfig( + "Missing `parachains` field in zombie.json".into(), + ))? 
+ .clone(); + + let raw_paras: HashMap> = serde_json::from_value(paras_json)?; + + let mut parachains_map = HashMap::new(); + + for (id, parachain_entries) in raw_paras { + let mut parsed_vec = Vec::with_capacity(parachain_entries.len()); + + for raw_para in parachain_entries { + let mut para = raw_para.inner; + para.collators = + recreate_network_nodes_from_json(&raw_para.collators, ns.clone(), provider_name) + .await?; + parsed_vec.push(para); + } + + parachains_map.insert(id, parsed_vec); + } + + Ok(parachains_map) +} + +// Split the node list depending if it's bootnode or not +// NOTE: if there isn't a bootnode declared we use the first one +fn split_nodes_by_bootnodes( + nodes: &[NodeSpec], + no_default_bootnodes: bool, +) -> (Vec<&NodeSpec>, Vec<&NodeSpec>) { + // get the bootnodes to spawn first and calculate the bootnode string for use later + let mut bootnodes = vec![]; + let mut other_nodes = vec![]; + nodes.iter().for_each(|node| { + if node.is_bootnode { + bootnodes.push(node) + } else { + other_nodes.push(node) + } + }); + + if bootnodes.is_empty() && !no_default_bootnodes { + bootnodes.push(other_nodes.remove(0)) + } + + (bootnodes, other_nodes) +} + +// Generate a bootnode multiaddress and return as string +fn generate_bootnode_addr( + node: &NetworkNode, + ip: &IpAddr, + port: u16, +) -> Result { + generators::generate_node_bootnode_addr( + &node.spec.peer_id, + ip, + port, + node.inner.args().as_ref(), + &node.spec.p2p_cert_hash, + ) +} +// Validate that the config fulfill all the requirements of the provider +fn validate_spec_with_provider_capabilities( + network_spec: &NetworkSpec, + capabilities: &ProviderCapabilities, +) -> Result<(), anyhow::Error> { + let mut errs: Vec = vec![]; + + if capabilities.requires_image { + // Relaychain + if network_spec.relaychain.default_image.is_none() { + // we should check if each node have an image + let nodes = &network_spec.relaychain.nodes; + if nodes.iter().any(|node| node.image.is_none()) { + 
errs.push(String::from( + "Missing image for node, and not default is set at relaychain", + )); + } + }; + + // Paras + for para in &network_spec.parachains { + if para.default_image.is_none() { + let nodes = ¶.collators; + if nodes.iter().any(|node| node.image.is_none()) { + errs.push(format!( + "Missing image for node, and not default is set at parachain {}", + para.id + )); + } + } + } + } else { + // native + // We need to get all the `cmds` and verify if are part of the path + let mut cmds: HashSet<&str> = Default::default(); + if let Some(cmd) = network_spec.relaychain.default_command.as_ref() { + cmds.insert(cmd.as_str()); + } + for node in network_spec.relaychain().nodes.iter() { + cmds.insert(node.command()); + } + + // Paras + for para in &network_spec.parachains { + if let Some(cmd) = para.default_command.as_ref() { + cmds.insert(cmd.as_str()); + } + + for node in para.collators.iter() { + cmds.insert(node.command()); + } + } + + // now check the binaries + let path = std::env::var("PATH").unwrap_or_default(); // path should always be set + trace!("current PATH: {path}"); + let parts: Vec<_> = path.split(":").collect(); + for cmd in cmds { + let missing = if cmd.contains('/') { + trace!("checking {cmd}"); + if std::fs::metadata(cmd).is_err() { + true + } else { + info!("🔎 We will use the full path {cmd} to spawn nodes."); + false + } + } else { + // should be in the PATH + !parts.iter().any(|part| { + let path_to = format!("{part}/{cmd}"); + trace!("checking {path_to}"); + let check_result = std::fs::metadata(&path_to); + trace!("result {:?}", check_result); + if check_result.is_ok() { + info!("🔎 We will use the cmd: '{cmd}' at path {path_to} to spawn nodes."); + true + } else { + false + } + }) + }; + + if missing { + errs.push(help_msg(cmd)); + } + } + } + + if !errs.is_empty() { + let msg = errs.join("\n"); + return Err(anyhow::anyhow!(format!("Invalid configuration: \n {msg}"))); + } + + Ok(()) +} + +fn help_msg(cmd: &str) -> String { + match cmd { + 
"parachain-template-node" | "solochain-template-node" | "minimal-template-node" => { + format!("Missing binary {cmd}, compile by running: \n\tcargo build --package {cmd} --release") + }, + "polkadot" => { + format!("Missing binary {cmd}, compile by running (in the polkadot-sdk repo): \n\t cargo build --locked --release --features fast-runtime --bin {cmd} --bin polkadot-prepare-worker --bin polkadot-execute-worker") + }, + "polkadot-parachain" => { + format!("Missing binary {cmd}, compile by running (in the polkadot-sdk repo): \n\t cargo build --release --locked -p {cmd}-bin --bin {cmd}") + }, + _ => { + format!("Missing binary {cmd}, please compile it.") + }, + } +} + +/// Allow to set the default concurrency through env var `ZOMBIE_SPAWN_CONCURRENCY` +fn spawn_concurrency_from_env() -> Option { + if let Ok(concurrency) = env::var("ZOMBIE_SPAWN_CONCURRENCY") { + concurrency.parse::().ok() + } else { + None + } +} + +fn calculate_concurrency(spec: &NetworkSpec) -> Result<(usize, bool), anyhow::Error> { + let desired_spawn_concurrency = match ( + spawn_concurrency_from_env(), + spec.global_settings.spawn_concurrency(), + ) { + (Some(n), _) => Some(n), + (None, Some(n)) => Some(n), + _ => None, + }; + + let (spawn_concurrency, limited_by_tokens) = + if let Some(spawn_concurrency) = desired_spawn_concurrency { + if spawn_concurrency == 1 { + (1, false) + } else if has_tokens(&serde_json::to_string(spec)?) { + (1, true) + } else { + (spawn_concurrency, false) + } + } else { + // not set + if has_tokens(&serde_json::to_string(spec)?) { + (1, true) + } else { + // use 100 as max concurrency, we can set a max by provider later + (100, false) + } + }; + + Ok((spawn_concurrency, limited_by_tokens)) +} + +/// Build deterministic dependency **levels** among the given nodes. +/// - Only dependencies **between nodes in `nodes`** are considered. +/// - Unknown/out-of-scope references are ignored. +/// - Self-dependencies are ignored. 
+fn dependency_levels_among<'a>( + nodes: &'a [&'a NodeSpec], +) -> Result>, OrchestratorError> { + let by_name = nodes + .iter() + .map(|n| (n.name.as_str(), *n)) + .collect::>(); + + let mut graph = HashMap::with_capacity(nodes.len()); + let mut indegree = HashMap::with_capacity(nodes.len()); + + for node in nodes { + graph.insert(node.name.as_str(), Vec::new()); + indegree.insert(node.name.as_str(), 0); + } + + // build dependency graph + for &node in nodes { + if let Ok(args_json) = serde_json::to_string(&node.args) { + // collect dependencies + let unique_deps = get_tokens_to_replace(&args_json) + .into_iter() + .filter(|dep| dep != &node.name) + .filter_map(|dep| by_name.get(dep.as_str())) + .map(|&dep_node| dep_node.name.as_str()) + .collect::>(); + + for dep_name in unique_deps { + graph + .get_mut(dep_name) + .expect(&format!("{GRAPH_CONTAINS_DEP} {THIS_IS_A_BUG}")) + .push(node); + *indegree + .get_mut(node.name.as_str()) + .expect(&format!("{INDEGREE_CONTAINS_NAME} {THIS_IS_A_BUG}")) += 1; + } + } + } + + // find all nodes with no dependencies + let mut queue = nodes + .iter() + .filter(|n| { + *indegree + .get(n.name.as_str()) + .expect(&format!("{INDEGREE_CONTAINS_NAME} {THIS_IS_A_BUG}")) + == 0 + }) + .copied() + .collect::>(); + + let mut processed_count = 0; + let mut levels = Vec::new(); + + // Kahn's algorithm + while !queue.is_empty() { + let level_size = queue.len(); + let mut current_level = Vec::with_capacity(level_size); + + for _ in 0..level_size { + let n = queue + .pop_front() + .expect(&format!("{QUEUE_NOT_EMPTY} {THIS_IS_A_BUG}")); + current_level.push(n); + processed_count += 1; + + for &neighbour in graph + .get(n.name.as_str()) + .expect(&format!("{GRAPH_CONTAINS_NAME} {THIS_IS_A_BUG}")) + { + let neighbour_indegree = indegree + .get_mut(neighbour.name.as_str()) + .expect(&format!("{INDEGREE_CONTAINS_NAME} {THIS_IS_A_BUG}")); + *neighbour_indegree -= 1; + + if *neighbour_indegree == 0 { + queue.push_back(neighbour); + } + } + } + + 
current_level.sort_by_key(|n| &n.name); + levels.push(current_level); + } + + // cycles detected, e.g A -> B -> A + if processed_count != nodes.len() { + return Err(OrchestratorError::InvalidConfig( + "Tokens have cyclical dependencies".to_string(), + )); + } + + Ok(levels) +} + +// TODO: get the fs from `DynNamespace` will make this not needed +// but the FileSystem trait isn't object-safe so we can't pass around +// as `dyn FileSystem`. We can refactor or using some `erase` techniques +// to resolve this and remove this struct +// TODO (Loris): Probably we could have a .scoped(base_dir) method on the +// filesystem itself (the trait), so it will return this and we can move this +// directly to the support crate, it can be useful in the future +#[derive(Clone, Debug)] +pub struct ScopedFilesystem<'a, FS: FileSystem> { + fs: &'a FS, + base_dir: &'a str, +} + +impl<'a, FS: FileSystem> ScopedFilesystem<'a, FS> { + pub fn new(fs: &'a FS, base_dir: &'a str) -> Self { + Self { fs, base_dir } + } + + async fn copy_files(&self, files: Vec<&TransferedFile>) -> Result<(), FileSystemError> { + for file in files { + let full_remote_path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + file.remote_path.to_string_lossy() + )); + trace!("coping file: {file}"); + self.fs + .copy(file.local_path.as_path(), full_remote_path) + .await?; + } + Ok(()) + } + + async fn read(&self, file: impl AsRef) -> Result, FileSystemError> { + let file = file.as_ref(); + + let full_path = if file.is_absolute() { + file.to_owned() + } else { + PathBuf::from(format!("{}/{}", self.base_dir, file.to_string_lossy())) + }; + let content = self.fs.read(full_path).await?; + Ok(content) + } + + async fn read_to_string(&self, file: impl AsRef) -> Result { + let file = file.as_ref(); + + let full_path = if file.is_absolute() { + file.to_owned() + } else { + PathBuf::from(format!("{}/{}", self.base_dir, file.to_string_lossy())) + }; + let content = self.fs.read_to_string(full_path).await?; + Ok(content) + 
} + + async fn create_dir(&self, path: impl AsRef) -> Result<(), FileSystemError> { + let path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + path.as_ref().to_string_lossy() + )); + self.fs.create_dir(path).await + } + + async fn create_dir_all(&self, path: impl AsRef) -> Result<(), FileSystemError> { + let path = PathBuf::from(format!( + "{}/{}", + self.base_dir, + path.as_ref().to_string_lossy() + )); + self.fs.create_dir_all(path).await + } + + async fn write( + &self, + path: impl AsRef, + contents: impl AsRef<[u8]> + Send, + ) -> Result<(), FileSystemError> { + let path = path.as_ref(); + + let full_path = if path.is_absolute() { + path.to_owned() + } else { + PathBuf::from(format!("{}/{}", self.base_dir, path.to_string_lossy())) + }; + + self.fs.write(full_path, contents).await + } + + /// Get the full_path in the scoped FS + fn full_path(&self, path: impl AsRef) -> PathBuf { + let path = path.as_ref(); + + let full_path = if path.is_absolute() { + path.to_owned() + } else { + PathBuf::from(format!("{}/{}", self.base_dir, path.to_string_lossy())) + }; + + full_path + } +} + +#[derive(Clone, Debug)] +pub enum ZombieRole { + Temp, + Node, + Bootnode, + Collator, + CumulusCollator, + Companion, +} + +// re-exports +pub use network::{AddCollatorOptions, AddNodeOptions}; +pub use network_helper::metrics; +pub use sc_chain_spec; + +#[cfg(test)] +mod tests { + use configuration::{GlobalSettingsBuilder, NetworkConfigBuilder}; + use lazy_static::lazy_static; + use tokio::sync::Mutex; + + use super::*; + + const ENV_KEY: &str = "ZOMBIE_SPAWN_CONCURRENCY"; + // mutex for test that use env + lazy_static! 
{ + static ref ENV_MUTEX: Mutex<()> = Mutex::new(()); + } + + fn set_env(concurrency: Option) { + if let Some(value) = concurrency { + env::set_var(ENV_KEY, value.to_string()); + } else { + env::remove_var(ENV_KEY); + } + } + + fn generate( + with_image: bool, + with_cmd: Option<&'static str>, + ) -> Result> { + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let mut relay = r + .with_chain("rococo-local") + .with_default_command(with_cmd.unwrap_or("polkadot")); + if with_image { + relay = relay.with_default_image("docker.io/parity/polkadot") + } + + relay + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(2000).cumulus_based(true).with_collator(|n| { + let node = n + .with_name("collator") + .with_command(with_cmd.unwrap_or("polkadot-parachain")); + if with_image { + node.with_image("docker.io/paritypr/test-parachain") + } else { + node + } + }) + }) + .build() + } + + fn get_node_with_dependencies(name: &str, dependencies: Option>) -> NodeSpec { + let mut spec = NodeSpec { + name: name.to_string(), + ..Default::default() + }; + if let Some(dependencies) = dependencies { + for node in dependencies { + spec.args.push( + format!("{{{{ZOMBIE:{}:someField}}}}", node.name) + .as_str() + .into(), + ); + } + } + spec + } + + fn verify_levels(actual_levels: Vec>, expected_levels: Vec>) { + actual_levels + .iter() + .zip(expected_levels) + .for_each(|(actual_level, expected_level)| { + assert_eq!(actual_level.len(), expected_level.len()); + actual_level + .iter() + .zip(expected_level.iter()) + .for_each(|(node, expected_name)| assert_eq!(node.name, *expected_name)); + }); + } + + #[tokio::test] + async fn valid_config_with_image() { + let network_config = generate(true, None).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let caps = ProviderCapabilities { + requires_image: true, + has_resources: false, + prefix_with_full_path: false, + 
use_default_ports_in_cmd: false, + }; + + let valid = validate_spec_with_provider_capabilities(&spec, &caps); + assert!(valid.is_ok()) + } + + #[tokio::test] + async fn invalid_config_without_image() { + let network_config = generate(false, None).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let caps = ProviderCapabilities { + requires_image: true, + has_resources: false, + prefix_with_full_path: false, + use_default_ports_in_cmd: false, + }; + + let valid = validate_spec_with_provider_capabilities(&spec, &caps); + assert!(valid.is_err()) + } + + #[tokio::test] + async fn invalid_config_missing_cmd() { + let network_config = generate(false, Some("other")).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let caps = ProviderCapabilities { + requires_image: false, + has_resources: false, + prefix_with_full_path: false, + use_default_ports_in_cmd: false, + }; + + let valid = validate_spec_with_provider_capabilities(&spec, &caps); + assert!(valid.is_err()) + } + + #[tokio::test] + async fn valid_config_present_cmd() { + let network_config = generate(false, Some("cargo")).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let caps = ProviderCapabilities { + requires_image: false, + has_resources: false, + prefix_with_full_path: false, + use_default_ports_in_cmd: false, + }; + + let valid = validate_spec_with_provider_capabilities(&spec, &caps); + println!("{valid:?}"); + assert!(valid.is_ok()) + } + + #[tokio::test] + async fn default_spawn_concurrency() { + let _g = ENV_MUTEX.lock().await; + set_env(None); + let network_config = generate(false, Some("cargo")).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let (concurrency, _) = calculate_concurrency(&spec).unwrap(); + assert_eq!(concurrency, 100); + } + + #[tokio::test] + async fn set_spawn_concurrency() { + let _g = ENV_MUTEX.lock().await; + set_env(None); + + let network_config = 
generate(false, Some("cargo")).unwrap(); + let mut spec = NetworkSpec::from_config(&network_config).await.unwrap(); + + let global_settings = GlobalSettingsBuilder::new() + .with_spawn_concurrency(4) + .build() + .unwrap(); + + spec.set_global_settings(global_settings); + let (concurrency, limited) = calculate_concurrency(&spec).unwrap(); + assert_eq!(concurrency, 4); + assert!(!limited); + } + + #[tokio::test] + async fn set_spawn_concurrency_but_limited() { + let _g = ENV_MUTEX.lock().await; + set_env(None); + + let network_config = generate(false, Some("cargo")).unwrap(); + let mut spec = NetworkSpec::from_config(&network_config).await.unwrap(); + + let global_settings = GlobalSettingsBuilder::new() + .with_spawn_concurrency(4) + .build() + .unwrap(); + + spec.set_global_settings(global_settings); + let node = spec.relaychain.nodes.first_mut().unwrap(); + node.args + .push("--bootnodes {{ZOMBIE:bob:multiAddress')}}".into()); + let (concurrency, limited) = calculate_concurrency(&spec).unwrap(); + assert_eq!(concurrency, 1); + assert!(limited); + } + + #[tokio::test] + async fn set_spawn_concurrency_from_env() { + let _g = ENV_MUTEX.lock().await; + set_env(Some(10)); + + let network_config = generate(false, Some("cargo")).unwrap(); + let spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let (concurrency, limited) = calculate_concurrency(&spec).unwrap(); + assert_eq!(concurrency, 10); + assert!(!limited); + } + + #[tokio::test] + async fn set_spawn_concurrency_from_env_but_limited() { + let _g = ENV_MUTEX.lock().await; + set_env(Some(12)); + + let network_config = generate(false, Some("cargo")).unwrap(); + let mut spec = NetworkSpec::from_config(&network_config).await.unwrap(); + let node = spec.relaychain.nodes.first_mut().unwrap(); + node.args + .push("--bootnodes {{ZOMBIE:bob:multiAddress')}}".into()); + let (concurrency, limited) = calculate_concurrency(&spec).unwrap(); + assert_eq!(concurrency, 1); + assert!(limited); + } + + #[test] + fn 
dependency_levels_among_should_work() { + // no nodes + assert!(dependency_levels_among(&[]).unwrap().is_empty()); + + // one node + let alice = get_node_with_dependencies("alice", None); + let nodes = [&alice]; + + let levels = dependency_levels_among(&nodes).unwrap(); + let expected = vec![vec!["alice"]]; + + verify_levels(levels, expected); + + // two independent nodes + let alice = get_node_with_dependencies("alice", None); + let bob = get_node_with_dependencies("bob", None); + let nodes = [&alice, &bob]; + + let levels = dependency_levels_among(&nodes).unwrap(); + let expected = vec![vec!["alice", "bob"]]; + + verify_levels(levels, expected); + + // alice -> bob -> charlie + let alice = get_node_with_dependencies("alice", None); + let bob = get_node_with_dependencies("bob", Some(vec![&alice])); + let charlie = get_node_with_dependencies("charlie", Some(vec![&bob])); + let nodes = [&alice, &bob, &charlie]; + + let levels = dependency_levels_among(&nodes).unwrap(); + let expected = vec![vec!["alice"], vec!["bob"], vec!["charlie"]]; + + verify_levels(levels, expected); + + // ┌─> bob + // alice ──| + // └─> charlie + let alice = get_node_with_dependencies("alice", None); + let bob = get_node_with_dependencies("bob", Some(vec![&alice])); + let charlie = get_node_with_dependencies("charlie", Some(vec![&alice])); + let nodes = [&alice, &bob, &charlie]; + + let levels = dependency_levels_among(&nodes).unwrap(); + let expected = vec![vec!["alice"], vec!["bob", "charlie"]]; + + verify_levels(levels, expected); + + // ┌─> bob ──┐ + // alice ──| ├─> dave + // └─> charlie ┘ + let alice = get_node_with_dependencies("alice", None); + let bob = get_node_with_dependencies("bob", Some(vec![&alice])); + let charlie = get_node_with_dependencies("charlie", Some(vec![&alice])); + let dave = get_node_with_dependencies("dave", Some(vec![&charlie, &bob])); + let nodes = [&alice, &bob, &charlie, &dave]; + + let levels = dependency_levels_among(&nodes).unwrap(); + let expected = 
vec![vec!["alice"], vec!["bob", "charlie"], vec!["dave"]]; + + verify_levels(levels, expected); + } + + #[test] + fn dependency_levels_among_should_detect_cycles() { + let mut alice = get_node_with_dependencies("alice", None); + let bob = get_node_with_dependencies("bob", Some(vec![&alice])); + alice.args.push("{{ZOMBIE:bob:someField}}".into()); + + assert!(dependency_levels_among(&[&alice, &bob]).is_err()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network.rs new file mode 100644 index 00000000..3a0694f5 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network.rs @@ -0,0 +1,842 @@ +pub mod chain_upgrade; +pub mod node; +pub mod relaychain; +pub mod teyrchain; + +use std::{cell::RefCell, collections::HashMap, path::PathBuf, rc::Rc, sync::Arc, time::Duration}; + +use configuration::{ + para_states::{Initial, Running}, + shared::{helpers::generate_unique_node_name_from_names, node::EnvVar}, + types::{Arg, Command, Image, Port, ValidationContext}, + ParachainConfig, ParachainConfigBuilder, RegistrationStrategy, +}; +use provider::{types::TransferedFile, DynNamespace, ProviderError}; +use serde::Serialize; +use support::fs::FileSystem; +use tokio::sync::RwLock; +use tracing::{error, warn}; + +use self::{node::NetworkNode, relaychain::Relaychain, teyrchain::Parachain}; +use crate::{ + generators::chain_spec::ChainSpec, + network_spec::{self, NetworkSpec}, + shared::{ + constants::{NODE_MONITORING_FAILURE_THRESHOLD_SECONDS, NODE_MONITORING_INTERVAL_SECONDS}, + macros, + types::{ChainDefaultContext, RegisterParachainOptions}, + }, + spawner::{self, SpawnNodeCtx}, + ScopedFilesystem, ZombieRole, +}; + +#[derive(Serialize)] +pub struct Network { + #[serde(skip)] + ns: DynNamespace, + #[serde(skip)] + filesystem: T, + relay: Relaychain, + initial_spec: NetworkSpec, + parachains: HashMap>, + #[serde(skip)] + nodes_by_name: HashMap, + #[serde(skip)] + 
nodes_to_watch: Arc>>, +} + +impl std::fmt::Debug for Network { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Network") + .field("ns", &"ns_skipped") + .field("relay", &self.relay) + .field("initial_spec", &self.initial_spec) + .field("parachains", &self.parachains) + .field("nodes_by_name", &self.nodes_by_name) + .finish() + } +} + +macros::create_add_options!(AddNodeOptions { + chain_spec: Option, + override_eth_key: Option +}); + +macros::create_add_options!(AddCollatorOptions { + chain_spec: Option, + chain_spec_relay: Option, + override_eth_key: Option +}); + +impl Network { + pub(crate) fn new_with_relay( + relay: Relaychain, + ns: DynNamespace, + fs: T, + initial_spec: NetworkSpec, + ) -> Self { + Self { + ns, + filesystem: fs, + relay, + initial_spec, + parachains: Default::default(), + nodes_by_name: Default::default(), + nodes_to_watch: Default::default(), + } + } + + // Pubic API + pub fn ns_name(&self) -> String { + self.ns.name().to_string() + } + + pub fn base_dir(&self) -> Option<&str> { + self.ns.base_dir().to_str() + } + + pub fn relaychain(&self) -> &Relaychain { + &self.relay + } + + // Teardown the network + pub async fn destroy(self) -> Result<(), ProviderError> { + self.ns.destroy().await + } + + /// Add a node to the relaychain + // The new node is added to the running network instance. 
+ /// # Example: + /// ```rust + /// # use provider::NativeProvider; + /// # use support::{fs::local::LocalFileSystem}; + /// # use zombienet_orchestrator::{errors, AddNodeOptions, Orchestrator}; + /// # use configuration::NetworkConfig; + /// # async fn example() -> Result<(), errors::OrchestratorError> { + /// # let provider = NativeProvider::new(LocalFileSystem {}); + /// # let orchestrator = Orchestrator::new(LocalFileSystem {}, provider); + /// # let config = NetworkConfig::load_from_toml("config.toml")?; + /// let mut network = orchestrator.spawn(config).await?; + /// + /// // Create the options to add the new node + /// let opts = AddNodeOptions { + /// rpc_port: Some(9444), + /// is_validator: true, + /// ..Default::default() + /// }; + /// + /// network.add_node("new-node", opts).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn add_node( + &mut self, + name: impl Into, + options: AddNodeOptions, + ) -> Result<(), anyhow::Error> { + let name = generate_unique_node_name_from_names( + name, + &mut self.nodes_by_name.keys().cloned().collect(), + ); + + let relaychain = self.relaychain(); + + let chain_spec_path = if let Some(chain_spec_custom_path) = &options.chain_spec { + chain_spec_custom_path.clone() + } else { + PathBuf::from(format!( + "{}/{}.json", + self.ns.base_dir().to_string_lossy(), + relaychain.chain + )) + }; + + let chain_context = ChainDefaultContext { + default_command: self.initial_spec.relaychain.default_command.as_ref(), + default_image: self.initial_spec.relaychain.default_image.as_ref(), + default_resources: self.initial_spec.relaychain.default_resources.as_ref(), + default_db_snapshot: self.initial_spec.relaychain.default_db_snapshot.as_ref(), + default_args: self.initial_spec.relaychain.default_args.iter().collect(), + }; + + let mut node_spec = network_spec::node::NodeSpec::from_ad_hoc( + &name, + options.into(), + &chain_context, + false, + false, + )?; + + node_spec.available_args_output = Some( + self.initial_spec + 
.node_available_args_output(&node_spec, self.ns.clone()) + .await?, + ); + + let base_dir = self.ns.base_dir().to_string_lossy(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + + let ctx = SpawnNodeCtx { + chain_id: &relaychain.chain_id, + parachain_id: None, + chain: &relaychain.chain, + role: ZombieRole::Node, + ns: &self.ns, + scoped_fs: &scoped_fs, + parachain: None, + bootnodes_addr: &vec![], + wait_ready: true, + nodes_by_name: serde_json::to_value(&self.nodes_by_name)?, + global_settings: &self.initial_spec.global_settings, + }; + + let global_files_to_inject = vec![TransferedFile::new( + chain_spec_path, + PathBuf::from(format!("/cfg/{}.json", relaychain.chain)), + )]; + + let node = spawner::spawn_node(&node_spec, global_files_to_inject, &ctx).await?; + + // TODO: register the new node as validator in the relaychain + // STEPS: + // - check balance of `stash` derivation for validator account + // - call rotate_keys on the new validator + // - call setKeys on the new validator + // if node_spec.is_validator { + // let running_node = self.relay.nodes.first().unwrap(); + // // tx_helper::validator_actions::register(vec![&node], &running_node.ws_uri, None).await?; + // } + + // Let's make sure node is up before adding + node.wait_until_is_up(self.initial_spec.global_settings.network_spawn_timeout()) + .await?; + + // Add node to relaychain data + self.add_running_node(node.clone(), None).await; + + Ok(()) + } + + /// Add a new collator to a parachain + /// + /// NOTE: if more parachains with given id available (rare corner case) + /// then it adds collator to the first parachain + /// + /// # Example: + /// ```rust + /// # use provider::NativeProvider; + /// # use support::{fs::local::LocalFileSystem}; + /// # use zombienet_orchestrator::{errors, AddCollatorOptions, Orchestrator}; + /// # use configuration::NetworkConfig; + /// # async fn example() -> Result<(), anyhow::Error> { + /// # let provider = 
NativeProvider::new(LocalFileSystem {}); + /// # let orchestrator = Orchestrator::new(LocalFileSystem {}, provider); + /// # let config = NetworkConfig::load_from_toml("config.toml")?; + /// let mut network = orchestrator.spawn(config).await?; + /// + /// let col_opts = AddCollatorOptions { + /// command: Some("polkadot-parachain".try_into()?), + /// ..Default::default() + /// }; + /// + /// network.add_collator("new-col-1", col_opts, 100).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn add_collator( + &mut self, + name: impl Into, + options: AddCollatorOptions, + para_id: u32, + ) -> Result<(), anyhow::Error> { + let name = generate_unique_node_name_from_names( + name, + &mut self.nodes_by_name.keys().cloned().collect(), + ); + let spec = self + .initial_spec + .parachains + .iter() + .find(|para| para.id == para_id) + .ok_or(anyhow::anyhow!(format!("parachain: {para_id} not found!")))?; + let role = if spec.is_cumulus_based { + ZombieRole::CumulusCollator + } else { + ZombieRole::Collator + }; + let chain_context = ChainDefaultContext { + default_command: spec.default_command.as_ref(), + default_image: spec.default_image.as_ref(), + default_resources: spec.default_resources.as_ref(), + default_db_snapshot: spec.default_db_snapshot.as_ref(), + default_args: spec.default_args.iter().collect(), + }; + + let parachain = self + .parachains + .get_mut(¶_id) + .ok_or(anyhow::anyhow!(format!("parachain: {para_id} not found!")))? + .get_mut(0) + .ok_or(anyhow::anyhow!(format!("parachain: {para_id} not found!")))?; + + let base_dir = self.ns.base_dir().to_string_lossy(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + + // TODO: we want to still supporting spawn a dedicated bootnode?? 
+ let ctx = SpawnNodeCtx { + chain_id: &self.relay.chain_id, + parachain_id: parachain.chain_id.as_deref(), + chain: &self.relay.chain, + role, + ns: &self.ns, + scoped_fs: &scoped_fs, + parachain: Some(spec), + bootnodes_addr: &vec![], + wait_ready: true, + nodes_by_name: serde_json::to_value(&self.nodes_by_name)?, + global_settings: &self.initial_spec.global_settings, + }; + + let relaychain_spec_path = if let Some(chain_spec_custom_path) = &options.chain_spec_relay { + chain_spec_custom_path.clone() + } else { + PathBuf::from(format!( + "{}/{}.json", + self.ns.base_dir().to_string_lossy(), + self.relay.chain + )) + }; + + let mut global_files_to_inject = vec![TransferedFile::new( + relaychain_spec_path, + PathBuf::from(format!("/cfg/{}.json", self.relay.chain)), + )]; + + let para_chain_spec_local_path = if let Some(para_chain_spec_custom) = &options.chain_spec { + Some(para_chain_spec_custom.clone()) + } else if let Some(para_spec_path) = ¶chain.chain_spec_path { + Some(PathBuf::from(format!( + "{}/{}", + self.ns.base_dir().to_string_lossy(), + para_spec_path.to_string_lossy() + ))) + } else { + None + }; + + if let Some(para_spec_path) = para_chain_spec_local_path { + global_files_to_inject.push(TransferedFile::new( + para_spec_path, + PathBuf::from(format!("/cfg/{para_id}.json")), + )); + } + + let mut node_spec = network_spec::node::NodeSpec::from_ad_hoc( + name, + options.into(), + &chain_context, + true, + spec.is_evm_based, + )?; + + node_spec.available_args_output = Some( + self.initial_spec + .node_available_args_output(&node_spec, self.ns.clone()) + .await?, + ); + + let node = spawner::spawn_node(&node_spec, global_files_to_inject, &ctx).await?; + + // Let's make sure node is up before adding + node.wait_until_is_up(self.initial_spec.global_settings.network_spawn_timeout()) + .await?; + + parachain.collators.push(node.clone()); + self.add_running_node(node, None).await; + + Ok(()) + } + + /// Get a parachain config builder from a running network + /// 
+ /// This allow you to build a new parachain config to be deployed into + /// the running network. + pub fn para_config_builder(&self) -> ParachainConfigBuilder { + let used_ports = self + .nodes_iter() + .map(|node| node.spec()) + .flat_map(|spec| { + [ + spec.ws_port.0, + spec.rpc_port.0, + spec.prometheus_port.0, + spec.p2p_port.0, + ] + }) + .collect(); + + let used_nodes_names = self.nodes_by_name.keys().cloned().collect(); + + // need to inverse logic of generate_unique_para_id + let used_para_ids = self + .parachains + .iter() + .map(|(id, paras)| (*id, paras.len().saturating_sub(1) as u8)) + .collect(); + + let context = ValidationContext { + used_ports, + used_nodes_names, + used_para_ids, + }; + let context = Rc::new(RefCell::new(context)); + + ParachainConfigBuilder::new_with_running(context) + } + + /// Add a new parachain to the running network + /// + /// # Arguments + /// * `para_config` - Parachain configuration to deploy + /// * `custom_relaychain_spec` - Optional path to a custom relaychain spec to use + /// * `custom_parchain_fs_prefix` - Optional prefix to use when artifacts are created + /// + /// + /// # Example: + /// ```rust + /// # use anyhow::anyhow; + /// # use provider::NativeProvider; + /// # use support::{fs::local::LocalFileSystem}; + /// # use zombienet_orchestrator::{errors, AddCollatorOptions, Orchestrator}; + /// # use configuration::NetworkConfig; + /// # async fn example() -> Result<(), anyhow::Error> { + /// # let provider = NativeProvider::new(LocalFileSystem {}); + /// # let orchestrator = Orchestrator::new(LocalFileSystem {}, provider); + /// # let config = NetworkConfig::load_from_toml("config.toml")?; + /// let mut network = orchestrator.spawn(config).await?; + /// let para_config = network + /// .para_config_builder() + /// .with_id(100) + /// .with_default_command("polkadot-parachain") + /// .with_collator(|c| c.with_name("col-100-1")) + /// .build() + /// .map_err(|_e| anyhow!("Building config"))?; + /// + /// 
network.add_parachain(¶_config, None, None).await?; + /// + /// # Ok(()) + /// # } + /// ``` + pub async fn add_parachain( + &mut self, + para_config: &ParachainConfig, + custom_relaychain_spec: Option, + custom_parchain_fs_prefix: Option, + ) -> Result<(), anyhow::Error> { + let base_dir = self.ns.base_dir().to_string_lossy().to_string(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + + let mut global_files_to_inject = vec![]; + + // get relaychain id + let relay_chain_id = if let Some(custom_path) = custom_relaychain_spec { + // use this file as relaychain spec + global_files_to_inject.push(TransferedFile::new( + custom_path.clone(), + PathBuf::from(format!("/cfg/{}.json", self.relaychain().chain)), + )); + let content = std::fs::read_to_string(custom_path)?; + ChainSpec::chain_id_from_spec(&content)? + } else { + global_files_to_inject.push(TransferedFile::new( + PathBuf::from(format!( + "{}/{}", + scoped_fs.base_dir, + self.relaychain().chain_spec_path.to_string_lossy() + )), + PathBuf::from(format!("/cfg/{}.json", self.relaychain().chain)), + )); + self.relay.chain_id.clone() + }; + + let mut para_spec = network_spec::teyrchain::TeyrchainSpec::from_config( + para_config, + relay_chain_id.as_str().try_into()?, + )?; + + let chain_spec_raw_path = para_spec + .build_chain_spec(&relay_chain_id, &self.ns, &scoped_fs) + .await?; + + // Para artifacts + let para_path_prefix = if let Some(custom_prefix) = custom_parchain_fs_prefix { + custom_prefix + } else { + para_spec.id.to_string() + }; + + scoped_fs.create_dir(¶_path_prefix).await?; + // create wasm/state + para_spec + .genesis_state + .build( + chain_spec_raw_path.as_ref(), + format!("{}/genesis-state", ¶_path_prefix), + &self.ns, + &scoped_fs, + None, + ) + .await?; + para_spec + .genesis_wasm + .build( + chain_spec_raw_path.as_ref(), + format!("{}/para_spec-wasm", ¶_path_prefix), + &self.ns, + &scoped_fs, + None, + ) + .await?; + + let parachain = + Parachain::from_spec(¶_spec, 
&global_files_to_inject, &scoped_fs).await?; + let parachain_id = parachain.chain_id.clone(); + + // Create `ctx` for spawn the nodes + let ctx_para = SpawnNodeCtx { + parachain: Some(¶_spec), + parachain_id: parachain_id.as_deref(), + role: if para_spec.is_cumulus_based { + ZombieRole::CumulusCollator + } else { + ZombieRole::Collator + }, + bootnodes_addr: ¶_config + .bootnodes_addresses() + .iter() + .map(|&a| a.to_string()) + .collect(), + chain_id: &self.relaychain().chain_id, + chain: &self.relaychain().chain, + ns: &self.ns, + scoped_fs: &scoped_fs, + wait_ready: false, + nodes_by_name: serde_json::to_value(&self.nodes_by_name)?, + global_settings: &self.initial_spec.global_settings, + }; + + // Register the parachain to the running network + let first_node_url = self + .relaychain() + .nodes + .first() + .ok_or(anyhow::anyhow!( + "At least one node of the relaychain should be running" + ))? + .ws_uri(); + + if para_config.registration_strategy() == Some(&RegistrationStrategy::UsingExtrinsic) { + let register_para_options = RegisterParachainOptions { + id: parachain.para_id, + // This needs to resolve correctly + wasm_path: para_spec + .genesis_wasm + .artifact_path() + .ok_or(anyhow::anyhow!( + "artifact path for wasm must be set at this point", + ))? + .to_path_buf(), + state_path: para_spec + .genesis_state + .artifact_path() + .ok_or(anyhow::anyhow!( + "artifact path for state must be set at this point", + ))? + .to_path_buf(), + node_ws_url: first_node_url.to_string(), + onboard_as_para: para_spec.onboard_as_parachain, + seed: None, // TODO: Seed is passed by? 
+ finalization: false, + }; + + Parachain::register(register_para_options, &scoped_fs).await?; + } + + // Spawn the nodes + let spawning_tasks = para_spec + .collators + .iter() + .map(|node| spawner::spawn_node(node, parachain.files_to_inject.clone(), &ctx_para)); + + let running_nodes = futures::future::try_join_all(spawning_tasks).await?; + + // Let's make sure nodes are up before adding them + let waiting_tasks = running_nodes.iter().map(|node| { + node.wait_until_is_up(self.initial_spec.global_settings.network_spawn_timeout()) + }); + + let _ = futures::future::try_join_all(waiting_tasks).await?; + + let running_para_id = parachain.para_id; + self.add_para(parachain); + for node in running_nodes { + self.add_running_node(node, Some(running_para_id)).await; + } + + Ok(()) + } + + /// Register a parachain, which has already been added to the network (with manual registration + /// strategy) + /// + /// # Arguments + /// * `para_id` - Parachain Id + /// + /// + /// # Example: + /// ```rust + /// # use anyhow::anyhow; + /// # use provider::NativeProvider; + /// # use support::{fs::local::LocalFileSystem}; + /// # use zombienet_orchestrator::Orchestrator; + /// # use configuration::{NetworkConfig, NetworkConfigBuilder, RegistrationStrategy}; + /// # async fn example() -> Result<(), anyhow::Error> { + /// # let provider = NativeProvider::new(LocalFileSystem {}); + /// # let orchestrator = Orchestrator::new(LocalFileSystem {}, provider); + /// # let config = NetworkConfigBuilder::new() + /// # .with_relaychain(|r| { + /// # r.with_chain("rococo-local") + /// # .with_default_command("polkadot") + /// # .with_node(|node| node.with_name("alice")) + /// # }) + /// # .with_parachain(|p| { + /// # p.with_id(100) + /// # .with_registration_strategy(RegistrationStrategy::Manual) + /// # .with_default_command("test-parachain") + /// # .with_collator(|n| n.with_name("dave").validator(false)) + /// # }) + /// # .build() + /// # .map_err(|_e| anyhow!("Building config"))?; + /// 
let mut network = orchestrator.spawn(config).await?; + /// + /// network.register_parachain(100).await?; + /// + /// # Ok(()) + /// # } + /// ``` + pub async fn register_parachain(&mut self, para_id: u32) -> Result<(), anyhow::Error> { + let para = self + .initial_spec + .parachains + .iter() + .find(|p| p.id == para_id) + .ok_or(anyhow::anyhow!( + "no parachain with id = {para_id} available", + ))?; + let para_genesis_config = para.get_genesis_config()?; + let first_node_url = self + .relaychain() + .nodes + .first() + .ok_or(anyhow::anyhow!( + "At least one node of the relaychain should be running" + ))? + .ws_uri(); + let register_para_options: RegisterParachainOptions = RegisterParachainOptions { + id: para_id, + // This needs to resolve correctly + wasm_path: para_genesis_config.wasm_path.clone(), + state_path: para_genesis_config.state_path.clone(), + node_ws_url: first_node_url.to_string(), + onboard_as_para: para_genesis_config.as_parachain, + seed: None, // TODO: Seed is passed by? + finalization: false, + }; + let base_dir = self.ns.base_dir().to_string_lossy().to_string(); + let scoped_fs = ScopedFilesystem::new(&self.filesystem, &base_dir); + Parachain::register(register_para_options, &scoped_fs).await?; + + Ok(()) + } + + // deregister and stop the collator? 
+
+    // remove_parachain()
+
+    pub fn get_node(&self, name: impl Into<String>) -> Result<&NetworkNode, anyhow::Error> {
+        let name = name.into();
+        if let Some(node) = self.nodes_iter().find(|&n| n.name == name) {
+            return Ok(node);
+        }
+
+        let list = self
+            .nodes_iter()
+            .map(|n| &n.name)
+            .cloned()
+            .collect::<Vec<String>>()
+            .join(", ");
+
+        Err(anyhow::anyhow!(
+            "can't find node with name: {name:?}, should be one of {list}"
+        ))
+    }
+
+    pub fn get_node_mut(
+        &mut self,
+        name: impl Into<String>,
+    ) -> Result<&mut NetworkNode, anyhow::Error> {
+        let name = name.into();
+        self.nodes_iter_mut()
+            .find(|n| n.name == name)
+            .ok_or(anyhow::anyhow!("can't find node with name: {name:?}"))
+    }
+
+    pub fn nodes(&self) -> Vec<&NetworkNode> {
+        self.nodes_by_name.values().collect::<Vec<&NetworkNode>>()
+    }
+
+    pub async fn detach(&self) {
+        self.ns.detach().await
+    }
+
+    // Internal API
+    pub(crate) async fn add_running_node(&mut self, node: NetworkNode, para_id: Option<u32>) {
+        if let Some(para_id) = para_id {
+            if let Some(para) = self.parachains.get_mut(&para_id).and_then(|p| p.get_mut(0)) {
+                para.collators.push(node.clone());
+            } else {
+                // is the first node of the para, let create the entry
+                unreachable!()
+            }
+        } else {
+            self.relay.nodes.push(node.clone());
+        }
+        // TODO: we should hold a ref to the node in the vec in the future.
+        node.set_is_running(true);
+        let node_name = node.name.clone();
+        self.nodes_by_name.insert(node_name, node.clone());
+        self.nodes_to_watch.write().await.push(node);
+    }
+
+    pub(crate) fn add_para(&mut self, para: Parachain) {
+        self.parachains.entry(para.para_id).or_default().push(para);
+    }
+
+    pub fn name(&self) -> &str {
+        self.ns.name()
+    }
+
+    /// Get a first parachain from the list of the parachains with specified id.
+    /// NOTE!
+    /// Usually the list will contain only one parachain.
+    /// Multiple parachains with the same id is a corner case.
+    /// If this is the case then one can get such parachain with
+    /// `parachain_by_unique_id()` method
+    ///
+    /// # Arguments
+    /// * `para_id` - Parachain Id
+    pub fn parachain(&self, para_id: u32) -> Option<&Parachain> {
+        self.parachains.get(&para_id)?.first()
+    }
+
+    /// Get a parachain by its unique id.
+    ///
+    /// This is particularly useful if there are multiple parachains
+    /// with the same id (this is a rare corner case).
+    ///
+    /// # Arguments
+    /// * `unique_id` - unique id of the parachain
+    pub fn parachain_by_unique_id(&self, unique_id: impl AsRef<str>) -> Option<&Parachain> {
+        self.parachains
+            .values()
+            .flat_map(|p| p.iter())
+            .find(|p| p.unique_id == unique_id.as_ref())
+    }
+
+    pub fn parachains(&self) -> Vec<&Parachain> {
+        self.parachains.values().flatten().collect()
+    }
+
+    pub(crate) fn nodes_iter(&self) -> impl Iterator<Item = &NetworkNode> {
+        self.relay.nodes.iter().chain(
+            self.parachains
+                .values()
+                .flat_map(|p| p.iter())
+                .flat_map(|p| &p.collators),
+        )
+    }
+
+    pub(crate) fn nodes_iter_mut(&mut self) -> impl Iterator<Item = &mut NetworkNode> {
+        self.relay.nodes.iter_mut().chain(
+            self.parachains
+                .values_mut()
+                .flat_map(|p| p.iter_mut())
+                .flat_map(|p| &mut p.collators),
+        )
+    }
+
+    /// Waits given number of seconds until all nodes in the network report that they are
+    /// up and running.
+    ///
+    /// # Arguments
+    /// * `timeout_secs` - The number of seconds to wait.
+    ///
+    /// # Returns
+    /// * `Ok()` if the node is up before timeout occured.
+    /// * `Err(e)` if timeout or other error occurred while waiting.
+    pub async fn wait_until_is_up(&self, timeout_secs: u64) -> Result<(), anyhow::Error> {
+        let handles = self
+            .nodes_iter()
+            .map(|node| node.wait_until_is_up(timeout_secs));
+
+        futures::future::try_join_all(handles).await?;
+
+        Ok(())
+    }
+
+    pub(crate) fn spawn_watching_task(&self) {
+        let nodes_to_watch = Arc::clone(&self.nodes_to_watch);
+        let ns = Arc::clone(&self.ns);
+
+        tokio::spawn(async move {
+            loop {
+                tokio::time::sleep(Duration::from_secs(NODE_MONITORING_INTERVAL_SECONDS)).await;
+
+                let all_running = {
+                    let guard = nodes_to_watch.read().await;
+                    let nodes = guard.iter().filter(|n| n.is_running()).collect::<Vec<_>>();
+
+                    let all_running =
+                        futures::future::try_join_all(nodes.iter().map(|n| {
+                            n.wait_until_is_up(NODE_MONITORING_FAILURE_THRESHOLD_SECONDS)
+                        }))
+                        .await;
+
+                    // Re-check `is_running` to make sure we don't kill the network unnecessarily
+                    if nodes.iter().any(|n| !n.is_running()) {
+                        continue;
+                    } else {
+                        all_running
+                    }
+                };
+
+                if let Err(e) = all_running {
+                    warn!("\n\t🧟 One of the nodes crashed: {e}. tearing the network down...");
+
+                    if let Err(e) = ns.destroy().await {
+                        error!("an error occurred during network teardown: {}", e);
+                    }
+
+                    std::process::exit(1);
+                }
+            }
+        });
+    }
+
+    pub(crate) fn set_parachains(&mut self, parachains: HashMap<u32, Vec<Parachain>>) {
+        self.parachains = parachains;
+    }
+
+    pub(crate) fn insert_node(&mut self, node: NetworkNode) {
+        self.nodes_by_name.insert(node.name.clone(), node);
+    }
+}
diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/chain_upgrade.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/chain_upgrade.rs
new file mode 100644
index 00000000..bfa9a474
--- /dev/null
+++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/chain_upgrade.rs
@@ -0,0 +1,41 @@
+use std::str::FromStr;
+
+use anyhow::anyhow;
+use async_trait::async_trait;
+use pezkuwi_subxt_signer::{sr25519::Keypair, SecretUri};
+
+use super::node::NetworkNode;
+use crate::{shared::types::RuntimeUpgradeOptions, tx_helper};
+
+#[async_trait]
+pub trait ChainUpgrade {
+    /// Perform a runtime upgrade (with sudo)
+    ///
+    /// This call 'System.set_code_without_checks' wrapped in
+    /// 'Sudo.sudo_unchecked_weight'
+    async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error>;
+
+    /// Perform a runtime upgrade (with sudo), inner call with the node pass as arg.
+    ///
+    /// This call 'System.set_code_without_checks' wrapped in
+    /// 'Sudo.sudo_unchecked_weight'
+    async fn perform_runtime_upgrade(
+        &self,
+        node: &NetworkNode,
+        options: RuntimeUpgradeOptions,
+    ) -> Result<(), anyhow::Error> {
+        let sudo = if let Some(possible_seed) = options.seed {
+            Keypair::from_secret_key(possible_seed)
+                .map_err(|_| anyhow!("seed should return a Keypair"))?
+        } else {
+            let uri = SecretUri::from_str("//Alice")?;
+            Keypair::from_uri(&uri).map_err(|_| anyhow!("'//Alice' should return a Keypair"))?
+        };
+
+        let wasm_data = options.wasm.get_asset().await?;
+
+        tx_helper::runtime_upgrade::upgrade(node, &wasm_data, &sudo).await?;
+
+        Ok(())
+    }
+}
diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/node.rs
new file mode 100644
index 00000000..901a072d
--- /dev/null
+++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/node.rs
@@ -0,0 +1,1176 @@
+use std::{
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+
+use anyhow::anyhow;
+use fancy_regex::Regex;
+use glob_match::glob_match;
+use pezkuwi_subxt::{backend::rpc::RpcClient, OnlineClient};
+use prom_metrics_parser::MetricMap;
+use provider::DynNode;
+use serde::{Deserialize, Serialize, Serializer};
+use support::net::{skip_err_while_waiting, wait_ws_ready};
+use thiserror::Error;
+use tokio::sync::RwLock;
+use tracing::{debug, trace};
+
+use crate::{network_spec::node::NodeSpec, tx_helper::client::get_client_from_url};
+
+type BoxedClosure = Box<dyn Fn(&str) -> Result<bool, anyhow::Error> + Send + Sync>;
+
+#[derive(Error, Debug)]
+pub enum NetworkNodeError {
+    #[error("metric '{0}' not found!")]
+    MetricNotFound(String),
+}
+
+#[derive(Clone, Serialize)]
+pub struct NetworkNode {
+    #[serde(serialize_with = "serialize_provider_node")]
+    pub(crate) inner: DynNode,
+    // TODO: do we need the full spec here?
+    // Maybe a reduce set of values.
+    pub(crate) spec: NodeSpec,
+    pub(crate) name: String,
+    pub(crate) ws_uri: String,
+    pub(crate) multiaddr: String,
+    pub(crate) prometheus_uri: String,
+    #[serde(skip)]
+    metrics_cache: Arc<RwLock<MetricMap>>,
+    #[serde(skip)]
+    is_running: Arc<AtomicBool>,
+}
+
+#[derive(Deserialize)]
+pub(crate) struct RawNetworkNode {
+    pub(crate) name: String,
+    pub(crate) ws_uri: String,
+    pub(crate) prometheus_uri: String,
+    pub(crate) multiaddr: String,
+    pub(crate) spec: NodeSpec,
+    pub(crate) inner: serde_json::Value,
+}
+
+/// Result of waiting for a certain number of log lines to appear.
+/// +/// Indicates whether the log line count condition was met within the timeout period. +/// +/// # Variants +/// - `TargetReached(count)` – The predicate condition was satisfied within the timeout. +/// * `count`: The number of matching log lines at the time of satisfaction. +/// - `TargetFailed(count)` – The condition was not met within the timeout. +/// * `count`: The final number of matching log lines at timeout expiration. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum LogLineCount { + TargetReached(u32), + TargetFailed(u32), +} + +impl LogLineCount { + pub fn success(&self) -> bool { + match self { + Self::TargetReached(..) => true, + Self::TargetFailed(..) => false, + } + } +} + +/// Configuration for controlling log line count waiting behavior. +/// +/// Allows specifying a custom predicate on the number of matching log lines, +/// a timeout in seconds, and whether the system should wait the entire timeout duration. +/// +/// # Fields +/// - `predicate`: A function that takes the current number of matching lines and +/// returns `true` if the condition is satisfied. +/// - `timeout_secs`: Maximum number of seconds to wait. +/// - `wait_until_timeout_elapses`: If `true`, the system will continue waiting +/// for the full timeout duration, even if the condition is already met early. +/// Useful when you need to verify sustained absence or stability (e.g., "ensure no new logs appear"). 
+#[derive(Clone)] +pub struct LogLineCountOptions { + pub predicate: Arc bool + Send + Sync>, + pub timeout: Duration, + pub wait_until_timeout_elapses: bool, +} + +impl LogLineCountOptions { + pub fn new( + predicate: impl Fn(u32) -> bool + 'static + Send + Sync, + timeout: Duration, + wait_until_timeout_elapses: bool, + ) -> Self { + Self { + predicate: Arc::new(predicate), + timeout, + wait_until_timeout_elapses, + } + } + + pub fn no_occurences_within_timeout(timeout: Duration) -> Self { + Self::new(|n| n == 0, timeout, true) + } +} + +// #[derive(Clone, Debug)] +// pub struct QueryMetricOptions { +// use_cache: bool, +// treat_not_found_as_zero: bool, +// } + +// impl Default for QueryMetricOptions { +// fn default() -> Self { +// Self { use_cache: false, treat_not_found_as_zero: true } +// } +// } + +impl NetworkNode { + /// Create a new NetworkNode + pub(crate) fn new>( + name: T, + ws_uri: T, + prometheus_uri: T, + multiaddr: T, + spec: NodeSpec, + inner: DynNode, + ) -> Self { + Self { + name: name.into(), + ws_uri: ws_uri.into(), + prometheus_uri: prometheus_uri.into(), + inner, + spec, + multiaddr: multiaddr.into(), + metrics_cache: Arc::new(Default::default()), + is_running: Arc::new(AtomicBool::new(false)), + } + } + + pub(crate) fn is_running(&self) -> bool { + self.is_running.load(Ordering::Acquire) + } + + pub(crate) fn set_is_running(&self, is_running: bool) { + self.is_running.store(is_running, Ordering::Release); + } + + pub(crate) fn set_multiaddr(&mut self, multiaddr: impl Into) { + self.multiaddr = multiaddr.into(); + } + + pub fn name(&self) -> &str { + &self.name + } + + pub fn args(&self) -> Vec<&str> { + self.inner.args() + } + + pub fn spec(&self) -> &NodeSpec { + &self.spec + } + + pub fn ws_uri(&self) -> &str { + &self.ws_uri + } + + pub fn multiaddr(&self) -> &str { + self.multiaddr.as_ref() + } + + // Subxt + + /// Get the rpc client for the node + pub async fn rpc(&self) -> Result { + get_client_from_url(&self.ws_uri).await + } + + 
/// Get the [online client](subxt::client::OnlineClient) for the node + #[deprecated = "Use `wait_client` instead."] + pub async fn client( + &self, + ) -> Result, pezkuwi_subxt::Error> { + self.try_client().await + } + + /// Try to connect to the node. + /// + /// Most of the time you only want to use [`NetworkNode::wait_client`] that waits for + /// the node to appear before it connects to it. This function directly tries + /// to connect to the node and returns an error if the node is not yet available + /// at that point in time. + /// + /// Returns a [`OnlineClient`] on success. + pub async fn try_client( + &self, + ) -> Result, pezkuwi_subxt::Error> { + get_client_from_url(&self.ws_uri).await + } + + /// Wait until get the [online client](subxt::client::OnlineClient) for the node + pub async fn wait_client( + &self, + ) -> Result, anyhow::Error> { + debug!("wait_client ws_uri: {}", self.ws_uri()); + wait_ws_ready(self.ws_uri()) + .await + .map_err(|e| anyhow!("Error awaiting http_client to ws be ready, err: {e}"))?; + + self.try_client() + .await + .map_err(|e| anyhow!("Can't create a subxt client, err: {e}")) + } + + /// Wait until get the [online client](subxt::client::OnlineClient) for the node with a defined timeout + pub async fn wait_client_with_timeout( + &self, + timeout_secs: impl Into, + ) -> Result, anyhow::Error> { + debug!("waiting until subxt client is ready"); + tokio::time::timeout( + Duration::from_secs(timeout_secs.into()), + self.wait_client::(), + ) + .await? + } + + // Commands + + /// Pause the node, this is implemented by pausing the + /// actual process (e.g polkadot) with sending `SIGSTOP` signal + /// + /// Note: If you're using this method with the native provider on the attached network, the live network has to be running + /// with global setting `teardown_on_failure` disabled. 
+ pub async fn pause(&self) -> Result<(), anyhow::Error> { + self.set_is_running(false); + self.inner.pause().await?; + Ok(()) + } + + /// Resume the node, this is implemented by resuming the + /// actual process (e.g polkadot) with sending `SIGCONT` signal + /// + /// Note: If you're using this method with the native provider on the attached network, the live network has to be running + /// with global setting `teardown_on_failure` disabled. + pub async fn resume(&self) -> Result<(), anyhow::Error> { + self.set_is_running(true); + self.inner.resume().await?; + Ok(()) + } + + /// Restart the node using the same `cmd`, `args` and `env` (and same isolated dir) + /// + /// Note: If you're using this method with the native provider on the attached network, the live network has to be running + /// with global setting `teardown_on_failure` disabled. + pub async fn restart(&self, after: Option) -> Result<(), anyhow::Error> { + self.set_is_running(false); + self.inner.restart(after).await?; + self.set_is_running(true); + Ok(()) + } + + // Metrics assertions + + /// Get metric value 'by name' from Prometheus (exposed by the node) + /// metric name can be: + /// with prefix (e.g: 'polkadot_') + /// with chain attribute (e.g: 'chain=rococo-local') + /// without prefix and/or without chain attribute + pub async fn reports(&self, metric_name: impl Into) -> Result { + let metric_name = metric_name.into(); + // force cache reload + self.fetch_metrics().await?; + // by default we treat not found as 0 (same in v1) + self.metric(&metric_name, true).await + } + + /// Assert on a metric value 'by name' from Prometheus (exposed by the node) + /// metric name can be: + /// with prefix (e.g: 'polkadot_') + /// with chain attribute (e.g: 'chain=rococo-local') + /// without prefix and/or without chain attribute + /// + /// We first try to assert on the value using the cached metrics and + /// if not meet the criteria we reload the cache and check again + pub async fn assert( + &self, + 
metric_name: impl Into, + value: impl Into, + ) -> Result { + let value: f64 = value.into(); + self.assert_with(metric_name, |v| v == value).await + } + + /// Assert on a metric value using a given predicate. + /// See [`NetworkNode::reports`] description for details on metric name. + pub async fn assert_with( + &self, + metric_name: impl Into, + predicate: impl Fn(f64) -> bool, + ) -> Result { + let metric_name = metric_name.into(); + // reload metrics + self.fetch_metrics().await?; + let val = self.metric(&metric_name, true).await?; + trace!("🔎 Current value {val} passed to the predicated?"); + Ok(predicate(val)) + } + + // Wait methods for metrics + + /// Wait until a metric value pass the `predicate` + pub async fn wait_metric( + &self, + metric_name: impl Into, + predicate: impl Fn(f64) -> bool, + ) -> Result<(), anyhow::Error> { + let metric_name = metric_name.into(); + debug!("waiting until metric {metric_name} pass the predicate"); + loop { + let res = self.assert_with(&metric_name, &predicate).await; + match res { + Ok(res) => { + if res { + return Ok(()); + } + }, + Err(e) => match e.downcast::() { + Ok(io_err) => { + if !skip_err_while_waiting(&io_err) { + return Err(io_err.into()); + } + }, + Err(other) => { + match other.downcast::() { + Ok(node_err) => { + if !matches!(node_err, NetworkNodeError::MetricNotFound(_)) { + return Err(node_err.into()); + } + }, + Err(other) => return Err(other), + }; + }, + }, + } + + // sleep to not spam prometheus + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + /// Wait until a metric value pass the `predicate` + /// with a timeout (secs) + pub async fn wait_metric_with_timeout( + &self, + metric_name: impl Into, + predicate: impl Fn(f64) -> bool, + timeout_secs: impl Into, + ) -> Result<(), anyhow::Error> { + let metric_name = metric_name.into(); + let secs = timeout_secs.into(); + debug!("waiting until metric {metric_name} pass the predicate"); + let res = tokio::time::timeout( + 
Duration::from_secs(secs), + self.wait_metric(&metric_name, predicate), + ) + .await; + + if let Ok(inner_res) = res { + match inner_res { + Ok(_) => Ok(()), + Err(e) => Err(anyhow!("Error waiting for metric: {e}")), + } + } else { + // timeout + Err(anyhow!( + "Timeout ({secs}), waiting for metric {metric_name} pass the predicate" + )) + } + } + + // Logs + + /// Get the logs of the node + /// TODO: do we need the `since` param, maybe we could be handy later for loop filtering + pub async fn logs(&self) -> Result { + Ok(self.inner.logs().await?) + } + + /// Wait until a the number of matching log lines is reach + pub async fn wait_log_line_count( + &self, + pattern: impl Into, + is_glob: bool, + count: usize, + ) -> Result<(), anyhow::Error> { + let pattern = pattern.into(); + let pattern_clone = pattern.clone(); + debug!("waiting until we find pattern {pattern} {count} times"); + let match_fn: BoxedClosure = if is_glob { + Box::new(move |line: &str| Ok(glob_match(&pattern, line))) + } else { + let re = Regex::new(&pattern)?; + Box::new(move |line: &str| re.is_match(line).map_err(|e| anyhow!(e.to_string()))) + }; + + loop { + let mut q = 0_usize; + let logs = self.logs().await?; + for line in logs.lines() { + trace!("line is {line}"); + if match_fn(line)? { + trace!("pattern {pattern_clone} match in line {line}"); + q += 1; + if q >= count { + return Ok(()); + } + } + } + + tokio::time::sleep(Duration::from_secs(2)).await; + } + } + + /// Waits until the number of matching log lines satisfies a custom condition, + /// optionally waiting for the entire duration of the timeout. + /// + /// This method searches log lines for a given substring or glob pattern, + /// and evaluates the number of matching lines using a user-provided predicate function. + /// Optionally, it can wait for the full timeout duration to ensure the condition + /// holds consistently (e.g., for verifying absence of logs). 
+ /// + /// # Arguments + /// * `substring` - The substring or pattern to match within log lines. + /// * `is_glob` - Whether to treat `substring` as a glob pattern (`true`) or a regex (`false`). + /// * `options` - Configuration for timeout, match count predicate, and full-duration waiting. + /// + /// # Returns + /// * `Ok(LogLineCount::TargetReached(n))` if the predicate was satisfied within the timeout, + /// * `Ok(LogLineCount::TargetFails(n))` if the predicate was not satisfied in time, + /// * `Err(e)` if an error occurred during log retrieval or matching. + /// + /// # Example + /// ```rust + /// # use std::{sync::Arc, time::Duration}; + /// # use provider::NativeProvider; + /// # use support::{fs::local::LocalFileSystem}; + /// # use zombienet_orchestrator::{Orchestrator, network::node::{NetworkNode, LogLineCountOptions}}; + /// # use configuration::NetworkConfig; + /// # async fn example() -> Result<(), anyhow::Error> { + /// # let provider = NativeProvider::new(LocalFileSystem {}); + /// # let orchestrator = Orchestrator::new(LocalFileSystem {}, provider); + /// # let config = NetworkConfig::load_from_toml("config.toml")?; + /// # let network = orchestrator.spawn(config).await?; + /// let node = network.get_node("alice")?; + /// // Wait (up to 10 seconds) until pattern occurs once + /// let options = LogLineCountOptions { + /// predicate: Arc::new(|count| count == 1), + /// timeout: Duration::from_secs(10), + /// wait_until_timeout_elapses: false, + /// }; + /// let result = node + /// .wait_log_line_count_with_timeout("error", false, options) + /// .await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn wait_log_line_count_with_timeout( + &self, + substring: impl Into, + is_glob: bool, + options: LogLineCountOptions, + ) -> Result { + let substring = substring.into(); + debug!( + "waiting until match lines count within {} seconds", + options.timeout.as_secs_f64() + ); + + let start = tokio::time::Instant::now(); + + let match_fn: BoxedClosure = if 
is_glob { + Box::new(move |line: &str| Ok(glob_match(&substring, line))) + } else { + let re = Regex::new(&substring)?; + Box::new(move |line: &str| re.is_match(line).map_err(|e| anyhow!(e.to_string()))) + }; + + if options.wait_until_timeout_elapses { + tokio::time::sleep(options.timeout).await; + } + + let mut q; + loop { + q = 0_u32; + let logs = self.logs().await?; + for line in logs.lines() { + if match_fn(line)? { + q += 1; + + // If `wait_until_timeout_elapses` is set then check the condition just once at the + // end after the whole log file is processed. This is to address the cases when the + // predicate becomes true and false again. + // eg. expected exactly 2 matching lines are expected but 3 are present + if !options.wait_until_timeout_elapses && (options.predicate)(q) { + return Ok(LogLineCount::TargetReached(q)); + } + } + } + + if start.elapsed() >= options.timeout { + break; + } + + tokio::time::sleep(Duration::from_secs(2)).await; + } + + if (options.predicate)(q) { + Ok(LogLineCount::TargetReached(q)) + } else { + Ok(LogLineCount::TargetFailed(q)) + } + } + + async fn fetch_metrics(&self) -> Result<(), anyhow::Error> { + let response = reqwest::get(&self.prometheus_uri).await?; + let metrics = prom_metrics_parser::parse(&response.text().await?)?; + let mut cache = self.metrics_cache.write().await; + *cache = metrics; + Ok(()) + } + + /// Query individual metric by name + async fn metric( + &self, + metric_name: &str, + treat_not_found_as_zero: bool, + ) -> Result { + let mut metrics_map = self.metrics_cache.read().await; + if metrics_map.is_empty() { + // reload metrics + drop(metrics_map); + self.fetch_metrics().await?; + metrics_map = self.metrics_cache.read().await; + } + + if let Some(val) = metrics_map.get(metric_name) { + Ok(*val) + } else if treat_not_found_as_zero { + Ok(0_f64) + } else { + Err(NetworkNodeError::MetricNotFound(metric_name.into()).into()) + } + } + + /// Waits given number of seconds until node reports that it is up and 
running, which + /// is determined by metric 'process_start_time_seconds', which should appear, + /// when node finished booting up. + /// + /// + /// # Arguments + /// * `timeout_secs` - The number of seconds to wait. + /// + /// # Returns + /// * `Ok()` if the node is up before timeout occured. + /// * `Err(e)` if timeout or other error occurred while waiting. + pub async fn wait_until_is_up( + &self, + timeout_secs: impl Into, + ) -> Result<(), anyhow::Error> { + self.wait_metric_with_timeout("process_start_time_seconds", |b| b >= 1.0, timeout_secs) + .await + .map_err(|err| anyhow::anyhow!("{}: {:?}", self.name(), err)) + } +} + +impl std::fmt::Debug for NetworkNode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NetworkNode") + .field("inner", &"inner_skipped") + .field("spec", &self.spec) + .field("name", &self.name) + .field("ws_uri", &self.ws_uri) + .field("prometheus_uri", &self.prometheus_uri) + .finish() + } +} + +fn serialize_provider_node(node: &DynNode, serializer: S) -> Result +where + S: Serializer, +{ + erased_serde::serialize(node.as_ref(), serializer) +} + +// TODO: mock and impl more unit tests +#[cfg(test)] +mod tests { + use std::{ + path::{Path, PathBuf}, + sync::{Arc, Mutex}, + }; + + use async_trait::async_trait; + use provider::{types::*, ProviderError, ProviderNode}; + + use super::*; + + #[derive(Serialize)] + struct MockNode { + logs: Arc>>, + } + + impl MockNode { + fn new() -> Self { + Self { + logs: Arc::new(Mutex::new(vec![])), + } + } + + fn logs_push(&self, lines: Vec>) { + self.logs + .lock() + .unwrap() + .extend(lines.into_iter().map(|l| l.into())); + } + } + + #[async_trait] + impl ProviderNode for MockNode { + fn name(&self) -> &str { + todo!() + } + + fn args(&self) -> Vec<&str> { + todo!() + } + + fn base_dir(&self) -> &PathBuf { + todo!() + } + + fn config_dir(&self) -> &PathBuf { + todo!() + } + + fn data_dir(&self) -> &PathBuf { + todo!() + } + + fn relay_data_dir(&self) -> 
&PathBuf { + todo!() + } + + fn scripts_dir(&self) -> &PathBuf { + todo!() + } + + fn log_path(&self) -> &PathBuf { + todo!() + } + + fn log_cmd(&self) -> String { + todo!() + } + + fn path_in_node(&self, _file: &Path) -> PathBuf { + todo!() + } + + async fn logs(&self) -> Result { + Ok(self.logs.lock().unwrap().join("\n")) + } + + async fn dump_logs(&self, _local_dest: PathBuf) -> Result<(), ProviderError> { + todo!() + } + + async fn run_command( + &self, + _options: RunCommandOptions, + ) -> Result { + todo!() + } + + async fn run_script( + &self, + _options: RunScriptOptions, + ) -> Result { + todo!() + } + + async fn send_file( + &self, + _local_file_path: &Path, + _remote_file_path: &Path, + _mode: &str, + ) -> Result<(), ProviderError> { + todo!() + } + + async fn receive_file( + &self, + _remote_file_path: &Path, + _local_file_path: &Path, + ) -> Result<(), ProviderError> { + todo!() + } + + async fn pause(&self) -> Result<(), ProviderError> { + todo!() + } + + async fn resume(&self) -> Result<(), ProviderError> { + todo!() + } + + async fn restart(&self, _after: Option) -> Result<(), ProviderError> { + todo!() + } + + async fn destroy(&self) -> Result<(), ProviderError> { + todo!() + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_target_reached_immediately() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + "stub line 2", + "system ready", + ]); + + // Wait (up to 10 seconds) until pattern occurs once + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 1), + timeout: Duration::from_secs(10), + wait_until_timeout_elapses: false, + }; + + let log_line_count = mock_node + .wait_log_line_count_with_timeout("system ready", false, options) + .await?; + + 
assert!(matches!(log_line_count, LogLineCount::TargetReached(1))); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_target_reached_after_delay() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + "stub line 2", + "system ready", + ]); + + // Wait (up to 4 seconds) until pattern occurs twice + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 2), + timeout: Duration::from_secs(4), + wait_until_timeout_elapses: false, + }; + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout("system ready", false, options) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(2)).await; + + mock_provider.logs_push(vec!["system ready"]); + + let log_line_count = task.await?; + + assert!(matches!(log_line_count, LogLineCount::TargetReached(2))); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_target_failed_timeout() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + "stub line 2", + "system ready", + ]); + + // Wait (up to 2 seconds) until pattern occurs twice + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 2), + timeout: Duration::from_secs(2), + wait_until_timeout_elapses: false, + }; + + let log_line_count = mock_node + .wait_log_line_count_with_timeout("system ready", false, options) + .await?; + + assert!(matches!(log_line_count, LogLineCount::TargetFailed(1))); + + Ok(()) + } + + #[tokio::test(flavor = 
"multi_thread")] + async fn test_wait_log_count_target_failed_exceeded() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + "stub line 2", + "system ready", + ]); + + // Wait until timeout and check if pattern occurs exactly twice + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 2), + timeout: Duration::from_secs(2), + wait_until_timeout_elapses: true, + }; + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout("system ready", false, options) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec!["system ready"]); + mock_provider.logs_push(vec!["system ready"]); + + let log_line_count = task.await?; + + assert!(matches!(log_line_count, LogLineCount::TargetFailed(3))); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_target_reached_no_occurences() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec!["system booting", "stub line 1", "stub line 2"]); + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout( + "system ready", + false, + // Wait until timeout and make sure pattern occurred zero times + LogLineCountOptions::no_occurences_within_timeout(Duration::from_secs(2)), + ) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec!["stub line 3"]); + + assert!(task.await?.success()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn 
test_wait_log_count_target_reached_in_range() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec!["system booting", "stub line 1", "stub line 2"]); + + // Wait until timeout and make sure pattern occurrence count is in range between 2 and 5 + let options = LogLineCountOptions { + predicate: Arc::new(|n| (2..=5).contains(&n)), + timeout: Duration::from_secs(2), + wait_until_timeout_elapses: true, + }; + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout("system ready", false, options) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec!["system ready", "system ready", "system ready"]); + + assert!(task.await?.success()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_with_timeout_with_lookahead_regex() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + // this line should not match + "Error importing block 0xfd66e545c446b1c01205503130b816af0ec2c0e504a8472808e6ff4a644ce1fa: block has an unknown parent", + "stub line 2" + ]); + + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 1), + timeout: Duration::from_secs(3), + wait_until_timeout_elapses: true, + }; + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout( + "error(?! 
importing block .*: block has an unknown parent)", + false, + options, + ) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec![ + "system ready", + // this line should match + "system error", + "system ready", + ]); + + assert!(task.await?.success()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_with_timeout_with_lookahead_regex_fails( + ) -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + // this line should not match + "Error importing block 0xfd66e545c446b1c01205503130b816af0ec2c0e504a8472808e6ff4a644ce1fa: block has an unknown parent", + "stub line 2" + ]); + + let options = LogLineCountOptions { + predicate: Arc::new(|n| n == 1), + timeout: Duration::from_secs(6), + wait_until_timeout_elapses: true, + }; + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count_with_timeout( + "error(?! 
importing block .*: block has an unknown parent)", + false, + options, + ) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec!["system ready", "system ready"]); + + assert!(!task.await?.success()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_with_lockahead_regex() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + // this line should not match + "Error importing block 0xfd66e545c446b1c01205503130b816af0ec2c0e504a8472808e6ff4a644ce1fa: block has an unknown parent", + "stub line 2" + ]); + + let task = tokio::spawn({ + async move { + mock_node + .wait_log_line_count( + "error(?! importing block .*: block has an unknown parent)", + false, + 1, + ) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec![ + "system ready", + // this line should match + "system error", + "system ready", + ]); + + assert!(task.await.is_ok()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_wait_log_count_with_lookahead_regex_fails() -> Result<(), anyhow::Error> { + let mock_provider = Arc::new(MockNode::new()); + let mock_node = NetworkNode::new( + "node1", + "ws_uri", + "prometheus_uri", + "multiaddr", + NodeSpec::default(), + mock_provider.clone(), + ); + + mock_provider.logs_push(vec![ + "system booting", + "stub line 1", + // this line should not match + "Error importing block 0xfd66e545c446b1c01205503130b816af0ec2c0e504a8472808e6ff4a644ce1fa: block has an unknown parent", + "stub line 2" + ]); + + let options = LogLineCountOptions { + predicate: Arc::new(|count| count == 1), + timeout: Duration::from_secs(2), + wait_until_timeout_elapses: 
true, + }; + + let task = tokio::spawn({ + async move { + // we expect no match, thus wait with timeout + mock_node + .wait_log_line_count_with_timeout( + "error(?! importing block .*: block has an unknown parent)", + false, + options, + ) + .await + .unwrap() + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + + mock_provider.logs_push(vec!["system ready", "system ready"]); + + assert!(!task.await?.success()); + + Ok(()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/relaychain.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/relaychain.rs new file mode 100644 index 00000000..7cea074d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/relaychain.rs @@ -0,0 +1,75 @@ +use std::path::PathBuf; + +use anyhow::anyhow; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; + +use super::node::NetworkNode; +use crate::{ + network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions, + utils::default_as_empty_vec, +}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Relaychain { + pub(crate) chain: String, + pub(crate) chain_id: String, + pub(crate) chain_spec_path: PathBuf, + #[serde(default, deserialize_with = "default_as_empty_vec")] + pub(crate) nodes: Vec, +} + +#[derive(Debug, Deserialize)] +pub(crate) struct RawRelaychain { + #[serde(flatten)] + pub(crate) inner: Relaychain, + pub(crate) nodes: serde_json::Value, +} + +#[async_trait] +impl ChainUpgrade for Relaychain { + async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> { + // check if the node is valid first + let node = if let Some(node_name) = &options.node_name { + if let Some(node) = self + .nodes() + .into_iter() + .find(|node| node.name() == node_name) + { + node + } else { + return Err(anyhow!("Node: {node_name} is not part of the set of nodes")); + } + } else { + // take the first node + if let Some(node) = self.nodes().first() { + node 
+ } else { + return Err(anyhow!("chain doesn't have any node!")); + } + }; + + self.perform_runtime_upgrade(node, options).await + } +} + +impl Relaychain { + pub(crate) fn new(chain: String, chain_id: String, chain_spec_path: PathBuf) -> Self { + Self { + chain, + chain_id, + chain_spec_path, + nodes: Default::default(), + } + } + + // Public API + pub fn nodes(&self) -> Vec<&NetworkNode> { + self.nodes.iter().collect() + } + + /// Get chain name + pub fn chain(&self) -> &str { + &self.chain + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/teyrchain.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/teyrchain.rs new file mode 100644 index 00000000..e0ff315a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network/teyrchain.rs @@ -0,0 +1,330 @@ +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; + +use anyhow::anyhow; +use async_trait::async_trait; +use pezkuwi_subxt::{dynamic::Value, tx::TxStatus, BizinikiwConfig, OnlineClient}; +use pezkuwi_subxt_signer::{sr25519::Keypair, SecretUri}; +use provider::types::TransferedFile; +use serde::{Deserialize, Serialize}; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem, net::wait_ws_ready}; +use tracing::info; + +use super::{chain_upgrade::ChainUpgrade, node::NetworkNode}; +use crate::{ + network_spec::teyrchain::TeyrchainSpec, + shared::types::{RegisterParachainOptions, RuntimeUpgradeOptions}, + tx_helper::client::get_client_from_url, + utils::default_as_empty_vec, + ScopedFilesystem, +}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Parachain { + pub(crate) chain: Option, + pub(crate) para_id: u32, + // unique_id is internally used to allow multiple parachains with the same id + // See `ParachainConfig` for more details + pub(crate) unique_id: String, + pub(crate) chain_id: Option, + pub(crate) chain_spec_path: Option, + #[serde(default, deserialize_with = "default_as_empty_vec")] + pub(crate) collators: Vec, + pub(crate) 
files_to_inject: Vec, + pub(crate) bootnodes_addresses: Vec, +} + +#[derive(Debug, Deserialize)] +pub(crate) struct RawParachain { + #[serde(flatten)] + pub(crate) inner: Parachain, + pub(crate) collators: serde_json::Value, +} + +#[async_trait] +impl ChainUpgrade for Parachain { + async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> { + // check if the node is valid first + let node = if let Some(node_name) = &options.node_name { + if let Some(node) = self + .collators() + .into_iter() + .find(|node| node.name() == node_name) + { + node + } else { + return Err(anyhow!("Node: {node_name} is not part of the set of nodes")); + } + } else { + // take the first node + if let Some(node) = self.collators().first() { + node + } else { + return Err(anyhow!("chain doesn't have any node!")); + } + }; + + self.perform_runtime_upgrade(node, options).await + } +} + +impl Parachain { + pub(crate) fn new(para_id: u32, unique_id: impl Into) -> Self { + Self { + chain: None, + para_id, + unique_id: unique_id.into(), + chain_id: None, + chain_spec_path: None, + collators: Default::default(), + files_to_inject: Default::default(), + bootnodes_addresses: vec![], + } + } + + pub(crate) fn with_chain_spec( + para_id: u32, + unique_id: impl Into, + chain_id: impl Into, + chain_spec_path: impl AsRef, + ) -> Self { + Self { + para_id, + unique_id: unique_id.into(), + chain: None, + chain_id: Some(chain_id.into()), + chain_spec_path: Some(chain_spec_path.as_ref().into()), + collators: Default::default(), + files_to_inject: Default::default(), + bootnodes_addresses: vec![], + } + } + + pub(crate) async fn from_spec( + para: &TeyrchainSpec, + files_to_inject: &[TransferedFile], + scoped_fs: &ScopedFilesystem<'_, impl FileSystem>, + ) -> Result { + let mut para_files_to_inject = files_to_inject.to_owned(); + + // parachain id is used for the keystore + let mut parachain = if let Some(chain_spec) = para.chain_spec.as_ref() { + let id = 
chain_spec.read_chain_id(scoped_fs).await?; + + // add the spec to global files to inject + let spec_name = chain_spec.chain_spec_name(); + let base = PathBuf::from_str(scoped_fs.base_dir)?; + para_files_to_inject.push(TransferedFile::new( + base.join(format!("{spec_name}.json")), + PathBuf::from(format!("/cfg/{}.json", para.id)), + )); + + let raw_path = chain_spec + .raw_path() + .ok_or(anyhow::anyhow!("chain-spec path should be set by now.",))?; + let mut running_para = + Parachain::with_chain_spec(para.id, ¶.unique_id, id, raw_path); + if let Some(chain_name) = chain_spec.chain_name() { + running_para.chain = Some(chain_name.to_string()); + } + + running_para + } else { + Parachain::new(para.id, ¶.unique_id) + }; + + parachain.bootnodes_addresses = para.bootnodes_addresses().into_iter().cloned().collect(); + parachain.files_to_inject = para_files_to_inject; + + Ok(parachain) + } + + pub async fn register( + options: RegisterParachainOptions, + scoped_fs: &ScopedFilesystem<'_, impl FileSystem>, + ) -> Result<(), anyhow::Error> { + info!("Registering parachain: {:?}", options); + // get the seed + let sudo: Keypair; + if let Some(possible_seed) = options.seed { + sudo = Keypair::from_secret_key(possible_seed) + .expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}")); + } else { + let uri = SecretUri::from_str("//Alice")?; + sudo = Keypair::from_uri(&uri)?; + } + + let genesis_state = scoped_fs + .read_to_string(options.state_path) + .await + .expect(&format!( + "State Path should be ok by this point {THIS_IS_A_BUG}" + )); + let wasm_data = scoped_fs + .read_to_string(options.wasm_path) + .await + .expect(&format!( + "Wasm Path should be ok by this point {THIS_IS_A_BUG}" + )); + + wait_ws_ready(options.node_ws_url.as_str()) + .await + .map_err(|_| { + anyhow::anyhow!( + "Error waiting for ws to be ready, at {}", + options.node_ws_url.as_str() + ) + })?; + + let api: OnlineClient = get_client_from_url(&options.node_ws_url).await?; + + let schedule_para = 
pezkuwi_subxt::dynamic::tx( + "ParasSudoWrapper", + "sudo_schedule_para_initialize", + vec![ + Value::primitive(options.id.into()), + Value::named_composite([ + ( + "genesis_head", + Value::from_bytes(hex::decode(&genesis_state[2..])?), + ), + ( + "validation_code", + Value::from_bytes(hex::decode(&wasm_data[2..])?), + ), + ("para_kind", Value::bool(options.onboard_as_para)), + ]), + ], + ); + + let sudo_call = + pezkuwi_subxt::dynamic::tx("Sudo", "sudo", vec![schedule_para.into_value()]); + + // TODO: uncomment below and fix the sign and submit (and follow afterwards until + // finalized block) to register the parachain + let mut tx = api + .tx() + .sign_and_submit_then_watch_default(&sudo_call, &sudo) + .await?; + + // Below we use the low level API to replicate the `wait_for_in_block` behaviour + // which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237. + while let Some(status) = tx.next().await { + match status? { + TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => { + let _result = tx_in_block.wait_for_success().await?; + info!("In block: {:#?}", tx_in_block.block_hash()); + }, + TxStatus::Error { message } + | TxStatus::Invalid { message } + | TxStatus::Dropped { message } => { + return Err(anyhow::format_err!("Error submitting tx: {message}")); + }, + _ => continue, + } + } + + Ok(()) + } + + pub fn para_id(&self) -> u32 { + self.para_id + } + + pub fn unique_id(&self) -> &str { + self.unique_id.as_str() + } + + pub fn chain_id(&self) -> Option<&str> { + self.chain_id.as_deref() + } + + pub fn collators(&self) -> Vec<&NetworkNode> { + self.collators.iter().collect() + } + + pub fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> { + self.bootnodes_addresses.iter().collect() + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use super::*; + + #[test] + fn create_with_is_works() { + let para = Parachain::new(100, "100"); + // only para_id and unique_id should be set + 
assert_eq!(para.para_id, 100); + assert_eq!(para.unique_id, "100"); + assert_eq!(para.chain_id, None); + assert_eq!(para.chain, None); + assert_eq!(para.chain_spec_path, None); + } + + #[test] + fn create_with_chain_spec_works() { + let para = Parachain::with_chain_spec(100, "100", "rococo-local", "/tmp/rococo-local.json"); + assert_eq!(para.para_id, 100); + assert_eq!(para.unique_id, "100"); + assert_eq!(para.chain_id, Some("rococo-local".to_string())); + assert_eq!(para.chain, None); + assert_eq!( + para.chain_spec_path, + Some(PathBuf::from("/tmp/rococo-local.json")) + ); + } + + #[tokio::test] + async fn create_with_para_spec_works() { + use configuration::ParachainConfigBuilder; + + use crate::network_spec::teyrchain::TeyrchainSpec; + + let bootnode_addresses = vec!["/ip4/10.41.122.55/tcp/45421"]; + + let para_config = ParachainConfigBuilder::new(Default::default()) + .with_id(100) + .cumulus_based(false) + .with_default_command("adder-collator") + .with_raw_bootnodes_addresses(bootnode_addresses.clone()) + .with_collator(|c| c.with_name("col")) + .build() + .unwrap(); + + let para_spec = + TeyrchainSpec::from_config(¶_config, "rococo-local".try_into().unwrap()).unwrap(); + let fs = support::fs::in_memory::InMemoryFileSystem::new(HashMap::default()); + let scoped_fs = ScopedFilesystem { + fs: &fs, + base_dir: "/tmp/some", + }; + + let files = vec![TransferedFile::new( + PathBuf::from("/tmp/some"), + PathBuf::from("/tmp/some"), + )]; + let para = Parachain::from_spec(¶_spec, &files, &scoped_fs) + .await + .unwrap(); + println!("{para:#?}"); + assert_eq!(para.para_id, 100); + assert_eq!(para.unique_id, "100"); + assert_eq!(para.chain_id, None); + assert_eq!(para.chain, None); + // one file should be added. 
+ assert_eq!(para.files_to_inject.len(), 1); + assert_eq!( + para.bootnodes_addresses() + .iter() + .map(|addr| addr.to_string()) + .collect::>(), + bootnode_addresses + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper.rs new file mode 100644 index 00000000..22f21e8f --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper.rs @@ -0,0 +1,2 @@ +pub mod metrics; +pub mod verifier; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/metrics.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/metrics.rs new file mode 100644 index 00000000..48db5cfc --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/metrics.rs @@ -0,0 +1,62 @@ +use std::collections::HashMap; + +use async_trait::async_trait; +use reqwest::Url; + +#[async_trait] +pub trait MetricsHelper { + async fn metric(&self, metric_name: &str) -> Result; + async fn metric_with_url( + metric: impl AsRef + Send, + endpoint: impl Into + Send, + ) -> Result; +} + +pub struct Metrics { + endpoint: Url, +} + +impl Metrics { + fn new(endpoint: impl Into) -> Self { + Self { + endpoint: endpoint.into(), + } + } + + async fn fetch_metrics( + endpoint: impl AsRef, + ) -> Result, anyhow::Error> { + let response = reqwest::get(endpoint.as_ref()).await?; + Ok(prom_metrics_parser::parse(&response.text().await?)?) 
+ } + + fn get_metric( + metrics_map: HashMap, + metric_name: &str, + ) -> Result { + let treat_not_found_as_zero = true; + if let Some(val) = metrics_map.get(metric_name) { + Ok(*val) + } else if treat_not_found_as_zero { + Ok(0_f64) + } else { + Err(anyhow::anyhow!("MetricNotFound: {metric_name}")) + } + } +} + +#[async_trait] +impl MetricsHelper for Metrics { + async fn metric(&self, metric_name: &str) -> Result { + let metrics_map = Metrics::fetch_metrics(self.endpoint.as_str()).await?; + Metrics::get_metric(metrics_map, metric_name) + } + + async fn metric_with_url( + metric_name: impl AsRef + Send, + endpoint: impl Into + Send, + ) -> Result { + let metrics_map = Metrics::fetch_metrics(endpoint.into()).await?; + Metrics::get_metric(metrics_map, metric_name.as_ref()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/verifier.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/verifier.rs new file mode 100644 index 00000000..73aca1dc --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_helper/verifier.rs @@ -0,0 +1,34 @@ +use std::time::Duration; + +use tokio::time::timeout; +use tracing::trace; + +use crate::network::node::NetworkNode; + +pub(crate) async fn verify_nodes(nodes: &[&NetworkNode]) -> Result<(), anyhow::Error> { + timeout(Duration::from_secs(90), check_nodes(nodes)) + .await + .map_err(|_| anyhow::anyhow!("one or more nodes are not ready!")) +} + +// TODO: we should inject in someway the logic to make the request +// in order to allow us to `mock` and easily test this. +// maybe moved to the provider with a NodeStatus, and some helpers like wait_running, wait_ready, etc... ? 
to be discussed +async fn check_nodes(nodes: &[&NetworkNode]) { + loop { + let tasks: Vec<_> = nodes + .iter() + .map(|node| { + trace!("🔎 checking node: {} ", node.name); + reqwest::get(node.prometheus_uri.clone()) + }) + .collect(); + + let all_ready = futures::future::try_join_all(tasks).await; + if all_ready.is_ok() { + return; + } + + tokio::time::sleep(Duration::from_millis(1000)).await; + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec.rs new file mode 100644 index 00000000..53c72a48 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec.rs @@ -0,0 +1,330 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; + +use configuration::{GlobalSettings, HrmpChannelConfig, NetworkConfig}; +use futures::future::try_join_all; +use provider::{DynNamespace, ProviderError, ProviderNamespace}; +use serde::{Deserialize, Serialize}; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; +use tracing::{debug, trace}; + +use crate::{errors::OrchestratorError, ScopedFilesystem}; + +pub mod node; +pub mod relaychain; +pub mod teyrchain; + +use self::{node::NodeSpec, relaychain::RelaychainSpec, teyrchain::TeyrchainSpec}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkSpec { + /// Relaychain configuration. + pub(crate) relaychain: RelaychainSpec, + + /// Parachains configurations. + pub(crate) parachains: Vec, + + /// HRMP channels configurations. 
+ pub(crate) hrmp_channels: Vec, + + /// Global settings + pub(crate) global_settings: GlobalSettings, +} + +impl NetworkSpec { + pub async fn from_config( + network_config: &NetworkConfig, + ) -> Result { + let mut errs = vec![]; + let relaychain = RelaychainSpec::from_config(network_config.relaychain())?; + let mut parachains = vec![]; + + // TODO: move to `fold` or map+fold + for para_config in network_config.parachains() { + match TeyrchainSpec::from_config(para_config, relaychain.chain.clone()) { + Ok(para) => parachains.push(para), + Err(err) => errs.push(err), + } + } + + if errs.is_empty() { + Ok(NetworkSpec { + relaychain, + parachains, + hrmp_channels: network_config + .hrmp_channels() + .into_iter() + .cloned() + .collect(), + global_settings: network_config.global_settings().clone(), + }) + } else { + let errs_str = errs + .into_iter() + .map(|e| e.to_string()) + .collect::>() + .join("\n"); + Err(OrchestratorError::InvalidConfig(errs_str)) + } + } + + pub async fn populate_nodes_available_args( + &mut self, + ns: Arc, + ) -> Result<(), OrchestratorError> { + let network_nodes = self.collect_network_nodes(); + + let mut image_command_to_nodes_mapping = + Self::create_image_command_to_nodes_mapping(network_nodes); + + let available_args_outputs = + Self::retrieve_all_nodes_available_args_output(ns, &image_command_to_nodes_mapping) + .await?; + + Self::update_nodes_available_args_output( + &mut image_command_to_nodes_mapping, + available_args_outputs, + ); + + Ok(()) + } + + // + pub async fn node_available_args_output( + &self, + node_spec: &NodeSpec, + ns: Arc, + ) -> Result { + // try to find a node that use the same combination of image/cmd + let cmp_fn = |ad_hoc: &&NodeSpec| -> bool { + ad_hoc.image == node_spec.image && ad_hoc.command == node_spec.command + }; + + // check if we already had computed the args output for this cmd/[image] + let node = self.relaychain.nodes.iter().find(cmp_fn); + let node = if let Some(node) = node { + Some(node) + } 
else { + let node = self + .parachains + .iter() + .find_map(|para| para.collators.iter().find(cmp_fn)); + + node + }; + + let output = if let Some(node) = node { + node.available_args_output.clone().expect(&format!( + "args_output should be set for running nodes {THIS_IS_A_BUG}" + )) + } else { + // we need to compute the args output + let image = node_spec + .image + .as_ref() + .map(|image| image.as_str().to_string()); + let command = node_spec.command.as_str().to_string(); + + ns.get_node_available_args((command, image)).await? + }; + + Ok(output) + } + + pub fn relaychain(&self) -> &RelaychainSpec { + &self.relaychain + } + + pub fn relaychain_mut(&mut self) -> &mut RelaychainSpec { + &mut self.relaychain + } + + pub fn parachains_iter(&self) -> impl Iterator { + self.parachains.iter() + } + + pub fn parachains_iter_mut(&mut self) -> impl Iterator { + self.parachains.iter_mut() + } + + pub fn set_global_settings(&mut self, global_settings: GlobalSettings) { + self.global_settings = global_settings; + } + + pub async fn build_parachain_artifacts<'a, T: FileSystem>( + &mut self, + ns: DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + relaychain_id: &str, + base_dir_exists: bool, + ) -> Result<(), anyhow::Error> { + for para in self.parachains.iter_mut() { + let chain_spec_raw_path = para.build_chain_spec(relaychain_id, &ns, scoped_fs).await?; + + trace!("creating dirs for {}", ¶.unique_id); + if base_dir_exists { + scoped_fs.create_dir_all(¶.unique_id).await?; + } else { + scoped_fs.create_dir(¶.unique_id).await?; + }; + trace!("created dirs for {}", ¶.unique_id); + + // create wasm/state + para.genesis_state + .build( + chain_spec_raw_path.clone(), + format!("{}/genesis-state", para.unique_id), + &ns, + scoped_fs, + None, + ) + .await?; + debug!("parachain genesis state built!"); + para.genesis_wasm + .build( + chain_spec_raw_path, + format!("{}/genesis-wasm", para.unique_id), + &ns, + scoped_fs, + None, + ) + .await?; + debug!("parachain genesis wasm 
built!"); + } + + Ok(()) + } + + // collect mutable references to all nodes from relaychain and parachains + fn collect_network_nodes(&mut self) -> Vec<&mut NodeSpec> { + vec![ + self.relaychain.nodes.iter_mut().collect::>(), + self.parachains + .iter_mut() + .flat_map(|para| para.collators.iter_mut()) + .collect(), + ] + .into_iter() + .flatten() + .collect::>() + } + + // initialize the mapping of all possible node image/commands to corresponding nodes + fn create_image_command_to_nodes_mapping( + network_nodes: Vec<&mut NodeSpec>, + ) -> HashMap<(Option, String), Vec<&mut NodeSpec>> { + network_nodes.into_iter().fold( + HashMap::new(), + |mut acc: HashMap<(Option, String), Vec<&mut node::NodeSpec>>, node| { + // build mapping key using image and command if image is present or command only + let key = node + .image + .as_ref() + .map(|image| { + ( + Some(image.as_str().to_string()), + node.command.as_str().to_string(), + ) + }) + .unwrap_or_else(|| (None, node.command.as_str().to_string())); + + // append the node to the vector of nodes for this image/command tuple + if let Entry::Vacant(entry) = acc.entry(key.clone()) { + entry.insert(vec![node]); + } else { + acc.get_mut(&key).unwrap().push(node); + } + + acc + }, + ) + } + + async fn retrieve_all_nodes_available_args_output( + ns: Arc, + image_command_to_nodes_mapping: &HashMap<(Option, String), Vec<&mut NodeSpec>>, + ) -> Result, String, String)>, OrchestratorError> { + try_join_all( + image_command_to_nodes_mapping + .keys() + .map(|(image, command)| { + let ns = ns.clone(); + let image = image.clone(); + let command = command.clone(); + async move { + // get node available args output from image/command + let available_args = ns + .get_node_available_args((command.clone(), image.clone())) + .await?; + debug!( + "retrieved available args for image: {:?}, command: {}", + image, command + ); + + // map the result to include image and command + Ok::<_, OrchestratorError>((image, command, available_args)) + } + 
}) + .collect::>(), + ) + .await + } + + fn update_nodes_available_args_output( + image_command_to_nodes_mapping: &mut HashMap<(Option, String), Vec<&mut NodeSpec>>, + available_args_outputs: Vec<(Option, String, String)>, + ) { + for (image, command, available_args_output) in available_args_outputs { + let nodes = image_command_to_nodes_mapping + .get_mut(&(image, command)) + .expect(&format!( + "node image/command key should exist {THIS_IS_A_BUG}" + )); + + for node in nodes { + node.available_args_output = Some(available_args_output.clone()); + } + } + } +} + +#[cfg(test)] +mod tests { + + #[tokio::test] + async fn small_network_config_get_spec() { + use configuration::NetworkConfigBuilder; + + use super::*; + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_validator(|node| node.with_name("alice")) + .with_fullnode(|node| node.with_name("bob").with_command("polkadot1")) + }) + .with_parachain(|p| { + p.with_id(100) + .with_default_command("adder-collator") + .with_collator(|c| c.with_name("collator1")) + }) + .build() + .unwrap(); + + let network_spec = NetworkSpec::from_config(&config).await.unwrap(); + let alice = network_spec.relaychain.nodes.first().unwrap(); + let bob = network_spec.relaychain.nodes.get(1).unwrap(); + assert_eq!(alice.command.as_str(), "polkadot"); + assert_eq!(bob.command.as_str(), "polkadot1"); + assert!(alice.is_validator); + assert!(!bob.is_validator); + + // paras + assert_eq!(network_spec.parachains.len(), 1); + let para_100 = network_spec.parachains.first().unwrap(); + assert_eq!(para_100.id, 100); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/node.rs new file mode 100644 index 00000000..98d6d4a4 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/node.rs @@ -0,0 +1,356 @@ +use 
std::path::PathBuf; + +use configuration::shared::{ + node::{EnvVar, NodeConfig}, + resources::Resources, + types::{Arg, AssetLocation, Command, Image}, +}; +use multiaddr::Multiaddr; +use provider::types::Port; +use serde::{Deserialize, Serialize}; +use support::constants::THIS_IS_A_BUG; + +use crate::{ + errors::OrchestratorError, + generators, + network::AddNodeOptions, + shared::{ + macros, + types::{ChainDefaultContext, NodeAccount, NodeAccounts, ParkedPort}, + }, + AddCollatorOptions, +}; + +macros::create_add_options!(AddNodeSpecOpts { + override_eth_key: Option +}); + +macro_rules! impl_from_for_add_node_opts { + ($struct:ident) => { + impl From<$struct> for AddNodeSpecOpts { + fn from(value: $struct) -> Self { + Self { + image: value.image, + command: value.command, + subcommand: value.subcommand, + args: value.args, + env: value.env, + is_validator: value.is_validator, + rpc_port: value.rpc_port, + prometheus_port: value.prometheus_port, + p2p_port: value.p2p_port, + override_eth_key: value.override_eth_key, + } + } + } + }; +} + +impl_from_for_add_node_opts!(AddNodeOptions); +impl_from_for_add_node_opts!(AddCollatorOptions); + +/// A node configuration, with fine-grained configuration options. +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct NodeSpec { + // Node name (should be unique or an index will be appended). + pub(crate) name: String, + + /// Node key, used for compute the p2p identity. + pub(crate) key: String, + + // libp2p local identity + pub(crate) peer_id: String, + + /// Accounts to be injected in the keystore. + pub(crate) accounts: NodeAccounts, + + /// Image to run (only podman/k8s). Override the default. + pub(crate) image: Option, + + /// Command to run the node. Override the default. + pub(crate) command: Command, + + /// Optional subcommand for the node. + pub(crate) subcommand: Option, + + /// Arguments to use for node. Appended to default. 
+ pub(crate) args: Vec, + + // The help command output containing the available arguments. + pub(crate) available_args_output: Option, + + /// Wether the node is a validator. + pub(crate) is_validator: bool, + + /// Whether the node keys must be added to invulnerables. + pub(crate) is_invulnerable: bool, + + /// Whether the node is a bootnode. + pub(crate) is_bootnode: bool, + + /// Node initial balance present in genesis. + pub(crate) initial_balance: u128, + + /// Environment variables to set (inside pod for podman/k8s, inside shell for native). + pub(crate) env: Vec, + + /// List of node's bootnodes addresses to use. Appended to default. + pub(crate) bootnodes_addresses: Vec, + + /// Default resources. Override the default. + pub(crate) resources: Option, + + /// Websocket port to use. + pub(crate) ws_port: ParkedPort, + + /// RPC port to use. + pub(crate) rpc_port: ParkedPort, + + /// Prometheus port to use. + pub(crate) prometheus_port: ParkedPort, + + /// P2P port to use. + pub(crate) p2p_port: ParkedPort, + + /// libp2p cert hash to use with `webrtc` transport. + pub(crate) p2p_cert_hash: Option, + + /// Database snapshot. Override the default. + pub(crate) db_snapshot: Option, + + /// P2P port to use by full node if this is the case + pub(crate) full_node_p2p_port: Option, + /// Prometheus port to use by full node if this is the case + pub(crate) full_node_prometheus_port: Option, + + /// Optionally specify a log path for the node + pub(crate) node_log_path: Option, + + /// Optionally specify a keystore path for the node + pub(crate) keystore_path: Option, + + /// Keystore key types to generate. + /// Supports short form (e.g., "audi") using predefined schemas, + /// or long form (e.g., "audi_sr") with explicit schema (sr, ed, ec). 
+ pub(crate) keystore_key_types: Vec, +} + +impl NodeSpec { + pub fn from_config( + node_config: &NodeConfig, + chain_context: &ChainDefaultContext, + full_node_present: bool, + evm_based: bool, + ) -> Result { + // Check first if the image is set at node level, then try with the default + let image = node_config.image().or(chain_context.default_image).cloned(); + + // Check first if the command is set at node level, then try with the default + let command = if let Some(cmd) = node_config.command() { + cmd.clone() + } else if let Some(cmd) = chain_context.default_command { + cmd.clone() + } else { + return Err(OrchestratorError::InvalidNodeConfig( + node_config.name().into(), + "command".to_string(), + )); + }; + + let subcommand = node_config.subcommand().cloned(); + + // If `args` is set at `node` level use them + // otherwise use the default_args (can be empty). + let args: Vec = if node_config.args().is_empty() { + chain_context + .default_args + .iter() + .map(|x| x.to_owned().clone()) + .collect() + } else { + node_config.args().into_iter().cloned().collect() + }; + + let (key, peer_id) = generators::generate_node_identity(node_config.name())?; + + let mut name = node_config.name().to_string(); + let seed = format!("//{}{name}", name.remove(0).to_uppercase()); + let accounts = generators::generate_node_keys(&seed)?; + let mut accounts = NodeAccounts { seed, accounts }; + + if evm_based { + if let Some(session_key) = node_config.override_eth_key() { + accounts + .accounts + .insert("eth".into(), NodeAccount::new(session_key, session_key)); + } + } + + let db_snapshot = match (node_config.db_snapshot(), chain_context.default_db_snapshot) { + (Some(db_snapshot), _) => Some(db_snapshot), + (None, Some(db_snapshot)) => Some(db_snapshot), + _ => None, + }; + + let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present { + ( + Some(generators::generate_node_port(None)?), + Some(generators::generate_node_port(None)?), + ) + } else { + (None, None) + 
}; + + Ok(Self { + name: node_config.name().to_string(), + key, + peer_id, + image, + command, + subcommand, + args, + available_args_output: None, + is_validator: node_config.is_validator(), + is_invulnerable: node_config.is_invulnerable(), + is_bootnode: node_config.is_bootnode(), + initial_balance: node_config.initial_balance(), + env: node_config.env().into_iter().cloned().collect(), + bootnodes_addresses: node_config + .bootnodes_addresses() + .into_iter() + .cloned() + .collect(), + resources: node_config.resources().cloned(), + p2p_cert_hash: node_config.p2p_cert_hash().map(str::to_string), + db_snapshot: db_snapshot.cloned(), + accounts, + ws_port: generators::generate_node_port(node_config.ws_port())?, + rpc_port: generators::generate_node_port(node_config.rpc_port())?, + prometheus_port: generators::generate_node_port(node_config.prometheus_port())?, + p2p_port: generators::generate_node_port(node_config.p2p_port())?, + full_node_p2p_port, + full_node_prometheus_port, + node_log_path: node_config.node_log_path().cloned(), + keystore_path: node_config.keystore_path().cloned(), + keystore_key_types: node_config + .keystore_key_types() + .into_iter() + .map(str::to_string) + .collect(), + }) + } + + pub fn from_ad_hoc( + name: impl Into, + options: AddNodeSpecOpts, + chain_context: &ChainDefaultContext, + full_node_present: bool, + evm_based: bool, + ) -> Result { + // Check first if the image is set at node level, then try with the default + let image = if let Some(img) = options.image { + Some(img.clone()) + } else { + chain_context.default_image.cloned() + }; + + let name = name.into(); + // Check first if the command is set at node level, then try with the default + let command = if let Some(cmd) = options.command { + cmd.clone() + } else if let Some(cmd) = chain_context.default_command { + cmd.clone() + } else { + return Err(OrchestratorError::InvalidNodeConfig( + name, + "command".to_string(), + )); + }; + + let subcommand = options.subcommand.clone(); 
+ + // If `args` is set at `node` level use them + // otherwise use the default_args (can be empty). + let args: Vec = if options.args.is_empty() { + chain_context + .default_args + .iter() + .map(|x| x.to_owned().clone()) + .collect() + } else { + options.args + }; + + let (key, peer_id) = generators::generate_node_identity(&name)?; + + let mut name_capitalized = name.clone(); + let seed = format!( + "//{}{name_capitalized}", + name_capitalized.remove(0).to_uppercase() + ); + let accounts = generators::generate_node_keys(&seed)?; + let mut accounts = NodeAccounts { seed, accounts }; + + if evm_based { + if let Some(session_key) = options.override_eth_key.as_ref() { + accounts + .accounts + .insert("eth".into(), NodeAccount::new(session_key, session_key)); + } + } + + let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present { + ( + Some(generators::generate_node_port(None)?), + Some(generators::generate_node_port(None)?), + ) + } else { + (None, None) + }; + + // + Ok(Self { + name, + key, + peer_id, + image, + command, + subcommand, + args, + available_args_output: None, + is_validator: options.is_validator, + is_invulnerable: false, + is_bootnode: false, + initial_balance: 0, + env: options.env, + bootnodes_addresses: vec![], + resources: None, + p2p_cert_hash: None, + db_snapshot: None, + accounts, + // should be deprecated now! 
+ ws_port: generators::generate_node_port(None)?, + rpc_port: generators::generate_node_port(options.rpc_port)?, + prometheus_port: generators::generate_node_port(options.prometheus_port)?, + p2p_port: generators::generate_node_port(options.p2p_port)?, + full_node_p2p_port, + full_node_prometheus_port, + node_log_path: None, + keystore_path: None, + keystore_key_types: vec![], + }) + } + + pub(crate) fn supports_arg(&self, arg: impl AsRef) -> bool { + self.available_args_output + .as_ref() + .expect(&format!( + "available args should be present at this point {THIS_IS_A_BUG}" + )) + .contains(arg.as_ref()) + } + + pub fn command(&self) -> &str { + self.command.as_str() + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/relaychain.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/relaychain.rs new file mode 100644 index 00000000..0157d831 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/relaychain.rs @@ -0,0 +1,181 @@ +use std::collections::{HashMap, HashSet}; + +use configuration::{ + shared::{ + helpers::generate_unique_node_name_from_names, + resources::Resources, + types::{Arg, AssetLocation, Chain, Command, Image}, + }, + types::JsonOverrides, + NodeConfig, RelaychainConfig, +}; +use serde::{Deserialize, Serialize}; +use support::replacer::apply_replacements; + +use super::node::NodeSpec; +use crate::{ + errors::OrchestratorError, + generators::chain_spec::{ChainSpec, Context}, + shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext}, +}; + +/// A relaychain configuration spec +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RelaychainSpec { + /// Chain to use (e.g. rococo-local). + pub(crate) chain: Chain, + + /// Default command to run the node. Can be overridden on each node. + pub(crate) default_command: Option, + + /// Default image to use (only podman/k8s). Can be overridden on each node. 
+ pub(crate) default_image: Option, + + /// Default resources. Can be overridden on each node. + pub(crate) default_resources: Option, + + /// Default database snapshot. Can be overridden on each node. + pub(crate) default_db_snapshot: Option, + + /// Default arguments to use in nodes. Can be overridden on each node. + pub(crate) default_args: Vec, + + // chain_spec_path: Option, + pub(crate) chain_spec: ChainSpec, + + /// Set the count of nominators to generate (used with PoS networks). + pub(crate) random_nominators_count: u32, + + /// Set the max nominators value (used with PoS networks). + pub(crate) max_nominations: u8, + + /// Genesis overrides as JSON value. + pub(crate) runtime_genesis_patch: Option, + + /// Wasm override path/url to use. + pub(crate) wasm_override: Option, + + /// Nodes to run. + pub(crate) nodes: Vec, + + /// Raw chain-spec override path, url or inline json to use. + pub(crate) raw_spec_override: Option, +} + +impl RelaychainSpec { + pub fn from_config(config: &RelaychainConfig) -> Result { + // Relaychain main command to use, in order: + // set as `default_command` or + // use the command of the first node. + // If none of those is set, return an error. + let main_cmd = config + .default_command() + .or(config.nodes().first().and_then(|node| node.command())) + .ok_or(OrchestratorError::InvalidConfig( + "Relaychain, either default_command or first node with a command needs to be set." 
+ .to_string(), + ))?; + + // TODO: internally we use image as String + let main_image = config + .default_image() + .or(config.nodes().first().and_then(|node| node.image())) + .map(|image| image.as_str().to_string()); + + let replacements = HashMap::from([ + ("disableBootnodes", "--disable-default-bootnode"), + ("mainCommand", main_cmd.as_str()), + ]); + let tmpl = if let Some(tmpl) = config.chain_spec_command() { + apply_replacements(tmpl, &replacements) + } else { + apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements) + }; + + let chain_spec = ChainSpec::new(config.chain().as_str(), Context::Relay) + .set_chain_name(config.chain().as_str()) + .command( + tmpl.as_str(), + config.chain_spec_command_is_local(), + config.chain_spec_command_output_path(), + ) + .image(main_image.clone()); + + // Add asset location if present + let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() { + chain_spec.asset_location(chain_spec_path.clone()) + } else { + chain_spec + }; + + // add chain-spec runtime if present + let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() { + chain_spec.runtime(chain_spec_runtime.clone()) + } else { + chain_spec + }; + + // build the `node_specs` + let chain_context = ChainDefaultContext { + default_command: config.default_command(), + default_image: config.default_image(), + default_resources: config.default_resources(), + default_db_snapshot: config.default_db_snapshot(), + default_args: config.default_args(), + }; + + let mut nodes: Vec = config.nodes().into_iter().cloned().collect(); + nodes.extend( + config + .group_node_configs() + .into_iter() + .flat_map(|node_group| node_group.expand_group_configs()), + ); + + let mut names = HashSet::new(); + let (nodes, mut errs) = nodes + .iter() + .map(|node_config| NodeSpec::from_config(node_config, &chain_context, false, false)) + .fold((vec![], vec![]), |(mut nodes, mut errs), result| { + match result { + Ok(mut node) => { + let unique_name = 
+ generate_unique_node_name_from_names(node.name, &mut names); + node.name = unique_name; + nodes.push(node); + }, + Err(err) => errs.push(err), + } + (nodes, errs) + }); + + if !errs.is_empty() { + // TODO: merge errs, maybe return something like Result> + return Err(errs.swap_remove(0)); + } + + Ok(RelaychainSpec { + chain: config.chain().clone(), + default_command: config.default_command().cloned(), + default_image: config.default_image().cloned(), + default_resources: config.default_resources().cloned(), + default_db_snapshot: config.default_db_snapshot().cloned(), + wasm_override: config.wasm_override().cloned(), + default_args: config.default_args().into_iter().cloned().collect(), + chain_spec, + random_nominators_count: config.random_nominators_count().unwrap_or(0), + max_nominations: config.max_nominations().unwrap_or(24), + runtime_genesis_patch: config.runtime_genesis_patch().cloned(), + nodes, + raw_spec_override: config.raw_spec_override().cloned(), + }) + } + + pub fn chain_spec(&self) -> &ChainSpec { + &self.chain_spec + } + + pub fn chain_spec_mut(&mut self) -> &mut ChainSpec { + &mut self.chain_spec + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/teyrchain.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/teyrchain.rs new file mode 100644 index 00000000..2813d9fe --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/network_spec/teyrchain.rs @@ -0,0 +1,386 @@ +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, +}; + +use configuration::{ + shared::{helpers::generate_unique_node_name_from_names, resources::Resources}, + types::{Arg, AssetLocation, Chain, Command, Image, JsonOverrides}, + NodeConfig, ParachainConfig, RegistrationStrategy, +}; +use provider::DynNamespace; +use serde::{Deserialize, Serialize}; +use support::{fs::FileSystem, replacer::apply_replacements}; +use tracing::debug; + +use super::node::NodeSpec; +use crate::{ + errors::OrchestratorError, + 
generators::{ + chain_spec::{ChainSpec, Context, ParaGenesisConfig}, + para_artifact::*, + }, + shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext}, + ScopedFilesystem, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TeyrchainSpec { + // `name` of the parachain (used in some corner cases) + // name: Option, + /// Parachain id + pub(crate) id: u32, + + /// Unique id of the parachain, in the patter of - + /// where the suffix is only present if more than one parachain is set with the same id + pub(crate) unique_id: String, + + /// Default command to run the node. Can be overridden on each node. + pub(crate) default_command: Option, + + /// Default image to use (only podman/k8s). Can be overridden on each node. + pub(crate) default_image: Option, + + /// Default resources. Can be overridden on each node. + pub(crate) default_resources: Option, + + /// Default database snapshot. Can be overridden on each node. + pub(crate) default_db_snapshot: Option, + + /// Default arguments to use in nodes. Can be overridden on each node. + pub(crate) default_args: Vec, + + /// Chain-spec, only needed by cumulus based paras + pub(crate) chain_spec: Option, + + /// Do not automatically assign a bootnode role if no nodes are marked as bootnodes. + pub(crate) no_default_bootnodes: bool, + + /// Registration strategy to use + pub(crate) registration_strategy: RegistrationStrategy, + + /// Onboard as parachain or parathread + pub(crate) onboard_as_parachain: bool, + + /// Is the parachain cumulus-based + pub(crate) is_cumulus_based: bool, + + /// Is the parachain evm-based + pub(crate) is_evm_based: bool, + + /// Initial balance + pub(crate) initial_balance: u128, + + /// Genesis state (head) to register the parachain + pub(crate) genesis_state: ParaArtifact, + + /// Genesis WASM to register the parachain + pub(crate) genesis_wasm: ParaArtifact, + + /// Genesis overrides as JSON value. 
+ pub(crate) genesis_overrides: Option, + + /// Wasm override path/url to use. + pub(crate) wasm_override: Option, + + /// Collators to spawn + pub(crate) collators: Vec, + + /// Raw chain-spec override path, url or inline json to use. + pub(crate) raw_spec_override: Option, + + /// Bootnodes addresses to use for the parachain nodes + pub(crate) bootnodes_addresses: Vec, +} + +impl TeyrchainSpec { + pub fn from_config( + config: &ParachainConfig, + relay_chain: Chain, + ) -> Result { + let main_cmd = if let Some(cmd) = config.default_command() { + cmd + } else if let Some(first_node) = config.collators().first() { + let Some(cmd) = first_node.command() else { + return Err(OrchestratorError::InvalidConfig(format!("Parachain {}, either default_command or command in the first node needs to be set.", config.id()))); + }; + + cmd + } else { + return Err(OrchestratorError::InvalidConfig(format!( + "Parachain {}, without nodes and default_command isn't set.", + config.id() + ))); + }; + + // TODO: internally we use image as String + let main_image = config + .default_image() + .or(config.collators().first().and_then(|node| node.image())) + .map(|image| image.as_str().to_string()); + + let chain_spec = if config.is_cumulus_based() { + // we need a chain-spec + let chain_name = if let Some(chain_name) = config.chain() { + chain_name.as_str() + } else { + "" + }; + + let chain_spec_builder = if chain_name.is_empty() { + // if the chain don't have name use the unique_id for the name of the file + ChainSpec::new( + config.unique_id().to_string(), + Context::Para { + relay_chain, + para_id: config.id(), + }, + ) + } else { + let chain_spec_file_name = if config.unique_id().contains('-') { + &format!("{}-{}", chain_name, config.unique_id()) + } else { + chain_name + }; + ChainSpec::new( + chain_spec_file_name, + Context::Para { + relay_chain, + para_id: config.id(), + }, + ) + }; + let chain_spec_builder = chain_spec_builder.set_chain_name(chain_name); + + let replacements = 
HashMap::from([ + ("disableBootnodes", "--disable-default-bootnode"), + ("mainCommand", main_cmd.as_str()), + ]); + let tmpl = if let Some(tmpl) = config.chain_spec_command() { + apply_replacements(tmpl, &replacements) + } else { + apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements) + }; + + let chain_spec = chain_spec_builder + .command( + tmpl.as_str(), + config.chain_spec_command_is_local(), + config.chain_spec_command_output_path(), + ) + .image(main_image.clone()); + + let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() { + chain_spec.asset_location(chain_spec_path.clone()) + } else { + chain_spec + }; + + // add chain-spec runtime if present + let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() { + chain_spec.runtime(chain_spec_runtime.clone()) + } else { + chain_spec + }; + + Some(chain_spec) + } else { + None + }; + + // build the `node_specs` + let chain_context = ChainDefaultContext { + default_command: config.default_command(), + default_image: config.default_image(), + default_resources: config.default_resources(), + default_db_snapshot: config.default_db_snapshot(), + default_args: config.default_args(), + }; + + // We want to track the errors for all the nodes and report them ones + let mut errs: Vec = Default::default(); + let mut collators: Vec = Default::default(); + + let mut nodes: Vec = config.collators().into_iter().cloned().collect(); + nodes.extend( + config + .group_collators_configs() + .into_iter() + .flat_map(|node_group| node_group.expand_group_configs()), + ); + + let mut names = HashSet::new(); + for node_config in nodes { + match NodeSpec::from_config(&node_config, &chain_context, true, config.is_evm_based()) { + Ok(mut node) => { + let unique_name = generate_unique_node_name_from_names(node.name, &mut names); + node.name = unique_name; + collators.push(node) + }, + Err(err) => errs.push(err), + } + } + let genesis_state = if let Some(path) = config.genesis_state_path() { + 
ParaArtifact::new( + ParaArtifactType::State, + ParaArtifactBuildOption::Path(path.to_string()), + ) + } else { + let cmd = if let Some(cmd) = config.genesis_state_generator() { + cmd.cmd() + } else { + main_cmd + }; + ParaArtifact::new( + ParaArtifactType::State, + ParaArtifactBuildOption::Command(cmd.as_str().into()), + ) + .image(main_image.clone()) + }; + + let genesis_wasm = if let Some(path) = config.genesis_wasm_path() { + ParaArtifact::new( + ParaArtifactType::Wasm, + ParaArtifactBuildOption::Path(path.to_string()), + ) + } else { + let cmd = if let Some(cmd) = config.genesis_wasm_generator() { + cmd.as_str() + } else { + main_cmd.as_str() + }; + ParaArtifact::new( + ParaArtifactType::Wasm, + ParaArtifactBuildOption::Command(cmd.into()), + ) + .image(main_image.clone()) + }; + + let para_spec = TeyrchainSpec { + id: config.id(), + // ensure unique id is set at this point, if not just set to the para_id + unique_id: if config.unique_id().is_empty() { + config.id().to_string() + } else { + config.unique_id().to_string() + }, + default_command: config.default_command().cloned(), + default_image: config.default_image().cloned(), + default_resources: config.default_resources().cloned(), + default_db_snapshot: config.default_db_snapshot().cloned(), + wasm_override: config.wasm_override().cloned(), + default_args: config.default_args().into_iter().cloned().collect(), + chain_spec, + no_default_bootnodes: config.no_default_bootnodes(), + registration_strategy: config + .registration_strategy() + .unwrap_or(&RegistrationStrategy::InGenesis) + .clone(), + onboard_as_parachain: config.onboard_as_parachain(), + is_cumulus_based: config.is_cumulus_based(), + is_evm_based: config.is_evm_based(), + initial_balance: config.initial_balance(), + genesis_state, + genesis_wasm, + genesis_overrides: config.genesis_overrides().cloned(), + collators, + raw_spec_override: config.raw_spec_override().cloned(), + bootnodes_addresses: 
config.bootnodes_addresses().into_iter().cloned().collect(), + }; + + Ok(para_spec) + } + + pub fn registration_strategy(&self) -> &RegistrationStrategy { + &self.registration_strategy + } + + pub fn get_genesis_config(&self) -> Result, OrchestratorError> { + let genesis_config = ParaGenesisConfig { + state_path: self.genesis_state.artifact_path().ok_or( + OrchestratorError::InvariantError( + "artifact path for state must be set at this point", + ), + )?, + wasm_path: self.genesis_wasm.artifact_path().ok_or( + OrchestratorError::InvariantError( + "artifact path for wasm must be set at this point", + ), + )?, + id: self.id, + as_parachain: self.onboard_as_parachain, + }; + Ok(genesis_config) + } + + pub fn id(&self) -> u32 { + self.id + } + + pub fn chain_spec(&self) -> Option<&ChainSpec> { + self.chain_spec.as_ref() + } + + pub fn chain_spec_mut(&mut self) -> Option<&mut ChainSpec> { + self.chain_spec.as_mut() + } + + /// Build parachain chain-spec + /// + /// This function customize the chain-spec (if is possible) and build the raw version + /// of the chain-spec. 
+ pub(crate) async fn build_chain_spec<'a, T>( + &mut self, + relay_chain_id: &str, + ns: &DynNamespace, + scoped_fs: &ScopedFilesystem<'a, T>, + ) -> Result, anyhow::Error> + where + T: FileSystem, + { + let cloned = self.clone(); + let chain_spec_raw_path = if let Some(chain_spec) = self.chain_spec.as_mut() { + debug!("parachain chain-spec building!"); + chain_spec.build(ns, scoped_fs).await?; + debug!("parachain chain-spec built!"); + + chain_spec + .customize_para(&cloned, relay_chain_id, scoped_fs) + .await?; + debug!("parachain chain-spec customized!"); + chain_spec + .build_raw(ns, scoped_fs, Some(relay_chain_id.try_into()?)) + .await?; + debug!("parachain chain-spec raw built!"); + + // override wasm if needed + if let Some(ref wasm_override) = self.wasm_override { + chain_spec.override_code(scoped_fs, wasm_override).await?; + } + + // override raw spec if needed + if let Some(ref raw_spec_override) = self.raw_spec_override { + chain_spec + .override_raw_spec(scoped_fs, raw_spec_override) + .await?; + } + + let chain_spec_raw_path = + chain_spec + .raw_path() + .ok_or(OrchestratorError::InvariantError( + "chain-spec raw path should be set now", + ))?; + + Some(chain_spec_raw_path.to_path_buf()) + } else { + None + }; + Ok(chain_spec_raw_path) + } + + /// Get the bootnodes addresses for the parachain spec + pub(crate) fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> { + self.bootnodes_addresses.iter().collect() + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared.rs new file mode 100644 index 00000000..33b7575a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared.rs @@ -0,0 +1,3 @@ +pub mod constants; +pub mod macros; +pub mod types; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/constants.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/constants.rs new file mode 100644 index 
00000000..fa9e3ca8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/constants.rs @@ -0,0 +1,17 @@ +/// Prometheus exporter default port +pub const PROMETHEUS_PORT: u16 = 9615; +/// Prometheus exporter default port in collator full-node +pub const FULL_NODE_PROMETHEUS_PORT: u16 = 9616; +/// JSON-RPC server (ws) +pub const RPC_PORT: u16 = 9944; +// JSON-RPC server (http, used by old versions) +pub const RPC_HTTP_PORT: u16 = 9933; +// P2P default port +pub const P2P_PORT: u16 = 30333; +// default command template to build chain-spec +pub const DEFAULT_CHAIN_SPEC_TPL_COMMAND: &str = + "{{mainCommand}} build-spec --chain {{chainName}} {{disableBootnodes}}"; +// interval to determine how often to run node liveness checks +pub const NODE_MONITORING_INTERVAL_SECONDS: u64 = 15; +// how long to wait before a node is considered unresponsive +pub const NODE_MONITORING_FAILURE_THRESHOLD_SECONDS: u64 = 5; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/macros.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/macros.rs new file mode 100644 index 00000000..caeff776 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/macros.rs @@ -0,0 +1,32 @@ +macro_rules! 
create_add_options { + ($struct:ident {$( $field:ident:$type:ty ),*}) =>{ + #[derive(Default, Debug, Clone)] + pub struct $struct { + /// Image to run the node + pub image: Option, + /// Command to run the node + pub command: Option, + /// Subcommand for the node + pub subcommand: Option, + /// Arguments to pass to the node + pub args: Vec, + /// Env vars to set + pub env: Vec, + /// Make the node a validator + /// + /// This implies `--validator` or `--collator` + pub is_validator: bool, + /// RPC port to use, if None a random one will be set + pub rpc_port: Option, + /// Prometheus port to use, if None a random one will be set + pub prometheus_port: Option, + /// P2P port to use, if None a random one will be set + pub p2p_port: Option, + $( + pub $field: $type, + )* + } + }; +} + +pub(crate) use create_add_options; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/types.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/types.rs new file mode 100644 index 00000000..692a4b47 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/shared/types.rs @@ -0,0 +1,99 @@ +use std::{ + collections::HashMap, + net::TcpListener, + path::PathBuf, + sync::{Arc, RwLock}, +}; + +use configuration::shared::{ + resources::Resources, + types::{Arg, AssetLocation, Command, Image, Port}, +}; +use serde::{Deserialize, Serialize}; + +pub type Accounts = HashMap; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NodeAccount { + pub address: String, + pub public_key: String, +} + +impl NodeAccount { + pub fn new(addr: impl Into, pk: impl Into) -> Self { + Self { + address: addr.into(), + public_key: pk.into(), + } + } +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct NodeAccounts { + pub seed: String, + pub accounts: Accounts, +} + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +pub struct ParkedPort( + pub(crate) Port, + #[serde(skip)] pub(crate) Arc>>, +); + +impl 
ParkedPort { + pub(crate) fn new(port: u16, listener: TcpListener) -> ParkedPort { + let listener = Arc::new(RwLock::new(Some(listener))); + ParkedPort(port, listener) + } + + pub(crate) fn drop_listener(&self) { + // drop the listener will allow the running node to start listening for connections + let mut l = self.1.write().unwrap(); + *l = None; + } +} + +#[derive(Debug, Clone, Default)] +pub struct ChainDefaultContext<'a> { + pub default_command: Option<&'a Command>, + pub default_image: Option<&'a Image>, + pub default_resources: Option<&'a Resources>, + pub default_db_snapshot: Option<&'a AssetLocation>, + pub default_args: Vec<&'a Arg>, +} + +#[derive(Debug, Clone)] +pub struct RegisterParachainOptions { + pub id: u32, + pub wasm_path: PathBuf, + pub state_path: PathBuf, + pub node_ws_url: String, + pub onboard_as_para: bool, + pub seed: Option<[u8; 32]>, + pub finalization: bool, +} + +pub struct RuntimeUpgradeOptions { + /// Location of the wasm file (could be either a local file or an url) + pub wasm: AssetLocation, + /// Name of the node to use as rpc endpoint + pub node_name: Option, + /// Seed to use to sign and submit (default to //Alice) + pub seed: Option<[u8; 32]>, +} + +impl RuntimeUpgradeOptions { + pub fn new(wasm: AssetLocation) -> Self { + Self { + wasm, + node_name: None, + seed: None, + } + } +} +#[derive(Debug, Clone)] +pub struct ParachainGenesisArgs { + pub genesis_head: String, + pub validation_code: String, + pub parachain: bool, +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/spawner.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/spawner.rs new file mode 100644 index 00000000..47b0e55a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/spawner.rs @@ -0,0 +1,305 @@ +use std::{collections::HashMap, path::PathBuf}; + +use anyhow::Context; +use configuration::GlobalSettings; +use provider::{ + constants::{LOCALHOST, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, P2P_PORT}, + 
shared::helpers::running_in_ci, + types::{SpawnNodeOptions, TransferedFile}, + DynNamespace, +}; +use support::{ + constants::THIS_IS_A_BUG, fs::FileSystem, replacer::apply_running_network_replacements, +}; +use tracing::info; + +use crate::{ + generators, + network::node::NetworkNode, + network_spec::{node::NodeSpec, teyrchain::TeyrchainSpec}, + shared::constants::{FULL_NODE_PROMETHEUS_PORT, PROMETHEUS_PORT, RPC_PORT}, + ScopedFilesystem, ZombieRole, +}; + +#[derive(Clone)] +pub struct SpawnNodeCtx<'a, T: FileSystem> { + /// Relaychain id, from the chain-spec (e.g rococo_local_testnet) + pub(crate) chain_id: &'a str, + // Parachain id, from the chain-spec (e.g local_testnet) + pub(crate) parachain_id: Option<&'a str>, + /// Relaychain chain name (e.g rococo-local) + pub(crate) chain: &'a str, + /// Role of the node in the network + pub(crate) role: ZombieRole, + /// Ref to the namespace + pub(crate) ns: &'a DynNamespace, + /// Ref to an scoped filesystem (encapsulate fs actions inside the ns directory) + pub(crate) scoped_fs: &'a ScopedFilesystem<'a, T>, + /// Ref to a parachain (used to spawn collators) + pub(crate) parachain: Option<&'a TeyrchainSpec>, + /// The string representation of the bootnode address to pass to nodes + pub(crate) bootnodes_addr: &'a Vec, + /// Flag to wait node is ready or not + /// Ready state means we can query Prometheus internal server + pub(crate) wait_ready: bool, + /// A json representation of the running nodes with their names as 'key' + pub(crate) nodes_by_name: serde_json::Value, + /// A ref to the global settings + pub(crate) global_settings: &'a GlobalSettings, +} + +pub async fn spawn_node<'a, T>( + node: &NodeSpec, + mut files_to_inject: Vec, + ctx: &SpawnNodeCtx<'a, T>, +) -> Result +where + T: FileSystem, +{ + let mut created_paths = vec![]; + // Create and inject the keystore IFF + // - The node is validator in the relaychain + // - The node is collator (encoded as validator) and the parachain is cumulus_based + // 
(parachain_id) should be set then. + if node.is_validator && (ctx.parachain.is_none() || ctx.parachain_id.is_some()) { + // Generate keystore for node + let node_files_path = if let Some(para) = ctx.parachain { + para.id.to_string() + } else { + node.name.clone() + }; + let asset_hub_polkadot = ctx + .parachain_id + .map(|id| id.starts_with("asset-hub-polkadot")) + .unwrap_or_default(); + let keystore_key_types = node.keystore_key_types.iter().map(String::as_str).collect(); + let key_filenames = generators::generate_node_keystore( + &node.accounts, + &node_files_path, + ctx.scoped_fs, + asset_hub_polkadot, + keystore_key_types, + ) + .await + .unwrap(); + + // Paths returned are relative to the base dir, we need to convert into + // fullpaths to inject them in the nodes. + let remote_keystore_chain_id = if let Some(id) = ctx.parachain_id { + id + } else { + ctx.chain_id + }; + + let keystore_path = node.keystore_path.clone().unwrap_or(PathBuf::from(format!( + "/data/chains/{remote_keystore_chain_id}/keystore", + ))); + + for key_filename in key_filenames { + let f = TransferedFile::new( + PathBuf::from(format!( + "{}/{}/{}", + ctx.ns.base_dir().to_string_lossy(), + node_files_path, + key_filename.to_string_lossy() + )), + keystore_path.join(key_filename), + ); + files_to_inject.push(f); + } + created_paths.push(keystore_path); + } + + let base_dir = format!("{}/{}", ctx.ns.base_dir().to_string_lossy(), &node.name); + + let (cfg_path, data_path, relay_data_path) = if !ctx.ns.capabilities().prefix_with_full_path { + ( + NODE_CONFIG_DIR.into(), + NODE_DATA_DIR.into(), + NODE_RELAY_DATA_DIR.into(), + ) + } else { + let cfg_path = format!("{}{NODE_CONFIG_DIR}", &base_dir); + let data_path = format!("{}{NODE_DATA_DIR}", &base_dir); + let relay_data_path = format!("{}{NODE_RELAY_DATA_DIR}", &base_dir); + (cfg_path, data_path, relay_data_path) + }; + + let gen_opts = generators::GenCmdOptions { + relay_chain_name: ctx.chain, + cfg_path: &cfg_path, // TODO: get from 
provider/ns + data_path: &data_path, // TODO: get from provider + relay_data_path: &relay_data_path, // TODO: get from provider + use_wrapper: false, // TODO: get from provider + bootnode_addr: ctx.bootnodes_addr.clone(), + use_default_ports_in_cmd: ctx.ns.capabilities().use_default_ports_in_cmd, + // IFF the provider requires an image (e.g k8s) we know this is not native + is_native: !ctx.ns.capabilities().requires_image, + }; + + let mut collator_full_node_prom_port: Option = None; + let mut collator_full_node_prom_port_external: Option = None; + + let (program, args) = match ctx.role { + // Collator should be `non-cumulus` one (e.g adder/undying) + ZombieRole::Node | ZombieRole::Collator => { + let maybe_para_id = ctx.parachain.map(|para| para.id); + + generators::generate_node_command(node, gen_opts, maybe_para_id) + }, + ZombieRole::CumulusCollator => { + let para = ctx.parachain.expect(&format!( + "parachain must be part of the context {THIS_IS_A_BUG}" + )); + collator_full_node_prom_port = node.full_node_prometheus_port.as_ref().map(|p| p.0); + + generators::generate_node_command_cumulus(node, gen_opts, para.id) + }, + _ => unreachable!(), /* TODO: do we need those? + * ZombieRole::Bootnode => todo!(), + * ZombieRole::Companion => todo!(), */ + }; + + // apply running network replacements + let args: Vec = args + .iter() + .map(|arg| apply_running_network_replacements(arg, &ctx.nodes_by_name)) + .collect(); + + info!( + "🚀 {}, spawning.... 
with command: {} {}", + node.name, + program, + args.join(" ") + ); + + let ports = if ctx.ns.capabilities().use_default_ports_in_cmd { + // should use default ports as internal + [ + (P2P_PORT, node.p2p_port.0), + (RPC_PORT, node.rpc_port.0), + (PROMETHEUS_PORT, node.prometheus_port.0), + ] + } else { + [ + (P2P_PORT, P2P_PORT), + (RPC_PORT, RPC_PORT), + (PROMETHEUS_PORT, PROMETHEUS_PORT), + ] + }; + + let spawn_ops = SpawnNodeOptions::new(node.name.clone(), program) + .args(args) + .env( + node.env + .iter() + .map(|var| (var.name.clone(), var.value.clone())), + ) + .injected_files(files_to_inject) + .created_paths(created_paths) + .db_snapshot(node.db_snapshot.clone()) + .port_mapping(HashMap::from(ports)) + .node_log_path(node.node_log_path.clone()); + + let spawn_ops = if let Some(image) = node.image.as_ref() { + spawn_ops.image(image.as_str()) + } else { + spawn_ops + }; + + // Drops the port parking listeners before spawn + node.ws_port.drop_listener(); + node.p2p_port.drop_listener(); + node.rpc_port.drop_listener(); + node.prometheus_port.drop_listener(); + if let Some(port) = &node.full_node_p2p_port { + port.drop_listener(); + } + if let Some(port) = &node.full_node_prometheus_port { + port.drop_listener(); + } + + let running_node = ctx.ns.spawn_node(&spawn_ops).await.with_context(|| { + format!( + "Failed to spawn node: {} with opts: {:#?}", + node.name, spawn_ops + ) + })?; + + let mut ip_to_use = if let Some(local_ip) = ctx.global_settings.local_ip() { + *local_ip + } else { + LOCALHOST + }; + + let (rpc_port_external, prometheus_port_external, p2p_external); + + if running_in_ci() && ctx.ns.provider_name() == "k8s" { + // running kubernetes in ci requires using ip and default port + (rpc_port_external, prometheus_port_external, p2p_external) = + (RPC_PORT, PROMETHEUS_PORT, P2P_PORT); + collator_full_node_prom_port_external = Some(FULL_NODE_PROMETHEUS_PORT); + ip_to_use = running_node.ip().await?; + } else { + // Create port-forward iff we are not 
in CI or provider doesn't use the default ports (native) + let ports = futures::future::try_join_all(vec![ + running_node.create_port_forward(node.rpc_port.0, RPC_PORT), + running_node.create_port_forward(node.prometheus_port.0, PROMETHEUS_PORT), + ]) + .await?; + + (rpc_port_external, prometheus_port_external, p2p_external) = ( + ports[0].unwrap_or(node.rpc_port.0), + ports[1].unwrap_or(node.prometheus_port.0), + // p2p don't need port-fwd + node.p2p_port.0, + ); + + if let Some(full_node_prom_port) = collator_full_node_prom_port { + let port_fwd = running_node + .create_port_forward(full_node_prom_port, FULL_NODE_PROMETHEUS_PORT) + .await?; + collator_full_node_prom_port_external = Some(port_fwd.unwrap_or(full_node_prom_port)); + } + } + + let multiaddr = generators::generate_node_bootnode_addr( + &node.peer_id, + &running_node.ip().await?, + p2p_external, + running_node.args().as_ref(), + &node.p2p_cert_hash, + )?; + + let ws_uri = format!("ws://{ip_to_use}:{rpc_port_external}"); + let prometheus_uri = format!("http://{ip_to_use}:{prometheus_port_external}/metrics"); + info!("🚀 {}, should be running now", node.name); + info!( + "💻 {}: direct link (pjs) https://polkadot.js.org/apps/?rpc={ws_uri}#/explorer", + node.name + ); + info!( + "💻 {}: direct link (papi) https://dev.papi.how/explorer#networkId=custom&endpoint={ws_uri}", + node.name + ); + + info!("📊 {}: metrics link {prometheus_uri}", node.name); + + if let Some(full_node_prom_port) = collator_full_node_prom_port_external { + info!( + "📊 {}: collator full-node metrics link http://{}:{}/metrics", + node.name, ip_to_use, full_node_prom_port + ); + } + + info!("📓 logs cmd: {}", running_node.log_cmd()); + + Ok(NetworkNode::new( + node.name.clone(), + ws_uri, + prometheus_uri, + multiaddr, + node.clone(), + running_node, + )) +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper.rs new file mode 100644 index 
00000000..389c4d87 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper.rs @@ -0,0 +1,2 @@ +pub mod client; +pub mod runtime_upgrade; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/client.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/client.rs new file mode 100644 index 00000000..e08b326b --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/client.rs @@ -0,0 +1,43 @@ +use pezkuwi_subxt::{backend::rpc::RpcClient, OnlineClient}; + +#[async_trait::async_trait] +pub trait ClientFromUrl: Sized { + async fn from_secure_url(url: &str) -> Result; + async fn from_insecure_url(url: &str) -> Result; +} + +#[async_trait::async_trait] +impl ClientFromUrl for OnlineClient { + async fn from_secure_url(url: &str) -> Result { + Self::from_url(url).await.map_err(Into::into) + } + + async fn from_insecure_url(url: &str) -> Result { + Self::from_insecure_url(url).await.map_err(Into::into) + } +} + +#[async_trait::async_trait] +impl ClientFromUrl for RpcClient { + async fn from_secure_url(url: &str) -> Result { + Self::from_url(url) + .await + .map_err(pezkuwi_subxt::Error::from) + } + + async fn from_insecure_url(url: &str) -> Result { + Self::from_insecure_url(url) + .await + .map_err(pezkuwi_subxt::Error::from) + } +} + +pub async fn get_client_from_url( + url: &str, +) -> Result { + if pezkuwi_subxt::utils::url_is_secure(url)? 
{ + T::from_secure_url(url).await + } else { + T::from_insecure_url(url).await + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/runtime_upgrade.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/runtime_upgrade.rs new file mode 100644 index 00000000..57a65d7d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/tx_helper/runtime_upgrade.rs @@ -0,0 +1,69 @@ +use pezkuwi_subxt::{dynamic::Value, tx::TxStatus, BizinikiwConfig, OnlineClient}; +use pezkuwi_subxt_signer::sr25519::Keypair; +use tracing::{debug, info}; + +use crate::network::node::NetworkNode; + +pub async fn upgrade( + node: &NetworkNode, + wasm_data: &[u8], + sudo: &Keypair, +) -> Result<(), anyhow::Error> { + debug!( + "Upgrading runtime, using node: {} with endpoint {}", + node.name, node.ws_uri + ); + let api: OnlineClient = node.wait_client().await?; + + let upgrade = pezkuwi_subxt::dynamic::tx( + "System", + "set_code_without_checks", + vec![Value::from_bytes(wasm_data)], + ); + + let sudo_call = pezkuwi_subxt::dynamic::tx( + "Sudo", + "sudo_unchecked_weight", + vec![ + upgrade.into_value(), + Value::named_composite([ + ("ref_time", Value::primitive(1.into())), + ("proof_size", Value::primitive(1.into())), + ]), + ], + ); + + let mut tx = api + .tx() + .sign_and_submit_then_watch_default(&sudo_call, sudo) + .await?; + + // Below we use the low level API to replicate the `wait_for_in_block` behaviour + // which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237. 
+ while let Some(status) = tx.next().await { + let status = status?; + match &status { + TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => { + let _result = tx_in_block.wait_for_success().await?; + let block_status = if status.as_finalized().is_some() { + "Finalized" + } else { + "Best" + }; + info!( + "[{}] In block: {:#?}", + block_status, + tx_in_block.block_hash() + ); + }, + TxStatus::Error { message } + | TxStatus::Invalid { message } + | TxStatus::Dropped { message } => { + return Err(anyhow::format_err!("Error submitting tx: {message}")); + }, + _ => continue, + } + } + + Ok(()) +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/utils.rs b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/utils.rs new file mode 100644 index 00000000..7ae3dc1b --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/src/utils.rs @@ -0,0 +1,8 @@ +use serde::Deserializer; + +pub fn default_as_empty_vec<'de, D, T>(_deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + Ok(Vec::new()) +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/testing/rococo-local-plain.json b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/testing/rococo-local-plain.json new file mode 100644 index 00000000..fcec8519 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/orchestrator/testing/rococo-local-plain.json @@ -0,0 +1,211 @@ +{ + "name": "Rococo Local Testnet", + "id": "rococo_local_testnet", + "chainType": "Local", + "bootNodes": [ + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWJcDp2Cdok4uSHz5zpjWzfduNCzis9GsMfpej1jwdaYij" + ], + "telemetryEndpoints": null, + "protocolId": "dot", + "properties": null, + "forkBlocks": null, + "badBlocks": null, + "lightSyncState": null, + "codeSubstitutes": {}, + "genesis": { + "runtime": { + "system": { + "code": "0x52" + }, + "babe": { + "authorities": [], + "epochConfig": { + "c": [ + 1, + 4 + ], + "allowed_slots": "PrimaryAndSecondaryVRFSlots" + } + }, + "indices": { + 
"indices": [] + }, + "balances": { + "balances": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1000000000000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1000000000000000000 + ], + [ + "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy", + 1000000000000000000 + ], + [ + "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw", + 1000000000000000000 + ], + [ + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + 1000000000000000000 + ], + [ + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + 1000000000000000000 + ], + [ + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc", + 1000000000000000000 + ], + [ + "5Ck5SLSHYac6WFt5UZRSsdJjwmpSZq85fd5TRNAdZQVzEAPT", + 1000000000000000000 + ], + [ + "5HKPmK9GYtE1PSLsS1qiYU9xQ9Si1NcEhdeCq9sw5bqu4ns8", + 1000000000000000000 + ], + [ + "5FCfAonRZgTFrTd9HREEyeJjDpT397KMzizE6T3DvebLFE7n", + 1000000000000000000 + ], + [ + "5CRmqmsiNFExV6VbdmPJViVxrWmkaXXvBrSX8oqBT8R9vmWk", + 1000000000000000000 + ] + ] + }, + "beefy": { + "authorities": [], + "genesisBlock": 1 + }, + "session": { + "keys": [ + [ + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY", + { + "grandpa": "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu", + "babe": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "im_online": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "para_validator": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "para_assignment": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "authority_discovery": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "beefy": "KW39r9CJjAVzmkf9zQ4YDb2hqfAVGdRqn53eRqyruqpxAP5YL" + } + ], + [ + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc", + "5HpG9w8EBLe5XCrbczpwq5TSXvedjrBGCwqxK1iQ7qUsSWFc", + { + "grandpa": "5GoNkf6WdbxCFnPdAnYYQyCjAKPJgLNxXwPjwTh6DGg6gN3E", + "babe": 
"5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "im_online": "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "para_validator": "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "para_assignment": "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "authority_discovery": "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + "beefy": "KWByAN7WfZABWS5AoWqxriRmF5f2jnDqy3rB5pfHLGkY93ibN" + } + ] + ] + }, + "grandpa": { + "authorities": [] + }, + "imOnline": { + "keys": [] + }, + "authorityDiscovery": { + "keys": [] + }, + "treasury": {}, + "claims": { + "claims": [], + "vesting": [] + }, + "vesting": { + "vesting": [] + }, + "nisCounterpartBalances": { + "balances": [] + }, + "configuration": { + "config": { + "max_code_size": 3145728, + "max_head_data_size": 32768, + "max_upward_queue_count": 8, + "max_upward_queue_size": 1048576, + "max_upward_message_size": 51200, + "max_upward_message_num_per_candidate": 5, + "hrmp_max_message_num_per_candidate": 5, + "validation_upgrade_cooldown": 2, + "validation_upgrade_delay": 2, + "async_backing_params": { + "max_candidate_depth": 0, + "allowed_ancestry_len": 0 + }, + "max_pov_size": 5242880, + "max_downward_message_size": 1048576, + "hrmp_max_parachain_outbound_channels": 4, + "hrmp_sender_deposit": 0, + "hrmp_recipient_deposit": 0, + "hrmp_channel_max_capacity": 8, + "hrmp_channel_max_total_size": 8192, + "hrmp_max_parachain_inbound_channels": 4, + "hrmp_channel_max_message_size": 1048576, + "executor_params": [], + "code_retention_period": 1200, + "on_demand_cores": 0, + "on_demand_retries": 0, + "on_demand_queue_max_size": 10000, + "on_demand_target_queue_utilization": 250000000, + "on_demand_fee_variability": 30000000, + "on_demand_base_fee": 10000000, + "on_demand_ttl": 5, + "group_rotation_frequency": 20, + "paras_availability_period": 4, + "scheduling_lookahead": 1, + "max_validators_per_core": 1, + "max_validators": null, + "dispute_period": 6, + "dispute_post_conclusion_acceptance_period": 100, + 
"no_show_slots": 2, + "n_delay_tranches": 25, + "zeroth_delay_tranche_width": 0, + "needed_approvals": 2, + "relay_vrf_modulo_samples": 2, + "pvf_voting_ttl": 2, + "minimum_validation_upgrade_delay": 5, + "minimum_backing_votes": 2 + } + }, + "paras": { + "paras": [] + }, + "hrmp": { + "preopenHrmpChannels": [] + }, + "registrar": { + "nextFreeParaId": 2000 + }, + "xcmPallet": { + "safeXcmVersion": 3 + }, + "assignedSlots": { + "maxTemporarySlots": 0, + "maxPermanentSlots": 0, + "config": null + }, + "sudo": { + "key": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" + } + } + } +} \ No newline at end of file diff --git a/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/Cargo.toml new file mode 100644 index 00000000..ff2b7505 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "zombienet-prom-metrics-parser" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Prometheus metric parser, parse metrics provided by internal prometheus server" +keywords = ["zombienet", "prometheus"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +pest = { workspace = true } +pest_derive = { workspace = true } +thiserror = { workspace = true } diff --git a/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/grammar.pest b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/grammar.pest new file mode 100644 index 00000000..16265abc --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/grammar.pest @@ -0,0 +1,47 @@ +// Grammar taken from https://github.com/mitghi/promerge/ with +// some small modifications. 
+alpha = _{'a'..'z' | 'A'..'Z'} +alphanum = _{'a'..'z' | 'A'..'Z' | '0'..'9'} +number = @{ + "-"? + ~ ("0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT*) + ~ ("." ~ ASCII_DIGIT*)? + ~ (^"e" ~ ("+" | "-")? ~ ASCII_DIGIT+)? +} +string = ${"\"" ~ inner ~ "\""} +inner = @{char*} +char = { + !("\"" | "\\") ~ ANY + | "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t") + | "\\" ~ ("u" ~ ASCII_HEX_DIGIT{4}) +} +whitespace_or_newline = _{(" "| "\n")*} +hash = _{"#"} +posInf = {"+Inf"} +negInf = {"-Inf"} +NaN = {"NaN"} +lbrace = _{"{"} +rbrace = _{"}"} +typelit = _{"TYPE"} +helplit = _{"HELP"} +comma = _{","} +countertype = {"counter"} +gaugetype = {"gauge"} +histogramtype = {"histogram"} +summarytype = {"summary"} +untyped = {"untyped"} +ident = {alphanum+} +key = @{ident ~ ("_" ~ ident)*} +label = {key ~ "=" ~ string} +labels = {label ~ (comma ~ label)*} +helpkey = {key} +helpval = {inner} +typekey = {key} +typeval = {countertype | gaugetype | histogramtype | summarytype | untyped} +commentval = @{((ASCII_DIGIT| ASCII_NONZERO_DIGIT | ASCII_BIN_DIGIT | ASCII_OCT_DIGIT | ASCII_HEX_DIGIT | ASCII_ALPHA_LOWER | ASCII_ALPHA_UPPER | ASCII_ALPHA | ASCII_ALPHANUMERIC | !"\n" ~ ANY ))*} +helpexpr = {hash ~ whitespace_or_newline ~ helplit ~ whitespace_or_newline ~ helpkey ~ whitespace_or_newline ~ commentval} +typexpr = {hash ~ whitespace_or_newline ~ typelit ~ whitespace_or_newline ~ typekey ~ whitespace_or_newline ~ typeval } +genericomment = {hash ~ whitespace_or_newline ~ commentval} +promstmt = {key ~ (lbrace ~ (labels)* ~ rbrace){0,1} ~ whitespace_or_newline ~ ((posInf | negInf | NaN | number) ~ whitespace_or_newline ){1,2}} +block = {((helpexpr | typexpr | genericomment)~ NEWLINE?)+ ~ (promstmt ~ NEWLINE?)+} +statement = {SOI ~ block+ ~ EOI} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/lib.rs new file mode 100644 index 00000000..d9da185a --- /dev/null +++ 
b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/src/lib.rs @@ -0,0 +1,178 @@ +use std::{collections::HashMap, num::ParseFloatError}; + +use pest::Parser; +use pest_derive::Parser; + +/// An error at parsing level. +#[derive(thiserror::Error, Debug)] +pub enum ParserError { + #[error("error parsing input")] + ParseError(Box>), + #[error("root node should be valid: {0}")] + ParseRootNodeError(String), + #[error("can't cast metric value as f64: {0}")] + CastValueError(#[from] ParseFloatError), +} + +// This include forces recompiling this source file if the grammar file changes. +// Uncomment it when doing changes to the .pest file +const _GRAMMAR: &str = include_str!("grammar.pest"); + +#[derive(Parser)] +#[grammar = "grammar.pest"] +pub struct MetricsParser; + +pub type MetricMap = HashMap; + +pub fn parse(input: &str) -> Result { + let mut metric_map: MetricMap = Default::default(); + let mut pairs = MetricsParser::parse(Rule::statement, input) + .map_err(|e| ParserError::ParseError(Box::new(e)))?; + + let root = pairs + .next() + .ok_or(ParserError::ParseRootNodeError(pairs.as_str().to_string()))?; + for token in root.into_inner() { + if token.as_rule() == Rule::block { + let inner = token.into_inner(); + for value in inner { + match value.as_rule() { + Rule::genericomment | Rule::typexpr | Rule::helpexpr => { + // don't need to collect comments/types/helpers blocks. 
+ continue; + }, + Rule::promstmt => { + let mut key: &str = ""; + let mut labels: Vec<(&str, &str)> = Vec::new(); + let mut val: f64 = 0_f64; + for v in value.clone().into_inner() { + match &v.as_rule() { + Rule::key => { + key = v.as_span().as_str(); + }, + Rule::NaN | Rule::posInf | Rule::negInf => { + // noop (not used in substrate metrics) + }, + Rule::number => { + val = v.as_span().as_str().parse::()?; + }, + Rule::labels => { + // SAFETY: use unwrap should be safe since we are just + // walking the parser struct and if are matching a label + // should have a key/vals + for p in v.into_inner() { + let mut inner = p.into_inner(); + let key = inner.next().unwrap().as_span().as_str(); + let value = inner + .next() + .unwrap() + .into_inner() + .next() + .unwrap() + .as_span() + .as_str(); + + labels.push((key, value)); + } + }, + _ => { + todo!("not implemented"); + }, + } + } + + // we should store to make it compatible with zombienet v1: + // key_without_prefix + // key_without_prefix_and_without_chain + // key_with_prefix_with_chain + // key_with_prefix_and_without_chain + let key_with_out_prefix = + key.split('_').collect::>()[1..].join("_"); + let (labels_without_chain, labels_with_chain) = + labels.iter().fold((vec![], vec![]), |mut acc, item| { + if item.0.eq("chain") { + acc.1.push(format!("{}=\"{}\"", item.0, item.1)); + } else { + acc.0.push(format!("{}=\"{}\"", item.0, item.1)); + acc.1.push(format!("{}=\"{}\"", item.0, item.1)); + } + acc + }); + + let labels_with_chain_str = if labels_with_chain.is_empty() { + String::from("") + } else { + format!("{{{}}}", labels_with_chain.join(",")) + }; + + let labels_without_chain_str = if labels_without_chain.is_empty() { + String::from("") + } else { + format!("{{{}}}", labels_without_chain.join(",")) + }; + + metric_map.insert(format!("{key}{labels_without_chain_str}"), val); + metric_map.insert( + format!("{key_with_out_prefix}{labels_without_chain_str}"), + val, + ); + 
metric_map.insert(format!("{key}{labels_with_chain_str}"), val); + metric_map + .insert(format!("{key_with_out_prefix}{labels_with_chain_str}"), val); + }, + _ => {}, + } + } + } + } + + Ok(metric_map) +} + +#[cfg(test)] +mod tests { + use std::fs; + + use super::*; + + #[test] + fn parse_metrics_works() { + let metrics_raw = fs::read_to_string("./testing/metrics.txt").unwrap(); + let metrics = parse(&metrics_raw).unwrap(); + + // full key + assert_eq!( + metrics + .get("polkadot_node_is_active_validator{chain=\"rococo_local_testnet\"}") + .unwrap(), + &1_f64 + ); + // with prefix and no chain + assert_eq!( + metrics.get("polkadot_node_is_active_validator").unwrap(), + &1_f64 + ); + // no prefix with chain + assert_eq!( + metrics + .get("node_is_active_validator{chain=\"rococo_local_testnet\"}") + .unwrap(), + &1_f64 + ); + // no prefix without chain + assert_eq!(metrics.get("node_is_active_validator").unwrap(), &1_f64); + } + + #[test] + fn parse_invalid_metrics_str_should_fail() { + let metrics_raw = r" + # HELP polkadot_node_is_active_validator Tracks if the validator is in the active set. Updates at session boundary. 
+ # TYPE polkadot_node_is_active_validator gauge + polkadot_node_is_active_validator{chain=} 1 + "; + + let metrics = parse(metrics_raw); + assert!(metrics.is_err()); + assert!(matches!(metrics, Err(ParserError::ParseError(_)))); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/testing/metrics.txt b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/testing/metrics.txt new file mode 100644 index 00000000..fc98ff90 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/prom-metrics-parser/testing/metrics.txt @@ -0,0 +1,3879 @@ +# HELP beefy_on_demand_justification_peer_refused Number of on-demand justification peer refused valid requests +# TYPE beefy_on_demand_justification_peer_refused counter +beefy_on_demand_justification_peer_refused{chain="rococo_local_testnet"} 0 +# HELP polkadot_node_is_active_validator Tracks if the validator is in the active set. Updates at session boundary. +# TYPE polkadot_node_is_active_validator gauge +polkadot_node_is_active_validator{chain="rococo_local_testnet"} 1 +# HELP polkadot_node_is_parachain_validator Tracks if the validator participates in parachain consensus. Parachain validators are a subset of the active set validators that perform approval checking of all parachain candidates in a session.Updates at session boundary. +# TYPE polkadot_node_is_parachain_validator gauge +polkadot_node_is_parachain_validator{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_activated_heads_total Number of activated heads. 
+# TYPE polkadot_parachain_activated_heads_total counter +polkadot_parachain_activated_heads_total{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_approval_candidate_signatures_requests_total Number of times signatures got requested by other subsystems +# TYPE polkadot_parachain_approval_candidate_signatures_requests_total counter +polkadot_parachain_approval_candidate_signatures_requests_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approval_checking_finality_lag How far behind the head of the chain the Approval Checking protocol wants to vote +# TYPE polkadot_parachain_approval_checking_finality_lag gauge +polkadot_parachain_approval_checking_finality_lag{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approval_distribution_aggression_l1_messages_total Number of messages in approval distribution for which aggression L1 has been triggered +# TYPE polkadot_parachain_approval_distribution_aggression_l1_messages_total counter +polkadot_parachain_approval_distribution_aggression_l1_messages_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approval_distribution_aggression_l2_messages_total Number of messages in approval distribution for which aggression L2 has been triggered +# TYPE polkadot_parachain_approval_distribution_aggression_l2_messages_total counter +polkadot_parachain_approval_distribution_aggression_l2_messages_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approval_unapproved_candidates_in_unfinalized_chain Number of unapproved candidates in unfinalized chain +# TYPE polkadot_parachain_approval_unapproved_candidates_in_unfinalized_chain gauge +polkadot_parachain_approval_unapproved_candidates_in_unfinalized_chain{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approvals_blockapproval_time_ticks Number of ticks (500ms) to approve blocks. 
+# TYPE polkadot_parachain_approvals_blockapproval_time_ticks histogram +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="6"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="12"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="18"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="24"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="30"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="36"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="72"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="100"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="144"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_approvals_blockapproval_time_ticks_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approvals_candidate_approval_time_ticks Number of ticks (500ms) to approve candidates. 
+# TYPE polkadot_parachain_approvals_candidate_approval_time_ticks histogram +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="6"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="12"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="18"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="24"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="30"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="36"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="72"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="100"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="144"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_approvals_candidate_approval_time_ticks_sum{chain="rococo_local_testnet"} 5 +polkadot_parachain_approvals_candidate_approval_time_ticks_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_approvals_imported_total Number of valid approvals imported locally or from other peers. 
+# TYPE polkadot_parachain_approvals_imported_total counter +polkadot_parachain_approvals_imported_total{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_approvals_no_shows_total Number of assignments which became no-shows in the approval voting subsystem +# TYPE polkadot_parachain_approvals_no_shows_total counter +polkadot_parachain_approvals_no_shows_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_approvals_produced_total Number of approvals produced by the approval voting subsystem +# TYPE polkadot_parachain_approvals_produced_total counter +polkadot_parachain_approvals_produced_total{status="success",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_approvals_wakeups_total Number of times we woke up to process a candidate in the approval voting subsystem +# TYPE polkadot_parachain_approvals_wakeups_total counter +polkadot_parachain_approvals_wakeups_total{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_assignments_imported_total Number of valid assignments imported locally or from other peers. 
+# TYPE polkadot_parachain_assignments_imported_total counter +polkadot_parachain_assignments_imported_total{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_assignments_produced Assignments and tranches produced by the approval voting subsystem +# TYPE polkadot_parachain_assignments_produced histogram +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="0"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="2"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="3"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="4"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="15"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="25"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="40"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="70"} 1 +polkadot_parachain_assignments_produced_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_assignments_produced_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_assignments_produced_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_av_store_block_activated Time spent within `av_store::process_block_activated` +# TYPE polkadot_parachain_av_store_block_activated histogram +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.005"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.01"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.025"} 14 
+polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.05"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.1"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.25"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="0.5"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="1"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="2.5"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="5"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="10"} 14 +polkadot_parachain_av_store_block_activated_bucket{chain="rococo_local_testnet",le="+Inf"} 14 +polkadot_parachain_av_store_block_activated_sum{chain="rococo_local_testnet"} 0.03075325 +polkadot_parachain_av_store_block_activated_count{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_av_store_get_chunk Time spent fetching requested chunks.` +# TYPE polkadot_parachain_av_store_get_chunk histogram +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.000625"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.00125"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.0025"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.0075"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.1"} 0 
+polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_av_store_get_chunk_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_av_store_get_chunk_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_av_store_get_chunk_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_av_store_process_block_finalized Time spent within `av_store::process_block_finalized` +# TYPE polkadot_parachain_av_store_process_block_finalized histogram +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.005"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.01"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.025"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.05"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.1"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.25"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="0.5"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="1"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="2.5"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="5"} 11 
+polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="10"} 11 +polkadot_parachain_av_store_process_block_finalized_bucket{chain="rococo_local_testnet",le="+Inf"} 11 +polkadot_parachain_av_store_process_block_finalized_sum{chain="rococo_local_testnet"} 0.000435126 +polkadot_parachain_av_store_process_block_finalized_count{chain="rococo_local_testnet"} 11 +# HELP polkadot_parachain_av_store_process_message Time spent within `av_store::process_message` +# TYPE polkadot_parachain_av_store_process_message histogram +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.005"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.01"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.025"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.05"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.1"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.25"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="0.5"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="1"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="2.5"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="5"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="10"} 4 +polkadot_parachain_av_store_process_message_bucket{chain="rococo_local_testnet",le="+Inf"} 4 +polkadot_parachain_av_store_process_message_sum{chain="rococo_local_testnet"} 0.000102749 +polkadot_parachain_av_store_process_message_count{chain="rococo_local_testnet"} 4 +# HELP polkadot_parachain_av_store_pruning Time spent within `av_store::prune_all` +# TYPE polkadot_parachain_av_store_pruning histogram 
+polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_av_store_pruning_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_av_store_pruning_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_av_store_pruning_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_av_store_store_available_data Time spent within `av_store::store_available_data` +# TYPE polkadot_parachain_av_store_store_available_data histogram +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.25"} 0 
+polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_av_store_store_available_data_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_av_store_store_available_data_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_av_store_store_available_data_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_av_store_store_chunk Time spent within `av_store::store_chunk` +# TYPE polkadot_parachain_av_store_store_chunk histogram +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.005"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.01"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.025"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.05"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.25"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="2.5"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_av_store_store_chunk_bucket{chain="rococo_local_testnet",le="+Inf"} 1 
+polkadot_parachain_av_store_store_chunk_sum{chain="rococo_local_testnet"} 0.000064916 +polkadot_parachain_av_store_store_chunk_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_availability_recovery_chunk_requests_issued Total number of issued chunk requests. +# TYPE polkadot_parachain_availability_recovery_chunk_requests_issued counter +polkadot_parachain_availability_recovery_chunk_requests_issued{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_availability_recovery_recoveries_finished Total number of recoveries that finished. +# TYPE polkadot_parachain_availability_recovery_recoveries_finished counter +polkadot_parachain_availability_recovery_recoveries_finished{result="success",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_availability_recovery_recovieries_started Total number of started recoveries. +# TYPE polkadot_parachain_availability_recovery_recovieries_started counter +polkadot_parachain_availability_recovery_recovieries_started{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_availability_recovery_time_chunk_request Time spent waiting for a response to a chunk request +# TYPE polkadot_parachain_availability_recovery_time_chunk_request histogram +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="0.5"} 0 
+polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_availability_recovery_time_chunk_request_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_availability_recovery_time_erasure_recovery Time spent to recover the erasure code and verify the merkle root by re-encoding as erasure chunks +# TYPE polkadot_parachain_availability_recovery_time_erasure_recovery histogram +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="2.5"} 
0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_availability_recovery_time_erasure_recovery_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_availability_recovery_time_total Time a full recovery process took, either until failure or successful erasure decoding. +# TYPE polkadot_parachain_availability_recovery_time_total histogram +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.005"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.01"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.025"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.05"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.25"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="2.5"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_availability_recovery_time_total_bucket{chain="rococo_local_testnet",le="+Inf"} 1 
+polkadot_parachain_availability_recovery_time_total_sum{chain="rococo_local_testnet"} 0.001508542 +polkadot_parachain_availability_recovery_time_total_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_availability_reencode_chunks Time spent re-encoding the data as erasure chunks +# TYPE polkadot_parachain_availability_reencode_chunks histogram +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.005"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.01"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.025"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.05"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.25"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="2.5"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_availability_reencode_chunks_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_availability_reencode_chunks_sum{chain="rococo_local_testnet"} 0.000549916 +polkadot_parachain_availability_reencode_chunks_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_bitfield_distribution_active_leaves_update Time spent within `bitfield_distribution::active_leaves_update` +# TYPE polkadot_parachain_bitfield_distribution_active_leaves_update histogram 
+polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.005"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.01"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.025"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.05"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.1"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.25"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.5"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="1"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="2.5"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="5"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="10"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="+Inf"} 15 +polkadot_parachain_bitfield_distribution_active_leaves_update_sum{chain="rococo_local_testnet"} 0.018155626 +polkadot_parachain_bitfield_distribution_active_leaves_update_count{chain="rococo_local_testnet"} 15 +# HELP polkadot_parachain_bitfield_distribution_handle_bitfield_distribution Time spent within `bitfield_distribution::handle_bitfield_distribution` +# TYPE polkadot_parachain_bitfield_distribution_handle_bitfield_distribution histogram +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.005"} 14 
+polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.01"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.025"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.05"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.1"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.25"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="0.5"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="1"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="2.5"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="5"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="10"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_bucket{chain="rococo_local_testnet",le="+Inf"} 14 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_sum{chain="rococo_local_testnet"} 0.000500624 +polkadot_parachain_bitfield_distribution_handle_bitfield_distribution_count{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_bitfield_distribution_handle_network_msg Time spent within `bitfield_distribution::handle_network_msg` +# TYPE polkadot_parachain_bitfield_distribution_handle_network_msg histogram +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.005"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.01"} 79 
+polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.025"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.05"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.1"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.25"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="0.5"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="1"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="2.5"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="5"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="10"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_bucket{chain="rococo_local_testnet",le="+Inf"} 79 +polkadot_parachain_bitfield_distribution_handle_network_msg_sum{chain="rococo_local_testnet"} 0.003397581999999999 +polkadot_parachain_bitfield_distribution_handle_network_msg_count{chain="rococo_local_testnet"} 79 +# HELP polkadot_parachain_bitfield_signing_run Time spent within `bitfield_signing::run` +# TYPE polkadot_parachain_bitfield_signing_run histogram +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.000625"} 0 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.00125"} 0 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.0025"} 2 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.005"} 8 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.0075"} 13 
+polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.01"} 13 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.025"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.05"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.1"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.25"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="0.5"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="1"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="2.5"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="5"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="10"} 14 +polkadot_parachain_bitfield_signing_run_bucket{chain="rococo_local_testnet",le="+Inf"} 14 +polkadot_parachain_bitfield_signing_run_sum{chain="rococo_local_testnet"} 0.06996700000000002 +polkadot_parachain_bitfield_signing_run_count{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_bitfields_signed_total Number of bitfields signed. +# TYPE polkadot_parachain_bitfields_signed_total counter +polkadot_parachain_bitfields_signed_total{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_candidate_backing_candidates_seconded_total Number of candidates seconded. 
+# TYPE polkadot_parachain_candidate_backing_candidates_seconded_total counter +polkadot_parachain_candidate_backing_candidates_seconded_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_candidate_backing_get_backed_candidates Time spent within `candidate_backing::get_backed_candidates` +# TYPE polkadot_parachain_candidate_backing_get_backed_candidates histogram +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.005"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.01"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.025"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.05"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.1"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.25"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="0.5"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="1"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="2.5"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="5"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="10"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +polkadot_parachain_candidate_backing_get_backed_candidates_sum{chain="rococo_local_testnet"} 0.000006 +polkadot_parachain_candidate_backing_get_backed_candidates_count{chain="rococo_local_testnet"} 5 +# HELP polkadot_parachain_candidate_backing_process_second Time spent within `candidate_backing::process_second` +# 
TYPE polkadot_parachain_candidate_backing_process_second histogram +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_candidate_backing_process_second_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_candidate_backing_process_second_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_candidate_backing_process_second_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_candidate_backing_process_statement Time spent within `candidate_backing::process_statement` +# TYPE polkadot_parachain_candidate_backing_process_statement histogram +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.005"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.01"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.025"} 2 
+polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.05"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.1"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.25"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="0.5"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="1"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="2.5"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="5"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="10"} 2 +polkadot_parachain_candidate_backing_process_statement_bucket{chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_candidate_backing_process_statement_sum{chain="rococo_local_testnet"} 0.000059334 +polkadot_parachain_candidate_backing_process_statement_count{chain="rococo_local_testnet"} 2 +# HELP polkadot_parachain_candidate_backing_signed_statements_total Number of statements signed. +# TYPE polkadot_parachain_candidate_backing_signed_statements_total counter +polkadot_parachain_candidate_backing_signed_statements_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_candidate_dispute_votes Accumulated dispute votes, sorted by candidate is `valid` and `invalid`. +# TYPE polkadot_parachain_candidate_dispute_votes counter +polkadot_parachain_candidate_dispute_votes{validity="invalid",chain="rococo_local_testnet"} 0 +polkadot_parachain_candidate_dispute_votes{validity="valid",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_candidate_disputes_total Total number of raised disputes. 
+# TYPE polkadot_parachain_candidate_disputes_total counter +polkadot_parachain_candidate_disputes_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_candidate_validation_code_size The size of the decompressed WASM validation blob used for checking a candidate +# TYPE polkadot_parachain_candidate_validation_code_size histogram +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="16384"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="32768"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="65536"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="131072"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="262144"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="524288"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="1048576"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="2097152"} 0 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="4194304"} 1 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="8388608"} 1 +polkadot_parachain_candidate_validation_code_size_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_candidate_validation_code_size_sum{chain="rococo_local_testnet"} 3244644 +polkadot_parachain_candidate_validation_code_size_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_candidate_validation_pov_size The compressed and decompressed size of the proof of validity of a candidate +# TYPE polkadot_parachain_candidate_validation_pov_size histogram +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="16384"} 1 
+polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="32768"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="65536"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="131072"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="262144"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="524288"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="1048576"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="2097152"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="4194304"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="8388608"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="false",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_candidate_validation_pov_size_sum{compressed="false",chain="rococo_local_testnet"} 4746 +polkadot_parachain_candidate_validation_pov_size_count{compressed="false",chain="rococo_local_testnet"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="16384"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="32768"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="65536"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="131072"} 1 
+polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="262144"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="524288"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="1048576"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="2097152"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="4194304"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="8388608"} 1 +polkadot_parachain_candidate_validation_pov_size_bucket{compressed="true",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_candidate_validation_pov_size_sum{compressed="true",chain="rococo_local_testnet"} 4111 +polkadot_parachain_candidate_validation_pov_size_count{compressed="true",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_candidate_validation_validate_candidate_exhaustive Time spent within `candidate_validation::validate_candidate_exhaustive` +# TYPE polkadot_parachain_candidate_validation_validate_candidate_exhaustive histogram +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.1"} 0 
+polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_sum{chain="rococo_local_testnet"} 2.738384209 +polkadot_parachain_candidate_validation_validate_candidate_exhaustive_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_candidate_validation_validate_from_chain_state Time spent within `candidate_validation::validate_from_chain_state` +# TYPE polkadot_parachain_candidate_validation_validate_from_chain_state histogram +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.25"} 0 
+polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_candidate_validation_validate_from_chain_state_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_candidate_validation_validate_from_exhaustive Time spent within `candidate_validation::validate_from_exhaustive` +# TYPE polkadot_parachain_candidate_validation_validate_from_exhaustive histogram +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="0.5"} 0 
+polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_candidate_validation_validate_from_exhaustive_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_candidate_validation_validate_from_exhaustive_sum{chain="rococo_local_testnet"} 2.738553541 +polkadot_parachain_candidate_validation_validate_from_exhaustive_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_chain_api_ancestors Time spent within `chain_api::ancestors` +# TYPE polkadot_parachain_chain_api_ancestors histogram +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.005"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.01"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.025"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.05"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.1"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.25"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="0.5"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="1"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="2.5"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="5"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="10"} 44 +polkadot_parachain_chain_api_ancestors_bucket{chain="rococo_local_testnet",le="+Inf"} 44 
+polkadot_parachain_chain_api_ancestors_sum{chain="rococo_local_testnet"} 0.0009038730000000002 +polkadot_parachain_chain_api_ancestors_count{chain="rococo_local_testnet"} 44 +# HELP polkadot_parachain_chain_api_block_headers Time spent within `chain_api::block_headers` +# TYPE polkadot_parachain_chain_api_block_headers histogram +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.005"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.01"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.025"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.05"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.1"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.25"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="0.5"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="1"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="2.5"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="5"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="10"} 42 +polkadot_parachain_chain_api_block_headers_bucket{chain="rococo_local_testnet",le="+Inf"} 42 +polkadot_parachain_chain_api_block_headers_sum{chain="rococo_local_testnet"} 0.000393039 +polkadot_parachain_chain_api_block_headers_count{chain="rococo_local_testnet"} 42 +# HELP polkadot_parachain_chain_api_block_number Time spent within `chain_api::block_number` +# TYPE polkadot_parachain_chain_api_block_number histogram +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.005"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.01"} 24 
+polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.025"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.05"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.1"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.25"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="0.5"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="1"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="2.5"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="5"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="10"} 24 +polkadot_parachain_chain_api_block_number_bucket{chain="rococo_local_testnet",le="+Inf"} 24 +polkadot_parachain_chain_api_block_number_sum{chain="rococo_local_testnet"} 0.00017533399999999997 +polkadot_parachain_chain_api_block_number_count{chain="rococo_local_testnet"} 24 +# HELP polkadot_parachain_chain_api_block_weight Time spent within `chain_api::block_weight` +# TYPE polkadot_parachain_chain_api_block_weight histogram +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.005"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.01"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.025"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.05"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.1"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.25"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="0.5"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="1"} 14 
+polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="2.5"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="5"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="10"} 14 +polkadot_parachain_chain_api_block_weight_bucket{chain="rococo_local_testnet",le="+Inf"} 14 +polkadot_parachain_chain_api_block_weight_sum{chain="rococo_local_testnet"} 0.000210502 +polkadot_parachain_chain_api_block_weight_count{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_chain_api_finalized_block_hash Time spent within `chain_api::finalized_block_hash` +# TYPE polkadot_parachain_chain_api_finalized_block_hash histogram +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.005"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.01"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.025"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.05"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.1"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.25"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="0.5"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="1"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="2.5"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="5"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="10"} 3 +polkadot_parachain_chain_api_finalized_block_hash_bucket{chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_chain_api_finalized_block_hash_sum{chain="rococo_local_testnet"} 
0.00004554199999999999 +polkadot_parachain_chain_api_finalized_block_hash_count{chain="rococo_local_testnet"} 3 +# HELP polkadot_parachain_chain_api_finalized_block_number Time spent within `chain_api::finalized_block_number` +# TYPE polkadot_parachain_chain_api_finalized_block_number histogram +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.005"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.01"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.025"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.05"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.1"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.25"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="0.5"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="1"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="2.5"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="5"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="10"} 18 +polkadot_parachain_chain_api_finalized_block_number_bucket{chain="rococo_local_testnet",le="+Inf"} 18 +polkadot_parachain_chain_api_finalized_block_number_sum{chain="rococo_local_testnet"} 0.000172791 +polkadot_parachain_chain_api_finalized_block_number_count{chain="rococo_local_testnet"} 18 +# HELP polkadot_parachain_chain_api_requests_total Number of Chain API requests served. 
+# TYPE polkadot_parachain_chain_api_requests_total counter +polkadot_parachain_chain_api_requests_total{success="succeeded",chain="rococo_local_testnet"} 145 +# HELP polkadot_parachain_collation_generation_new_activations Time spent within fn handle_new_activations +# TYPE polkadot_parachain_collation_generation_new_activations histogram +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_collation_generation_new_activations_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collation_generation_new_activations_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collation_generation_new_activations_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collation_generation_per_availability_core Time spent handling a particular availability core for a relay parent in fn handle_new_activations +# TYPE 
polkadot_parachain_collation_generation_per_availability_core histogram +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_collation_generation_per_availability_core_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collation_generation_per_availability_core_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collation_generation_per_availability_core_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collation_generation_per_relay_parent Time spent handling a particular relay parent within fn handle_new_activations +# TYPE polkadot_parachain_collation_generation_per_relay_parent histogram +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.005"} 0 
+polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_collation_generation_per_relay_parent_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collation_generation_per_relay_parent_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collation_generation_per_relay_parent_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collation_generation_submit_collation Time spent preparing and submitting a collation to the network protocol +# TYPE polkadot_parachain_collation_generation_submit_collation histogram +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.05"} 0 
+polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_collation_generation_submit_collation_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collation_generation_submit_collation_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collation_generation_submit_collation_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collations_generated_total Number of collations generated. 
+# TYPE polkadot_parachain_collations_generated_total counter +polkadot_parachain_collations_generated_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collator_peer_count Amount of collator peers connected +# TYPE polkadot_parachain_collator_peer_count gauge +polkadot_parachain_collator_peer_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collator_protocol_validator_collation_request_duration Lifetime of the `CollationFetchRequest` structure +# TYPE polkadot_parachain_collator_protocol_validator_collation_request_duration histogram +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.2"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.3"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.4"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.6"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.75"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="0.9"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="1.2"} 0 
+polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="1.5"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="1.75"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collator_protocol_validator_collation_request_duration_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collator_protocol_validator_handle_collation_request_result Time spent within `collator_protocol_validator::handle_collation_request_result` +# TYPE polkadot_parachain_collator_protocol_validator_handle_collation_request_result histogram +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="1"} 0 
+polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_collator_protocol_validator_handle_collation_request_result_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_collator_protocol_validator_process_msg Time spent within `collator_protocol_validator::process_msg` +# TYPE polkadot_parachain_collator_protocol_validator_process_msg histogram +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.005"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.01"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.025"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.05"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.1"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.25"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="0.5"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="1"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="2.5"} 16 
+polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="5"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="10"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_bucket{chain="rococo_local_testnet",le="+Inf"} 16 +polkadot_parachain_collator_protocol_validator_process_msg_sum{chain="rococo_local_testnet"} 0.032725542 +polkadot_parachain_collator_protocol_validator_process_msg_count{chain="rococo_local_testnet"} 16 +# HELP polkadot_parachain_collator_protocol_validator_request_unblocked_collations Time spent within `collator_protocol_validator::request_unblocked_collations` +# TYPE polkadot_parachain_collator_protocol_validator_request_unblocked_collations histogram +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.005"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.01"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.025"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.05"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.1"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.25"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="0.5"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="1"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="2.5"} 15 
+polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="5"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="10"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_bucket{chain="rococo_local_testnet",le="+Inf"} 15 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_sum{chain="rococo_local_testnet"} 0.000003667 +polkadot_parachain_collator_protocol_validator_request_unblocked_collations_count{chain="rococo_local_testnet"} 15 +# HELP polkadot_parachain_deactivated_heads_total Number of deactivated heads. +# TYPE polkadot_parachain_deactivated_heads_total counter +polkadot_parachain_deactivated_heads_total{chain="rococo_local_testnet"} 13 +# HELP polkadot_parachain_desired_peer_count The number of peers that the local node is expected to connect to on a parachain-related peer-set (either including or not including unresolvable authorities, depending on whether `ConnectToValidators` or `ConnectToValidatorsResolved` was used.) +# TYPE polkadot_parachain_desired_peer_count gauge +polkadot_parachain_desired_peer_count{protocol="validation",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_dispute_candidate_approval_votes_fetched_total Number of approval votes fetched from approval voting. +# TYPE polkadot_parachain_dispute_candidate_approval_votes_fetched_total counter +polkadot_parachain_dispute_candidate_approval_votes_fetched_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_coordinator_vote_cleanup Time spent cleaning up old votes per batch. 
+# TYPE polkadot_parachain_dispute_coordinator_vote_cleanup histogram +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="0.01"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="2"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="4"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="8"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="16"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="32"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="64"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_dispute_coordinator_vote_cleanup_sum{chain="rococo_local_testnet"} 0.000103958 +polkadot_parachain_dispute_coordinator_vote_cleanup_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_dispute_distribution_received_requests Total number of received dispute requests. +# TYPE polkadot_parachain_dispute_distribution_received_requests counter +polkadot_parachain_dispute_distribution_received_requests{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_distribution_time_dispute_request Time needed for dispute votes to get confirmed/fail getting transmitted. 
+# TYPE polkadot_parachain_dispute_distribution_time_dispute_request histogram +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_dispute_distribution_time_dispute_request_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_participation_best_effort_queue_size Number of disputes waiting for local participation in the best effort queue. 
+# TYPE polkadot_parachain_dispute_participation_best_effort_queue_size gauge +polkadot_parachain_dispute_participation_best_effort_queue_size{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_participation_durations Time spent within fn Participation::participate +# TYPE polkadot_parachain_dispute_participation_durations histogram +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_dispute_participation_durations_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_dispute_participation_durations_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_dispute_participation_durations_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_participation_pipeline_durations Measures the duration of the full participation pipeline: From when a participation request is first queued to when participation in the requested dispute is complete. 
+# TYPE polkadot_parachain_dispute_participation_pipeline_durations histogram +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_dispute_participation_pipeline_durations_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_participation_priority_queue_size Number of disputes waiting for local participation in the priority queue. +# TYPE polkadot_parachain_dispute_participation_priority_queue_size gauge +polkadot_parachain_dispute_participation_priority_queue_size{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_dispute_refrained_participations Number of refrained participations. 
We refrain from participation if all of the following conditions are met: disputed candidate is not included, not backed and not confirmed. +# TYPE polkadot_parachain_dispute_refrained_participations counter +polkadot_parachain_dispute_refrained_participations{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_disputes_finality_lag How far behind the head of the chain the Disputes protocol wants to vote +# TYPE polkadot_parachain_disputes_finality_lag gauge +polkadot_parachain_disputes_finality_lag{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_fetch_retries_total Number of times we did not succeed in fetching a chunk and needed to try more backers. +# TYPE polkadot_parachain_fetch_retries_total counter +polkadot_parachain_fetch_retries_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_fetched_chunks_total Total number of fetched chunks. +# TYPE polkadot_parachain_fetched_chunks_total counter +polkadot_parachain_fetched_chunks_total{success="succeeded",chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_fetched_onchain_disputes Number of disputes fetched from the runtime +# TYPE polkadot_parachain_fetched_onchain_disputes counter +polkadot_parachain_fetched_onchain_disputes{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_imported_candidates_total Number of candidates imported by the approval voting subsystem +# TYPE polkadot_parachain_imported_candidates_total counter +polkadot_parachain_imported_candidates_total{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_inherent_data_dispute_statement_sets Number of dispute statements sets passed to `create_inherent()`. +# TYPE polkadot_parachain_inherent_data_dispute_statement_sets counter +polkadot_parachain_inherent_data_dispute_statement_sets{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_inherent_data_requests_total Number of InherentData requests served by provisioner. 
+# TYPE polkadot_parachain_inherent_data_requests_total counter +polkadot_parachain_inherent_data_requests_total{success="succeeded",chain="rococo_local_testnet"} 5 +# HELP polkadot_parachain_messages_relayed_total Number of messages relayed by Overseer. +# TYPE polkadot_parachain_messages_relayed_total counter +polkadot_parachain_messages_relayed_total{chain="rococo_local_testnet"} 110 +# HELP polkadot_parachain_messages_sent_total The number of messages sent via network bridge +# TYPE polkadot_parachain_messages_sent_total counter +polkadot_parachain_messages_sent_total{type="available_data_fetching_v1",chain="rococo_local_testnet"} 1 +polkadot_parachain_messages_sent_total{type="chunk_fetching_v1",chain="rococo_local_testnet"} 1 +polkadot_parachain_messages_sent_total{type="polkadot_network_bridge::WireMessage",chain="rococo_local_testnet"} 49 +polkadot_parachain_messages_sent_total{type="report_peer",chain="rococo_local_testnet"} 4 +# HELP polkadot_parachain_network_bridge_rx_delayed Number of events being delayed while broadcasting from the network bridge +# TYPE polkadot_parachain_network_bridge_rx_delayed histogram +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="0"} 0 +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="2"} 0 +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="8"} 0 +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="16"} 0 +polkadot_parachain_network_bridge_rx_delayed_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_network_bridge_rx_delayed_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_network_bridge_rx_delayed_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_network_bridge_rx_delayed_time Time spent for waiting of the delayed events +# TYPE 
polkadot_parachain_network_bridge_rx_delayed_time histogram +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="5"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_network_bridge_rx_delayed_time_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_network_report_events_total The amount of reputation changes issued by subsystems +# TYPE polkadot_parachain_network_report_events_total counter +polkadot_parachain_network_report_events_total{chain="rococo_local_testnet"} 4 +# HELP polkadot_parachain_notification_bytes_received_total The number of bytes received on a parachain notification protocol +# TYPE polkadot_parachain_notification_bytes_received_total counter +polkadot_parachain_notification_bytes_received_total{protocol="validation/1",chain="rococo_local_testnet"} 6780 +# HELP 
polkadot_parachain_notification_bytes_sent_total The number of bytes sent on a parachain notification protocol +# TYPE polkadot_parachain_notification_bytes_sent_total counter +polkadot_parachain_notification_bytes_sent_total{protocol="validation/1",chain="rococo_local_testnet"} 7483 +# HELP polkadot_parachain_notifications_received_total The number of notifications received on a parachain protocol +# TYPE polkadot_parachain_notifications_received_total counter +polkadot_parachain_notifications_received_total{protocol="validation/1",chain="rococo_local_testnet"} 63 +# HELP polkadot_parachain_notifications_sent_total The number of notifications sent on a parachain protocol +# TYPE polkadot_parachain_notifications_sent_total counter +polkadot_parachain_notifications_sent_total{protocol="validation/1",chain="rococo_local_testnet"} 79 +# HELP polkadot_parachain_overseer_signals_received Number of signals received by subsystems from overseer +# TYPE polkadot_parachain_overseer_signals_received gauge +polkadot_parachain_overseer_signals_received{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 26 
+polkadot_parachain_overseer_signals_received{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_received{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 26 +# HELP 
polkadot_parachain_overseer_signals_sent Number of signals sent by overseer to subsystems +# TYPE polkadot_parachain_overseer_signals_sent gauge +polkadot_parachain_overseer_signals_sent{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 26 
+polkadot_parachain_overseer_signals_sent{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 26 +polkadot_parachain_overseer_signals_sent{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 26 +# HELP polkadot_parachain_peer_connect_events_total The number of peer connect events on a parachain notifications protocol +# TYPE polkadot_parachain_peer_connect_events_total counter +polkadot_parachain_peer_connect_events_total{protocol="validation/1",chain="rococo_local_testnet"} 2 +# HELP polkadot_parachain_peer_count The number of peers on a parachain-related peer-set +# TYPE polkadot_parachain_peer_count gauge +polkadot_parachain_peer_count{protocol="validation/1",chain="rococo_local_testnet"} 2 +# HELP polkadot_parachain_prospective_parachains_prune_view_candidate_storage Time spent within `prospective_parachains::prune_view_candidate_storage` +# TYPE polkadot_parachain_prospective_parachains_prune_view_candidate_storage histogram +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.005"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.01"} 1 
+polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.025"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.05"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.25"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="2.5"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_sum{chain="rococo_local_testnet"} 0.000000042 +polkadot_parachain_prospective_parachains_prune_view_candidate_storage_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_provisioner_inherent_data_response_bitfields_sent Number of inherent bitfields sent in response to `ProvisionerMessage::RequestInherentData`. 
+# TYPE polkadot_parachain_provisioner_inherent_data_response_bitfields_sent histogram +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="0"} 0 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="25"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="50"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="100"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="150"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="200"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="250"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="300"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="400"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="500"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="600"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_sum{chain="rococo_local_testnet"} 10 +polkadot_parachain_provisioner_inherent_data_response_bitfields_sent_count{chain="rococo_local_testnet"} 5 +# HELP polkadot_parachain_provisioner_partitioned_disputes Number of disputes partitioned by type. 
+# TYPE polkadot_parachain_provisioner_partitioned_disputes counter +polkadot_parachain_provisioner_partitioned_disputes{partition="active_concluded_onchain",chain="rococo_local_testnet"} 0 +polkadot_parachain_provisioner_partitioned_disputes{partition="active_unconcluded_onchain",chain="rococo_local_testnet"} 0 +polkadot_parachain_provisioner_partitioned_disputes{partition="active_unknown_onchain",chain="rococo_local_testnet"} 0 +polkadot_parachain_provisioner_partitioned_disputes{partition="inactive_concluded_known_onchain",chain="rococo_local_testnet"} 0 +polkadot_parachain_provisioner_partitioned_disputes{partition="inactive_unconcluded_known_onchain",chain="rococo_local_testnet"} 0 +polkadot_parachain_provisioner_partitioned_disputes{partition="inactive_unknown_onchain",chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_provisioner_provisionable_data_time Time spent within `provisioner::provisionable_data` +# TYPE polkadot_parachain_provisioner_provisionable_data_time histogram +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.005"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.01"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.025"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.05"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.1"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.25"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="0.5"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="1"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="2.5"} 30 
+polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="5"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="10"} 30 +polkadot_parachain_provisioner_provisionable_data_time_bucket{chain="rococo_local_testnet",le="+Inf"} 30 +polkadot_parachain_provisioner_provisionable_data_time_sum{chain="rococo_local_testnet"} 0.000023041000000000005 +polkadot_parachain_provisioner_provisionable_data_time_count{chain="rococo_local_testnet"} 30 +# HELP polkadot_parachain_provisioner_request_inherent_data_time Time spent within `provisioner::request_inherent_data` +# TYPE polkadot_parachain_provisioner_request_inherent_data_time histogram +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.005"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.01"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.025"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.05"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.1"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.25"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="0.5"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="1"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="2.5"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="5"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="10"} 5 
+polkadot_parachain_provisioner_request_inherent_data_time_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +polkadot_parachain_provisioner_request_inherent_data_time_sum{chain="rococo_local_testnet"} 0.006329125 +polkadot_parachain_provisioner_request_inherent_data_time_count{chain="rococo_local_testnet"} 5 +# HELP polkadot_parachain_received_availability_chunks_total Number of availability chunks received. +# TYPE polkadot_parachain_received_availability_chunks_total counter +polkadot_parachain_received_availability_chunks_total{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_received_availabilty_bitfields_total Number of valid availability bitfields received from other peers. +# TYPE polkadot_parachain_received_availabilty_bitfields_total counter +polkadot_parachain_received_availabilty_bitfields_total{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_runtime_api_make_runtime_api_request Time spent within `runtime_api::make_runtime_api_request` +# TYPE polkadot_parachain_runtime_api_make_runtime_api_request histogram +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.005"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.01"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.025"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.05"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.1"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.25"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="0.5"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="1"} 295 
+polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="2.5"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="5"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="10"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_bucket{chain="rococo_local_testnet",le="+Inf"} 295 +polkadot_parachain_runtime_api_make_runtime_api_request_sum{chain="rococo_local_testnet"} 0.08046465599999997 +polkadot_parachain_runtime_api_make_runtime_api_request_count{chain="rococo_local_testnet"} 295 +# HELP polkadot_parachain_runtime_api_requests_total Number of Runtime API requests served. +# TYPE polkadot_parachain_runtime_api_requests_total counter +polkadot_parachain_runtime_api_requests_total{success="cached",chain="rococo_local_testnet"} 255 +polkadot_parachain_runtime_api_requests_total{success="failed",chain="rococo_local_testnet"} 84 +polkadot_parachain_runtime_api_requests_total{success="succeeded",chain="rococo_local_testnet"} 206 +# HELP polkadot_parachain_sent_own_availabilty_bitfields_total Number of own availability bitfields sent to other peers. 
+# TYPE polkadot_parachain_sent_own_availabilty_bitfields_total counter +polkadot_parachain_sent_own_availabilty_bitfields_total{chain="rococo_local_testnet"} 14 +# HELP polkadot_parachain_statement_distribution_active_leaves_update Time spent within `statement_distribution::active_leaves_update` +# TYPE polkadot_parachain_statement_distribution_active_leaves_update histogram +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.000025"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.00005"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.000075"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.0003125"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.000625"} 1 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.00125"} 5 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.0025"} 10 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.005"} 15 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.01"} 15 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.025"} 15 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.05"} 15 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="0.1"} 15 +polkadot_parachain_statement_distribution_active_leaves_update_bucket{chain="rococo_local_testnet",le="+Inf"} 15 
+polkadot_parachain_statement_distribution_active_leaves_update_sum{chain="rococo_local_testnet"} 0.028708792000000004 +polkadot_parachain_statement_distribution_active_leaves_update_count{chain="rococo_local_testnet"} 15 +# HELP polkadot_parachain_statement_distribution_created_message_size Size of created messages containing Seconded statements. +# TYPE polkadot_parachain_statement_distribution_created_message_size gauge +polkadot_parachain_statement_distribution_created_message_size{chain="rococo_local_testnet"} 596 +# HELP polkadot_parachain_statement_distribution_network_bridge_update Time spent within `statement_distribution::network_bridge_update` +# TYPE polkadot_parachain_statement_distribution_network_bridge_update histogram +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.000025"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.00005"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.000075"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.0001"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.0003125"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.000625"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.00125"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.0025"} 2 
+polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.005"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.01"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.025"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.05"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="0.1"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="circulate_statement",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_sum{message_type="circulate_statement",chain="rococo_local_testnet"} 0.000000083 +polkadot_parachain_statement_distribution_network_bridge_update_count{message_type="circulate_statement",chain="rococo_local_testnet"} 2 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.000025"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.00005"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.000075"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.0001"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.0003125"} 4 
+polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.000625"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.00125"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.0025"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.005"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.01"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.025"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.05"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="0.1"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="handle_incoming_message",chain="rococo_local_testnet",le="+Inf"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_sum{message_type="handle_incoming_message",chain="rococo_local_testnet"} 0.000000376 +polkadot_parachain_statement_distribution_network_bridge_update_count{message_type="handle_incoming_message",chain="rococo_local_testnet"} 4 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.000025"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.00005"} 1 
+polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.000075"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.0003125"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.000625"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.00125"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.0025"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.005"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.01"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.025"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.05"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="0.1"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="new_gossip_topology",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_sum{message_type="new_gossip_topology",chain="rococo_local_testnet"} 0.000000084 
+polkadot_parachain_statement_distribution_network_bridge_update_count{message_type="new_gossip_topology",chain="rococo_local_testnet"} 1 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.000025"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.00005"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.000075"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.0001"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.0003125"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.000625"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.00125"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.0025"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.005"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.01"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.025"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.05"} 32 
+polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="0.1"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_bucket{message_type="peer_view_change",chain="rococo_local_testnet",le="+Inf"} 32 +polkadot_parachain_statement_distribution_network_bridge_update_sum{message_type="peer_view_change",chain="rococo_local_testnet"} 0.0000020029999999999997 +polkadot_parachain_statement_distribution_network_bridge_update_count{message_type="peer_view_change",chain="rococo_local_testnet"} 32 +# HELP polkadot_parachain_statement_distribution_sent_requests_total Number of large statement fetching requests sent. +# TYPE polkadot_parachain_statement_distribution_sent_requests_total counter +polkadot_parachain_statement_distribution_sent_requests_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_statement_distribution_share Time spent within `statement_distribution::share` +# TYPE polkadot_parachain_statement_distribution_share histogram +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.000025"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.00005"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.000075"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.0003125"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.000625"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.00125"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.0025"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.005"} 0 
+polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.025"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_statement_distribution_share_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_statement_distribution_share_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_statement_distribution_share_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_statements_distributed_total Number of candidate validity statements distributed to other peers. +# TYPE polkadot_parachain_statements_distributed_total counter +polkadot_parachain_statements_distributed_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_subsystem_bounded_blocked Number of times senders blocked while sending messages to a subsystem +# TYPE polkadot_parachain_subsystem_bounded_blocked gauge +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 
+polkadot_parachain_subsystem_bounded_blocked{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_blocked{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 0 +# HELP 
polkadot_parachain_subsystem_bounded_received Number of elements received by subsystems' bounded queues +# TYPE polkadot_parachain_subsystem_bounded_received gauge +polkadot_parachain_subsystem_bounded_received{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 53 +polkadot_parachain_subsystem_bounded_received{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 18 +polkadot_parachain_subsystem_bounded_received{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_bounded_received{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 4 +polkadot_parachain_subsystem_bounded_received{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 76 +polkadot_parachain_subsystem_bounded_received{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_bounded_received{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_bounded_received{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 145 +polkadot_parachain_subsystem_bounded_received{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 65 +polkadot_parachain_subsystem_bounded_received{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 33 
+polkadot_parachain_subsystem_bounded_received{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 34 +polkadot_parachain_subsystem_bounded_received{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_bounded_received{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 38 +polkadot_parachain_subsystem_bounded_received{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 33 +polkadot_parachain_subsystem_bounded_received{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_received{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 550 +polkadot_parachain_subsystem_bounded_received{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 38 +# HELP polkadot_parachain_subsystem_bounded_sent Number of elements sent to subsystems' bounded queues +# TYPE polkadot_parachain_subsystem_bounded_sent gauge +polkadot_parachain_subsystem_bounded_sent{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 53 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 18 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 4 
+polkadot_parachain_subsystem_bounded_sent{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 76 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 145 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 65 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 33 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 34 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 38 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 33 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 
+polkadot_parachain_subsystem_bounded_sent{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 550 +polkadot_parachain_subsystem_bounded_sent{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 38 +# HELP polkadot_parachain_subsystem_bounded_tof Duration spent in a particular channel from entrance to removal +# TYPE polkadot_parachain_subsystem_bounded_tof histogram +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 5 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 5 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 0.0004917490296065807 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0001"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0004"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0016"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0064"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0256"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.1024"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.4096"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="1.6384"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="3.2768"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="4.9152"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="6.5536"} 2 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0.00014408305287361145 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0004"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0016"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0064"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0256"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.1024"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.4096"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="1.6384"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="3.2768"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="4.9152"} 1 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="6.5536"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0.000012417091056704521 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 4 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 6 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 6 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 0.0014776650350540876 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 6 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0001"} 8 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0004"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0016"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0064"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0256"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.1024"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.4096"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="1.6384"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="3.2768"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="4.9152"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="6.5536"} 11 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 11 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0.0007772934623062611 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 11 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0004"} 4 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0016"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0064"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0256"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.1024"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.4096"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="1.6384"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="3.2768"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="4.9152"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="6.5536"} 5 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="+Inf"} 5 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0.0011382920201867819 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 5 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0004"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 0.00022529088892042637 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0001"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0004"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 0.00040624896064400673 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0004"} 1 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0.0013868738897144794 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0004"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 
+polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 0.00008533289656043053 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 
+polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0001"} 12 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0004"} 16 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0016"} 26 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0064"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0256"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.1024"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.4096"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="1.6384"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="3.2768"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="4.9152"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="6.5536"} 27 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 27 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0.012124166125431657 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 27 
+polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 2 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 +polkadot_parachain_subsystem_bounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_subsystem_bounded_tof_sum{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 0.0002901260741055012 +polkadot_parachain_subsystem_bounded_tof_count{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 3 +# HELP 
polkadot_parachain_subsystem_unbounded_received Number of elements received by subsystems' unbounded queues +# TYPE polkadot_parachain_subsystem_unbounded_received gauge +polkadot_parachain_subsystem_unbounded_received{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 33 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 17 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 5 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 16 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 5 
+polkadot_parachain_subsystem_unbounded_received{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 17 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_received{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 17 +# HELP polkadot_parachain_subsystem_unbounded_sent Number of elements sent to subsystems' unbounded queues +# TYPE polkadot_parachain_subsystem_unbounded_sent gauge +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 33 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 
+polkadot_parachain_subsystem_unbounded_sent{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 17 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 5 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 16 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 5 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 17 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 
+polkadot_parachain_subsystem_unbounded_sent{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_sent{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 17 +# HELP polkadot_parachain_subsystem_unbounded_tof Duration spent in a particular channel from entrance to removal +# TYPE polkadot_parachain_subsystem_unbounded_tof histogram +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 3 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 3 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 0.0001836656592786312 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 3 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 
0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 2 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 0.0031816670671105385 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0004"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0016"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0064"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.0256"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.1024"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.4096"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="1.6384"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="3.2768"} 1 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="4.9152"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="6.5536"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0001"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0004"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0016"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0064"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.0256"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.1024"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.4096"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="1.6384"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="3.2768"} 2 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="4.9152"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="6.5536"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0.00008941697888076305 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0001"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0004"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0016"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0064"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.0256"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.1024"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.4096"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="1.6384"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="3.2768"} 1 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="4.9152"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="6.5536"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 0.00006420793943107128 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0016"} 1 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0064"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.0256"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.1024"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.4096"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="1.6384"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="3.2768"} 2 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="4.9152"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="6.5536"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 0.003612083150073886 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="gossip-support-subsystem",chain="rococo_local_testnet"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="provisioner-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="6.5536"} 0 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0064"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.0256"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.1024"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.4096"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="1.6384"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="3.2768"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="4.9152"} 2 +polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="6.5536"} 2 
+polkadot_parachain_subsystem_unbounded_tof_bucket{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 2 +polkadot_parachain_subsystem_unbounded_tof_sum{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 0.005076083121821284 +polkadot_parachain_subsystem_unbounded_tof_count{subsystem_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 2 +# HELP polkadot_parachain_time_approval_db_transaction Time spent writing an approval db transaction. +# TYPE polkadot_parachain_time_approval_db_transaction histogram +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.005"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.01"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.025"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.05"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.1"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.25"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="0.5"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="1"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="2.5"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="5"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="10"} 21 +polkadot_parachain_time_approval_db_transaction_bucket{chain="rococo_local_testnet",le="+Inf"} 21 +polkadot_parachain_time_approval_db_transaction_sum{chain="rococo_local_testnet"} 0.0044119619999999984 +polkadot_parachain_time_approval_db_transaction_count{chain="rococo_local_testnet"} 21 +# HELP 
polkadot_parachain_time_awaiting_approval_voting Time spent awaiting a reply from the Approval Voting Subsystem. +# TYPE polkadot_parachain_time_awaiting_approval_voting histogram +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_time_awaiting_approval_voting_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_time_awaiting_approval_voting_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_time_awaiting_approval_voting_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_time_import_pending_now_known Time spent on importing pending assignments and approvals. 
+# TYPE polkadot_parachain_time_import_pending_now_known histogram +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.0001"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.0004"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.0016"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.0064"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.0256"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.1024"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="0.4096"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="1.6384"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="3.2768"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="4.9152"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="6.5536"} 0 +polkadot_parachain_time_import_pending_now_known_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_parachain_time_import_pending_now_known_sum{chain="rococo_local_testnet"} 0 +polkadot_parachain_time_import_pending_now_known_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_parachain_time_recover_and_approve Time spent recovering and approving data in approval voting +# TYPE polkadot_parachain_time_recover_and_approve histogram +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.005"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.025"} 0 
+polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.05"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.25"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="2.5"} 0 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_parachain_time_recover_and_approve_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_parachain_time_recover_and_approve_sum{chain="rococo_local_testnet"} 2.7405542499999997 +polkadot_parachain_time_recover_and_approve_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_parachain_time_unify_with_peer Time spent within fn `unify_with_peer`. 
+# TYPE polkadot_parachain_time_unify_with_peer histogram +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.000625"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.00125"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.0025"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.005"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.0075"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.01"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.025"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.05"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.1"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.25"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="0.5"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="1"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="2.5"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="5"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="10"} 60 +polkadot_parachain_time_unify_with_peer_bucket{chain="rococo_local_testnet",le="+Inf"} 60 +polkadot_parachain_time_unify_with_peer_sum{chain="rococo_local_testnet"} 0.000078959 +polkadot_parachain_time_unify_with_peer_count{chain="rococo_local_testnet"} 60 +# HELP polkadot_parachain_unified_with_peer_total Number of times `unify_with_peer` is called. 
+# TYPE polkadot_parachain_unified_with_peer_total counter +polkadot_parachain_unified_with_peer_total{chain="rococo_local_testnet"} 60 +# HELP polkadot_parachain_validation_requests_total Number of validation requests served. +# TYPE polkadot_parachain_validation_requests_total counter +polkadot_parachain_validation_requests_total{validity="valid",chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_execute_enqueued The total number of jobs enqueued into the execution pipeline +# TYPE polkadot_pvf_execute_enqueued counter +polkadot_pvf_execute_enqueued{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_execute_finished The total number of jobs done in the execution pipeline +# TYPE polkadot_pvf_execute_finished counter +polkadot_pvf_execute_finished{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_execution_time Time spent in executing PVFs +# TYPE polkadot_pvf_execution_time histogram +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.01"} 0 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.025"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.05"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.1"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.25"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="0.5"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="1"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="2"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="3"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="4"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="5"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="6"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="8"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="10"} 1 
+polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="12"} 1 +polkadot_pvf_execution_time_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_pvf_execution_time_sum{chain="rococo_local_testnet"} 0.022490292 +polkadot_pvf_execution_time_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_precheck_judgement Time between sending the pre-check request to receiving the response. +# TYPE polkadot_pvf_precheck_judgement histogram +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="10"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="20"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="30"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="40"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="50"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="60"} 0 +polkadot_pvf_precheck_judgement_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +polkadot_pvf_precheck_judgement_sum{chain="rococo_local_testnet"} 0 +polkadot_pvf_precheck_judgement_count{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_precheck_pvfs_left The number of PVFs removed from the view. +# TYPE polkadot_pvf_precheck_pvfs_left counter +polkadot_pvf_precheck_pvfs_left{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_precheck_pvfs_observed The number of new PVFs observed. +# TYPE polkadot_pvf_precheck_pvfs_observed counter +polkadot_pvf_precheck_pvfs_observed{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_precheck_votes_duplicate The number of votes that are submitted more than once for the same code withinthe same session. 
+# TYPE polkadot_pvf_precheck_votes_duplicate counter +polkadot_pvf_precheck_votes_duplicate{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_precheck_votes_started The number of votes that are pending submission +# TYPE polkadot_pvf_precheck_votes_started counter +polkadot_pvf_precheck_votes_started{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_precheck_votes_total The total number of votes submitted. +# TYPE polkadot_pvf_precheck_votes_total counter +polkadot_pvf_precheck_votes_total{chain="rococo_local_testnet"} 0 +# HELP polkadot_pvf_preparation_time Time spent in preparing PVF artifacts in seconds +# TYPE polkadot_pvf_preparation_time histogram +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="0.1"} 0 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="0.5"} 0 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="1"} 0 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="2"} 0 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="3"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="10"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="20"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="30"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="60"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="120"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="240"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="360"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="480"} 1 +polkadot_pvf_preparation_time_bucket{chain="rococo_local_testnet",le="+Inf"} 1 +polkadot_pvf_preparation_time_sum{chain="rococo_local_testnet"} 2.6898820409999997 +polkadot_pvf_preparation_time_count{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_prepare_concluded The total number of jobs concluded in the 
preparation pipeline +# TYPE polkadot_pvf_prepare_concluded counter +polkadot_pvf_prepare_concluded{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_prepare_enqueued The total number of jobs enqueued into the preparation pipeline +# TYPE polkadot_pvf_prepare_enqueued counter +polkadot_pvf_prepare_enqueued{chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_worker_spawned The total number of workers spawned successfully +# TYPE polkadot_pvf_worker_spawned counter +polkadot_pvf_worker_spawned{flavor="execute",chain="rococo_local_testnet"} 1 +polkadot_pvf_worker_spawned{flavor="prepare",chain="rococo_local_testnet"} 1 +# HELP polkadot_pvf_worker_spawning The total number of workers began to spawn +# TYPE polkadot_pvf_worker_spawning counter +polkadot_pvf_worker_spawning{flavor="execute",chain="rococo_local_testnet"} 1 +polkadot_pvf_worker_spawning{flavor="prepare",chain="rococo_local_testnet"} 1 +# HELP substrate_authority_discovery_amount_external_addresses_last_published Number of external addresses published when authority discovery last published addresses. +# TYPE substrate_authority_discovery_amount_external_addresses_last_published gauge +substrate_authority_discovery_amount_external_addresses_last_published{chain="rococo_local_testnet"} 2 +# HELP substrate_authority_discovery_authority_address_requests_pending Number of pending authority address requests. +# TYPE substrate_authority_discovery_authority_address_requests_pending gauge +substrate_authority_discovery_authority_address_requests_pending{chain="rococo_local_testnet"} 0 +# HELP substrate_authority_discovery_authority_addresses_requested_total Number of times authority discovery has requested external addresses of a single authority. 
+# TYPE substrate_authority_discovery_authority_addresses_requested_total counter +substrate_authority_discovery_authority_addresses_requested_total{chain="rococo_local_testnet"} 5 +# HELP substrate_authority_discovery_dht_event_received Number of dht events received by authority discovery. +# TYPE substrate_authority_discovery_dht_event_received counter +substrate_authority_discovery_dht_event_received{name="value_found",chain="rococo_local_testnet"} 5 +substrate_authority_discovery_dht_event_received{name="value_put_failed",chain="rococo_local_testnet"} 5 +# HELP substrate_authority_discovery_handle_value_found_event_failure Number of times handling a dht value found event failed. +# TYPE substrate_authority_discovery_handle_value_found_event_failure counter +substrate_authority_discovery_handle_value_found_event_failure{chain="rococo_local_testnet"} 0 +# HELP substrate_authority_discovery_known_authorities_count Number of authorities known by authority discovery. +# TYPE substrate_authority_discovery_known_authorities_count gauge +substrate_authority_discovery_known_authorities_count{chain="rococo_local_testnet"} 1 +# HELP substrate_authority_discovery_times_published_total Number of times authority discovery has published external addresses. 
+# TYPE substrate_authority_discovery_times_published_total counter +substrate_authority_discovery_times_published_total{chain="rococo_local_testnet"} 5 +# HELP substrate_beefy_bad_justification_imports Number of bad justifications on block-import +# TYPE substrate_beefy_bad_justification_imports counter +substrate_beefy_bad_justification_imports{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_best_block Best block finalized by BEEFY +# TYPE substrate_beefy_best_block gauge +substrate_beefy_best_block{chain="rococo_local_testnet"} 11 +# HELP substrate_beefy_best_voted Best block voted on by BEEFY +# TYPE substrate_beefy_best_voted gauge +substrate_beefy_best_voted{chain="rococo_local_testnet"} 11 +# HELP substrate_beefy_buffered_justifications Number of currently buffered justifications +# TYPE substrate_beefy_buffered_justifications gauge +substrate_beefy_buffered_justifications{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_buffered_justifications_dropped Number of justifications dropped due to full buffers +# TYPE substrate_beefy_buffered_justifications_dropped counter +substrate_beefy_buffered_justifications_dropped{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_equivocation_votes Number of equivocation votes received +# TYPE substrate_beefy_equivocation_votes counter +substrate_beefy_equivocation_votes{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_failed_justification_responses Number of Failed Justification responses +# TYPE substrate_beefy_failed_justification_responses counter +substrate_beefy_failed_justification_responses{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_good_justification_imports Number of good justifications on block-import +# TYPE substrate_beefy_good_justification_imports counter +substrate_beefy_good_justification_imports{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_imported_justifications Number of valid justifications successfully imported +# TYPE 
substrate_beefy_imported_justifications counter +substrate_beefy_imported_justifications{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_invalid_votes Number of invalid votes received +# TYPE substrate_beefy_invalid_votes counter +substrate_beefy_invalid_votes{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_lagging_sessions Number of sessions with lagging signed commitment on mandatory block +# TYPE substrate_beefy_lagging_sessions counter +substrate_beefy_lagging_sessions{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_no_authority_found_in_store Number of times no Authority public key found in store +# TYPE substrate_beefy_no_authority_found_in_store counter +substrate_beefy_no_authority_found_in_store{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_on_demand_justification_good_proof Number of on-demand justification good proof +# TYPE substrate_beefy_on_demand_justification_good_proof counter +substrate_beefy_on_demand_justification_good_proof{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_on_demand_justification_invalid_proof Number of on-demand justification invalid proof +# TYPE substrate_beefy_on_demand_justification_invalid_proof counter +substrate_beefy_on_demand_justification_invalid_proof{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_on_demand_justification_no_peer_to_request_from Number of times there was no good peer to request justification from +# TYPE substrate_beefy_on_demand_justification_no_peer_to_request_from counter +substrate_beefy_on_demand_justification_no_peer_to_request_from{chain="rococo_local_testnet"} 2 +# HELP substrate_beefy_on_demand_justification_peer_error Number of on-demand justification peer error +# TYPE substrate_beefy_on_demand_justification_peer_error counter +substrate_beefy_on_demand_justification_peer_error{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_should_vote_on Next block, BEEFY should vote on +# TYPE substrate_beefy_should_vote_on gauge 
+substrate_beefy_should_vote_on{chain="rococo_local_testnet"} 11 +# HELP substrate_beefy_stale_justifications Number of valid but stale justifications received +# TYPE substrate_beefy_stale_justifications counter +substrate_beefy_stale_justifications{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_stale_votes Number of valid but stale votes received +# TYPE substrate_beefy_stale_votes counter +substrate_beefy_stale_votes{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_successful_handled_votes Number of good votes successfully handled +# TYPE substrate_beefy_successful_handled_votes counter +substrate_beefy_successful_handled_votes{chain="rococo_local_testnet"} 6 +# HELP substrate_beefy_successful_justification_responses Number of Successful Justification responses +# TYPE substrate_beefy_successful_justification_responses counter +substrate_beefy_successful_justification_responses{chain="rococo_local_testnet"} 0 +# HELP substrate_beefy_validator_set_id Current BEEFY active validator set id. 
+# TYPE substrate_beefy_validator_set_id gauge +substrate_beefy_validator_set_id{chain="rococo_local_testnet"} 1 +# HELP substrate_beefy_votes_sent Number of votes sent by this node +# TYPE substrate_beefy_votes_sent counter +substrate_beefy_votes_sent{chain="rococo_local_testnet"} 3 +# HELP substrate_block_height Block height info of the chain +# TYPE substrate_block_height gauge +substrate_block_height{status="best",chain="rococo_local_testnet"} 13 +substrate_block_height{status="finalized",chain="rococo_local_testnet"} 10 +substrate_block_height{status="sync_target",chain="rococo_local_testnet"} 13 +# HELP substrate_block_verification_and_import_time Time taken to verify and import blocks +# TYPE substrate_block_verification_and_import_time histogram +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.005"} 8 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.01"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.025"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.05"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.1"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.25"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="0.5"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="1"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="2.5"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="5"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="10"} 9 +substrate_block_verification_and_import_time_bucket{chain="rococo_local_testnet",le="+Inf"} 9 +substrate_block_verification_and_import_time_sum{chain="rococo_local_testnet"} 0.032715167 
+substrate_block_verification_and_import_time_count{chain="rococo_local_testnet"} 9 +# HELP substrate_block_verification_time Time taken to verify blocks +# TYPE substrate_block_verification_time histogram +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.005"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.01"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.025"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.05"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.1"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.25"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="0.5"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="1"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="2.5"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="5"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="10"} 9 +substrate_block_verification_time_bucket{result="success",chain="rococo_local_testnet",le="+Inf"} 9 +substrate_block_verification_time_sum{result="success",chain="rococo_local_testnet"} 0.00814396 +substrate_block_verification_time_count{result="success",chain="rococo_local_testnet"} 9 +# HELP substrate_build_info A metric with a constant '1' value labeled by name, version +# TYPE substrate_build_info gauge +substrate_build_info{name="bob",version="1.1.0-9b6e241ab63",chain="rococo_local_testnet"} 1 +# HELP substrate_database_cache_bytes RocksDB cache size in bytes +# TYPE substrate_database_cache_bytes gauge +substrate_database_cache_bytes{chain="rococo_local_testnet"} 0 +# HELP 
substrate_finality_grandpa_communication_gossip_validator_messages Number of messages validated by the finality grandpa gossip validator. +# TYPE substrate_finality_grandpa_communication_gossip_validator_messages counter +substrate_finality_grandpa_communication_gossip_validator_messages{action="discard",message="neighbor",chain="rococo_local_testnet"} 61 +substrate_finality_grandpa_communication_gossip_validator_messages{action="keep",message="vote",chain="rococo_local_testnet"} 38 +substrate_finality_grandpa_communication_gossip_validator_messages{action="process_and_discard",message="commit",chain="rococo_local_testnet"} 7 +# HELP substrate_finality_grandpa_precommits_total Total number of GRANDPA precommits cast locally. +# TYPE substrate_finality_grandpa_precommits_total counter +substrate_finality_grandpa_precommits_total{chain="rococo_local_testnet"} 19 +# HELP substrate_finality_grandpa_prevotes_total Total number of GRANDPA prevotes cast locally. +# TYPE substrate_finality_grandpa_prevotes_total counter +substrate_finality_grandpa_prevotes_total{chain="rococo_local_testnet"} 19 +# HELP substrate_finality_grandpa_round Highest completed GRANDPA round. +# TYPE substrate_finality_grandpa_round gauge +substrate_finality_grandpa_round{chain="rococo_local_testnet"} 19 +# HELP substrate_finality_grandpa_until_imported_waiting_messages_number Number of finality grandpa messages waiting within the until imported queue. +# TYPE substrate_finality_grandpa_until_imported_waiting_messages_number gauge +substrate_finality_grandpa_until_imported_waiting_messages_number{chain="rococo_local_testnet"} 0 +# HELP substrate_import_queue_processed_total Blocks processed by import queue +# TYPE substrate_import_queue_processed_total counter +substrate_import_queue_processed_total{result="success",chain="rococo_local_testnet"} 9 +# HELP substrate_issued_light_requests Number of light client requests that our node has issued. 
+# TYPE substrate_issued_light_requests counter +substrate_issued_light_requests{chain="rococo_local_testnet"} 0 +# HELP substrate_justification_import_time Time taken to import justifications +# TYPE substrate_justification_import_time histogram +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.005"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.01"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.025"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.05"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.1"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.25"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="0.5"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="1"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="2.5"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="5"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="10"} 0 +substrate_justification_import_time_bucket{chain="rococo_local_testnet",le="+Inf"} 0 +substrate_justification_import_time_sum{chain="rococo_local_testnet"} 0 +substrate_justification_import_time_count{chain="rococo_local_testnet"} 0 +# HELP substrate_network_gossip_expired_messages_total Number of expired messages by the gossip service. +# TYPE substrate_network_gossip_expired_messages_total counter +substrate_network_gossip_expired_messages_total{chain="rococo_local_testnet"} 113 +# HELP substrate_network_gossip_registered_messages_total Number of registered messages by the gossip service. 
+# TYPE substrate_network_gossip_registered_messages_total counter +substrate_network_gossip_registered_messages_total{chain="rococo_local_testnet"} 132 +# HELP substrate_node_roles The roles the node is running as +# TYPE substrate_node_roles gauge +substrate_node_roles{chain="rococo_local_testnet"} 4 +# HELP substrate_number_leaves Number of known chain leaves (aka forks) +# TYPE substrate_number_leaves gauge +substrate_number_leaves{chain="rococo_local_testnet"} 1 +# HELP substrate_process_start_time_seconds Number of seconds between the UNIX epoch and the moment the process started +# TYPE substrate_process_start_time_seconds gauge +substrate_process_start_time_seconds{chain="rococo_local_testnet"} 1695903683 +# HELP substrate_proposer_block_constructed Histogram of time taken to construct new block +# TYPE substrate_proposer_block_constructed histogram +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.005"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.01"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.025"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.05"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.1"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.25"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="0.5"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="1"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="2.5"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="5"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="10"} 5 +substrate_proposer_block_constructed_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +substrate_proposer_block_constructed_sum{chain="rococo_local_testnet"} 0.0070964570000000005 
+substrate_proposer_block_constructed_count{chain="rococo_local_testnet"} 5 +# HELP substrate_proposer_block_proposal_time Histogram of time taken to construct a block and prepare it for proposal +# TYPE substrate_proposer_block_proposal_time histogram +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.005"} 3 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.01"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.025"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.05"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.1"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.25"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="0.5"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="1"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="2.5"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="5"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="10"} 5 +substrate_proposer_block_proposal_time_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +substrate_proposer_block_proposal_time_sum{chain="rococo_local_testnet"} 0.0219325 +substrate_proposer_block_proposal_time_count{chain="rococo_local_testnet"} 5 +# HELP substrate_proposer_create_inherents_time Histogram of time taken to execute create inherents +# TYPE substrate_proposer_create_inherents_time histogram +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.005"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.01"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.025"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.05"} 5 
+substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.1"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.25"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="0.5"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="1"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="2.5"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="5"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="10"} 5 +substrate_proposer_create_inherents_time_bucket{chain="rococo_local_testnet",le="+Inf"} 5 +substrate_proposer_create_inherents_time_sum{chain="rococo_local_testnet"} 0.003903748 +substrate_proposer_create_inherents_time_count{chain="rococo_local_testnet"} 5 +# HELP substrate_proposer_end_proposal_reason The reason why the block proposing was ended. This doesn't include errors. 
+# TYPE substrate_proposer_end_proposal_reason counter +substrate_proposer_end_proposal_reason{reason="no_more_transactions",chain="rococo_local_testnet"} 5 +# HELP substrate_proposer_number_of_transactions Number of transactions included in block +# TYPE substrate_proposer_number_of_transactions gauge +substrate_proposer_number_of_transactions{chain="rococo_local_testnet"} 2 +# HELP substrate_ready_transactions_number Number of transactions in the ready queue +# TYPE substrate_ready_transactions_number gauge +substrate_ready_transactions_number{chain="rococo_local_testnet"} 0 +# HELP substrate_rpc_sessions_closed Number of persistent RPC sessions closed +# TYPE substrate_rpc_sessions_closed counter +substrate_rpc_sessions_closed{chain="rococo_local_testnet"} 0 +# HELP substrate_rpc_sessions_opened Number of persistent RPC sessions opened +# TYPE substrate_rpc_sessions_opened counter +substrate_rpc_sessions_opened{chain="rococo_local_testnet"} 0 +# HELP substrate_state_cache_bytes State cache size in bytes +# TYPE substrate_state_cache_bytes gauge +substrate_state_cache_bytes{chain="rococo_local_testnet"} 3416549 +# HELP substrate_sub_libp2p_connections_opened_total Total number of connections opened by direction +# TYPE substrate_sub_libp2p_connections_opened_total counter +substrate_sub_libp2p_connections_opened_total{direction="in",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_connections_opened_total{direction="out",chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_distinct_peers_connections_closed_total Total number of connections closed with distinct peers +# TYPE substrate_sub_libp2p_distinct_peers_connections_closed_total counter +substrate_sub_libp2p_distinct_peers_connections_closed_total{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_libp2p_distinct_peers_connections_opened_total Total number of connections opened with distinct peers +# TYPE substrate_sub_libp2p_distinct_peers_connections_opened_total counter 
+substrate_sub_libp2p_distinct_peers_connections_opened_total{chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_incoming_connections_handshake_errors_total Total number of incoming connections that have failed during the initial handshake +# TYPE substrate_sub_libp2p_incoming_connections_handshake_errors_total counter +substrate_sub_libp2p_incoming_connections_handshake_errors_total{reason="transport-error",chain="rococo_local_testnet"} 1 +# HELP substrate_sub_libp2p_incoming_connections_total Total number of incoming connections on the listening sockets +# TYPE substrate_sub_libp2p_incoming_connections_total counter +substrate_sub_libp2p_incoming_connections_total{chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_is_major_syncing Whether the node is performing a major sync or not. +# TYPE substrate_sub_libp2p_is_major_syncing gauge +substrate_sub_libp2p_is_major_syncing{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_libp2p_kademlia_query_duration Duration of Kademlia queries per query type +# TYPE substrate_sub_libp2p_kademlia_query_duration histogram +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="0.5"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="1"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="2"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="4"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="8"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="16"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="32"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="64"} 5 
+substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="128"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="256"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-found",chain="rococo_local_testnet",le="+Inf"} 5 +substrate_sub_libp2p_kademlia_query_duration_sum{type="value-found",chain="rococo_local_testnet"} 0 +substrate_sub_libp2p_kademlia_query_duration_count{type="value-found",chain="rococo_local_testnet"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="0.5"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="1"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="2"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="4"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="8"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="16"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="32"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="64"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="128"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="256"} 5 +substrate_sub_libp2p_kademlia_query_duration_bucket{type="value-put-failed",chain="rococo_local_testnet",le="+Inf"} 5 +substrate_sub_libp2p_kademlia_query_duration_sum{type="value-put-failed",chain="rococo_local_testnet"} 0.025113918 
+substrate_sub_libp2p_kademlia_query_duration_count{type="value-put-failed",chain="rococo_local_testnet"} 5 +# HELP substrate_sub_libp2p_kademlia_random_queries_total Number of random Kademlia queries started +# TYPE substrate_sub_libp2p_kademlia_random_queries_total counter +substrate_sub_libp2p_kademlia_random_queries_total{chain="rococo_local_testnet"} 7 +# HELP substrate_sub_libp2p_kademlia_records_count Number of records in the Kademlia records store +# TYPE substrate_sub_libp2p_kademlia_records_count gauge +substrate_sub_libp2p_kademlia_records_count{chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_kademlia_records_sizes_total Total size of all the records in the Kademlia records store +# TYPE substrate_sub_libp2p_kademlia_records_sizes_total gauge +substrate_sub_libp2p_kademlia_records_sizes_total{chain="rococo_local_testnet"} 507 +# HELP substrate_sub_libp2p_kbuckets_num_nodes Number of nodes per kbucket per Kademlia instance +# TYPE substrate_sub_libp2p_kbuckets_num_nodes gauge +substrate_sub_libp2p_kbuckets_num_nodes{lower_ilog2_bucket_bound="255",chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_listeners_errors_total Total number of non-fatal errors reported by a listener +# TYPE substrate_sub_libp2p_listeners_errors_total counter +substrate_sub_libp2p_listeners_errors_total{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_libp2p_listeners_local_addresses Number of local addresses we're listening on +# TYPE substrate_sub_libp2p_listeners_local_addresses gauge +substrate_sub_libp2p_listeners_local_addresses{chain="rococo_local_testnet"} 3 +# HELP substrate_sub_libp2p_network_bytes_total Total bandwidth usage +# TYPE substrate_sub_libp2p_network_bytes_total counter +substrate_sub_libp2p_network_bytes_total{direction="in",chain="rococo_local_testnet"} 85440 +substrate_sub_libp2p_network_bytes_total{direction="out",chain="rococo_local_testnet"} 80963 +# HELP substrate_sub_libp2p_notifications_sizes Sizes of the notifications send 
to and received from all nodes +# TYPE substrate_sub_libp2p_notifications_sizes histogram +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="64"} 0 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="256"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="1024"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="4096"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="16384"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="65536"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="262144"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="1048576"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="+Inf"} 9 +substrate_sub_libp2p_notifications_sizes_sum{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1483 
+substrate_sub_libp2p_notifications_sizes_count{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="64"} 61 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="256"} 213 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="1024"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="4096"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="16384"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="65536"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="262144"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="1048576"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="+Inf"} 227 
+substrate_sub_libp2p_notifications_sizes_sum{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 28608 +substrate_sub_libp2p_notifications_sizes_count{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="64"} 25 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="256"} 59 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="1024"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="4096"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="16384"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="65536"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="262144"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="1048576"} 63 
+substrate_sub_libp2p_notifications_sizes_bucket{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="+Inf"} 63 +substrate_sub_libp2p_notifications_sizes_sum{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 6780 +substrate_sub_libp2p_notifications_sizes_count{direction="in",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 63 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="64"} 0 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="256"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="1024"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="4096"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="16384"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="65536"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="262144"} 9 
+substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="1048576"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet",le="+Inf"} 9 +substrate_sub_libp2p_notifications_sizes_sum{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1483 +substrate_sub_libp2p_notifications_sizes_count{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 9 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="64"} 62 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="256"} 214 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="1024"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="4096"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="16384"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="65536"} 227 
+substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="262144"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="1048576"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet",le="+Inf"} 227 +substrate_sub_libp2p_notifications_sizes_sum{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 28311 +substrate_sub_libp2p_notifications_sizes_count{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 227 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="64"} 25 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="256"} 77 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="1024"} 79 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="4096"} 79 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="16384"} 79 
+substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="65536"} 79 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="262144"} 79 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="1048576"} 79 +substrate_sub_libp2p_notifications_sizes_bucket{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet",le="+Inf"} 79 +substrate_sub_libp2p_notifications_sizes_sum{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 7483 +substrate_sub_libp2p_notifications_sizes_count{direction="out",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 79 +# HELP substrate_sub_libp2p_notifications_streams_opened_total Total number of notification substreams that have been opened +# TYPE substrate_sub_libp2p_notifications_streams_opened_total counter +substrate_sub_libp2p_notifications_streams_opened_total{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_notifications_streams_opened_total{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_notifications_streams_opened_total{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/transactions/1",chain="rococo_local_testnet"} 2 
+substrate_sub_libp2p_notifications_streams_opened_total{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_out_events_events_total Number of broadcast network events that have been sent or received across all channels +# TYPE substrate_sub_libp2p_out_events_events_total counter +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="dht",name="authority-discovery",chain="rococo_local_testnet"} 10 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="dht",name="network-gossip",chain="rococo_local_testnet"} 20 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="dht",name="polkadot-network-bridge",chain="rococo_local_testnet"} 10 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="dht",name="transactions-handler-net",chain="rococo_local_testnet"} 10 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="authority-discovery",chain="rococo_local_testnet"} 9 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="network-gossip",chain="rococo_local_testnet"} 18 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 9 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="transactions-handler-net",chain="rococo_local_testnet"} 9 
+substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="authority-discovery",chain="rococo_local_testnet"} 227 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="network-gossip",chain="rococo_local_testnet"} 454 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 227 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="transactions-handler-net",chain="rococo_local_testnet"} 227 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="authority-discovery",chain="rococo_local_testnet"} 63 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="network-gossip",chain="rococo_local_testnet"} 126 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 63 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="transactions-handler-net",chain="rococo_local_testnet"} 63 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="authority-discovery",chain="rococo_local_testnet"} 1 
+substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="network-gossip",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2\"",name="transactions-handler-net",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="authority-discovery",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="network-gossip",chain="rococo_local_testnet"} 4 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1\"",name="transactions-handler-net",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/transactions/1\"",name="authority-discovery",chain="rococo_local_testnet"} 2 
+substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/transactions/1\"",name="network-gossip",chain="rococo_local_testnet"} 4 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/transactions/1\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/transactions/1\"",name="transactions-handler-net",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="authority-discovery",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="network-gossip",chain="rococo_local_testnet"} 4 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="polkadot-network-bridge",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_events_total{action="sent",event_name="notif-open-\"/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1\"",name="transactions-handler-net",chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_out_events_notifications_sizes Size of notification events that have been sent or received across all channels +# TYPE substrate_sub_libp2p_out_events_notifications_sizes counter 
+substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="authority-discovery",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1483 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="authority-discovery",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 28608 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="authority-discovery",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 6780 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="network-gossip",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 2966 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="network-gossip",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 57216 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="network-gossip",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 13560 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="polkadot-network-bridge",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1483 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="polkadot-network-bridge",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 28608 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="polkadot-network-bridge",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 6780 
+substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="transactions-handler-net",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/beefy/2",chain="rococo_local_testnet"} 1483 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="transactions-handler-net",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/grandpa/1",chain="rococo_local_testnet"} 28608 +substrate_sub_libp2p_out_events_notifications_sizes{action="sent",name="transactions-handler-net",protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/validation/1",chain="rococo_local_testnet"} 6780 +# HELP substrate_sub_libp2p_out_events_num_channels Number of internal active channels that broadcast network events +# TYPE substrate_sub_libp2p_out_events_num_channels gauge +substrate_sub_libp2p_out_events_num_channels{name="authority-discovery",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_out_events_num_channels{name="network-gossip",chain="rococo_local_testnet"} 2 +substrate_sub_libp2p_out_events_num_channels{name="polkadot-network-bridge",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_out_events_num_channels{name="transactions-handler-net",chain="rococo_local_testnet"} 1 +# HELP substrate_sub_libp2p_peers_count Number of connected peers +# TYPE substrate_sub_libp2p_peers_count gauge +substrate_sub_libp2p_peers_count{chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_peerset_num_discovered Number of nodes stored in the peerset manager +# TYPE substrate_sub_libp2p_peerset_num_discovered gauge +substrate_sub_libp2p_peerset_num_discovered{chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_pending_connections Number of connections in the process of being established +# TYPE substrate_sub_libp2p_pending_connections gauge +substrate_sub_libp2p_pending_connections{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_libp2p_pending_connections_errors_total Total number of 
pending connection errors +# TYPE substrate_sub_libp2p_pending_connections_errors_total counter +substrate_sub_libp2p_pending_connections_errors_total{reason="limit-reached",chain="rococo_local_testnet"} 2 +# HELP substrate_sub_libp2p_requests_in_success_total For successful incoming requests, time between receiving the request and starting to send the response +# TYPE substrate_sub_libp2p_requests_in_success_total histogram +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.001"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.002"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.004"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.008"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.016"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.032"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.064"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.128"} 10 
+substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.256"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.512"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="1.024"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="2.048"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="4.096"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="8.192"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="16.384"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="32.768"} 10 +substrate_sub_libp2p_requests_in_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="+Inf"} 10 +substrate_sub_libp2p_requests_in_success_total_sum{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet"} 0.001931374 +substrate_sub_libp2p_requests_in_success_total_count{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet"} 10 +# 
HELP substrate_sub_libp2p_requests_out_success_total For successful outgoing requests, time between a request's start and finish +# TYPE substrate_sub_libp2p_requests_out_success_total histogram +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.001"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.002"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.004"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.008"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.016"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.032"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.064"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.128"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.256"} 1 
+substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="0.512"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="1.024"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="2.048"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="4.096"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="8.192"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="16.384"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="32.768"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_sub_libp2p_requests_out_success_total_sum{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet"} 0.000613584 +substrate_sub_libp2p_requests_out_success_total_count{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_available_data/1",chain="rococo_local_testnet"} 1 
+substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.001"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.002"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.004"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.008"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.016"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.032"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.064"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.128"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.256"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="0.512"} 1 
+substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="1.024"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="2.048"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="4.096"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="8.192"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="16.384"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="32.768"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_sub_libp2p_requests_out_success_total_sum{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet"} 0.000818916 +substrate_sub_libp2p_requests_out_success_total_count{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/req_chunk/1",chain="rococo_local_testnet"} 1 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.001"} 1 
+substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.002"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.004"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.008"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.016"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.032"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.064"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.128"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.256"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="0.512"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="1.024"} 9 
+substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="2.048"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="4.096"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="8.192"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="16.384"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="32.768"} 9 +substrate_sub_libp2p_requests_out_success_total_bucket{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet",le="+Inf"} 9 +substrate_sub_libp2p_requests_out_success_total_sum{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet"} 0.010734917 +substrate_sub_libp2p_requests_out_success_total_count{protocol="/cad9c9efb686b2914d6ace9249601264c76d937662d5608479265ea007ae3118/sync/2",chain="rococo_local_testnet"} 9 +# HELP substrate_sub_txpool_block_transactions_pruned Total number of transactions that was requested to be pruned by block events +# TYPE substrate_sub_txpool_block_transactions_pruned counter +substrate_sub_txpool_block_transactions_pruned{chain="rococo_local_testnet"} 26 +# HELP substrate_sub_txpool_block_transactions_resubmitted Total number of transactions that was requested to be resubmitted by block events +# TYPE substrate_sub_txpool_block_transactions_resubmitted counter 
+substrate_sub_txpool_block_transactions_resubmitted{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_txpool_submitted_transactions Total number of transactions submitted +# TYPE substrate_sub_txpool_submitted_transactions counter +substrate_sub_txpool_submitted_transactions{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_txpool_validations_finished Total number of transactions that finished validation +# TYPE substrate_sub_txpool_validations_finished counter +substrate_sub_txpool_validations_finished{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_txpool_validations_invalid Total number of transactions that were removed from the pool as invalid +# TYPE substrate_sub_txpool_validations_invalid counter +substrate_sub_txpool_validations_invalid{chain="rococo_local_testnet"} 0 +# HELP substrate_sub_txpool_validations_scheduled Total number of transactions scheduled for validation +# TYPE substrate_sub_txpool_validations_scheduled counter +substrate_sub_txpool_validations_scheduled{chain="rococo_local_testnet"} 0 +# HELP substrate_sync_extra_justifications Number of extra justifications requests +# TYPE substrate_sync_extra_justifications gauge +substrate_sync_extra_justifications{status="active",chain="rococo_local_testnet"} 0 +substrate_sync_extra_justifications{status="failed",chain="rococo_local_testnet"} 0 +substrate_sync_extra_justifications{status="importing",chain="rococo_local_testnet"} 0 +substrate_sync_extra_justifications{status="pending",chain="rococo_local_testnet"} 0 +# HELP substrate_sync_fork_targets Number of fork sync targets +# TYPE substrate_sync_fork_targets gauge +substrate_sync_fork_targets{chain="rococo_local_testnet"} 0 +# HELP substrate_sync_import_queue_blocks_submitted Number of blocks submitted to the import queue. 
+# TYPE substrate_sync_import_queue_blocks_submitted counter +substrate_sync_import_queue_blocks_submitted{chain="rococo_local_testnet"} 9 +# HELP substrate_sync_import_queue_justifications_submitted Number of justifications submitted to the import queue. +# TYPE substrate_sync_import_queue_justifications_submitted counter +substrate_sync_import_queue_justifications_submitted{chain="rococo_local_testnet"} 0 +# HELP substrate_sync_peers Number of peers we sync with +# TYPE substrate_sync_peers gauge +substrate_sync_peers{chain="rococo_local_testnet"} 2 +# HELP substrate_sync_propagated_transactions Number of transactions propagated to at least one peer +# TYPE substrate_sync_propagated_transactions counter +substrate_sync_propagated_transactions{chain="rococo_local_testnet"} 0 +# HELP substrate_sync_queued_blocks Number of blocks in import queue +# TYPE substrate_sync_queued_blocks gauge +substrate_sync_queued_blocks{chain="rococo_local_testnet"} 0 +# HELP substrate_tasks_ended_total Total number of tasks for which Future::poll has returned Ready(()) or panicked +# TYPE substrate_tasks_ended_total counter +substrate_tasks_ended_total{kind="async",reason="finished",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet"} 1 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet"} 1 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet"} 1 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet"} 14 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet"} 1 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="import-queue",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="informant",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet"} 15 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet"} 8 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="peer-store",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet"} 13 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet"} 5 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet"} 18 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="async",reason="finished",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="async",reason="finished",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="babe",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet"} 5 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="overseer",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="default",task_name="syncing",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0 
+substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="networking",task_name="network-worker",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet"} 295 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet"} 0 +substrate_tasks_ended_total{kind="blocking",reason="finished",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet"} 0 +# HELP substrate_tasks_polling_duration Duration in seconds of each invocation of Future::poll +# TYPE substrate_tasks_polling_duration histogram +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="0.001"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="0.004"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="0.016"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="0.064"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="0.256"} 3 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="1.024"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="4.096"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="16.384"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="65.536"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet",le="+Inf"} 3 +substrate_tasks_polling_duration_sum{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet"} 0.000017208000000000002 +substrate_tasks_polling_duration_count{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="0.001"} 280 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="0.004"} 285 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="0.016"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="0.064"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="0.256"} 287 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="1.024"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="4.096"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="16.384"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="65.536"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet",le="+Inf"} 287 +substrate_tasks_polling_duration_sum{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet"} 0.03723616999999999 +substrate_tasks_polling_duration_count{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet"} 287 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.001"} 103 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.004"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.016"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.064"} 104 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="0.256"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="1.024"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="4.096"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="16.384"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="65.536"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 104 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 0.0032152910000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 104 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="0.001"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="0.004"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="0.016"} 3 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="0.064"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="0.256"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="1.024"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="4.096"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="16.384"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="65.536"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet",le="+Inf"} 3 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet"} 0.000225125 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet"} 3 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="0.016"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet"} 0.000002375 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="0.016"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet"} 0.000003458 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.001"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.004"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.016"} 35 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.064"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="0.256"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="1.024"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="4.096"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="16.384"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="65.536"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet",le="+Inf"} 35 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 0.0006545420000000001 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="0.001"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="0.004"} 4 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="0.016"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="0.064"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="0.256"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="1.024"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="4.096"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="16.384"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="65.536"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet",le="+Inf"} 4 +substrate_tasks_polling_duration_sum{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet"} 0.000225584 +substrate_tasks_polling_duration_count{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="0.016"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet"} 0.000004084 +substrate_tasks_polling_duration_count{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.001"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.004"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.016"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.064"} 132 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="0.256"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="1.024"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="4.096"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="16.384"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="65.536"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 132 +substrate_tasks_polling_duration_sum{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 0.006397163000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 132 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="0.001"} 47 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="0.004"} 53 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="0.016"} 61 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="0.064"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="0.256"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="1.024"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="4.096"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="16.384"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="65.536"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet",le="+Inf"} 61 +substrate_tasks_polling_duration_sum{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet"} 0.07094437099999998 +substrate_tasks_polling_duration_count{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet"} 61 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.001"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.004"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.016"} 99 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.064"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="0.256"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="1.024"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="4.096"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="16.384"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="65.536"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet",le="+Inf"} 99 +substrate_tasks_polling_duration_sum{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 0.0027086280000000007 +substrate_tasks_polling_duration_count{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 99 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.001"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.004"} 28 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.016"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.064"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="0.256"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="1.024"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="4.096"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="16.384"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="65.536"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet",le="+Inf"} 28 +substrate_tasks_polling_duration_sum{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 0.0005377079999999998 +substrate_tasks_polling_duration_count{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 28 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="0.001"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="0.016"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="0.064"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="0.256"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="1.024"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="4.096"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="16.384"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="65.536"} 2 +substrate_tasks_polling_duration_bucket{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet",le="+Inf"} 2 +substrate_tasks_polling_duration_sum{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet"} 0.010472166 +substrate_tasks_polling_duration_count{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet"} 2 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.001"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.004"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.016"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.064"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="0.256"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="1.024"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="4.096"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="16.384"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="65.536"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet",le="+Inf"} 26 +substrate_tasks_polling_duration_sum{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet"} 0.000247082 
+substrate_tasks_polling_duration_count{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.001"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.004"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.016"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.064"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="0.256"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="1.024"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="4.096"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="16.384"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="65.536"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet",le="+Inf"} 175 
+substrate_tasks_polling_duration_sum{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 0.007036749000000001 +substrate_tasks_polling_duration_count{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 175 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="0.001"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="0.004"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="0.016"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="0.064"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="0.256"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="1.024"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="4.096"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="16.384"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="65.536"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet",le="+Inf"} 10 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet"} 0.000058042 
+substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet"} 10 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="0.001"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="0.004"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="0.016"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="0.064"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="0.256"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="1.024"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="4.096"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="16.384"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="65.536"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet",le="+Inf"} 60 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet"} 0.0036650799999999994 +substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet"} 60 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="0.001"} 4 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="0.004"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="0.016"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="0.064"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="0.256"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="1.024"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="4.096"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="16.384"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="65.536"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet",le="+Inf"} 4 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet"} 0.000294501 +substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet"} 4 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="0.001"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="0.004"} 16 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="0.016"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="0.064"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="0.256"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="1.024"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="4.096"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="16.384"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="65.536"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet",le="+Inf"} 16 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet"} 0.0031815420000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet"} 16 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="0.001"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="0.004"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="0.016"} 49 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="0.064"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="0.256"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="1.024"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="4.096"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="16.384"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="65.536"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet",le="+Inf"} 49 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet"} 0.000933792 +substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet"} 49 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="0.001"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="0.004"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="0.016"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="0.064"} 23 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="0.256"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="1.024"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="4.096"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="16.384"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="65.536"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet",le="+Inf"} 23 +substrate_tasks_polling_duration_sum{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet"} 0.0005380010000000001 +substrate_tasks_polling_duration_count{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet"} 23 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.001"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.004"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.016"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.064"} 59 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="0.256"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="1.024"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="4.096"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="16.384"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="65.536"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 59 +substrate_tasks_polling_duration_sum{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 0.0026113700000000004 +substrate_tasks_polling_duration_count{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 59 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="0.016"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet"} 0.000002083 +substrate_tasks_polling_duration_count{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="0.001"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="0.004"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="0.016"} 30 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="0.064"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="0.256"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="1.024"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="4.096"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="16.384"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="65.536"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet",le="+Inf"} 30 +substrate_tasks_polling_duration_sum{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet"} 0.00036145800000000005 +substrate_tasks_polling_duration_count{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet"} 30 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.001"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.004"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.016"} 93 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.064"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="0.256"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="1.024"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="4.096"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="16.384"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="65.536"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet",le="+Inf"} 93 +substrate_tasks_polling_duration_sum{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet"} 0.0009776280000000001 +substrate_tasks_polling_duration_count{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet"} 93 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="0.001"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="0.004"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="0.016"} 9 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="0.064"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="0.256"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="1.024"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="4.096"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="16.384"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="65.536"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet",le="+Inf"} 9 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet"} 0.00038583400000000005 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet"} 9 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="0.001"} 2497 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="0.004"} 2504 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="0.016"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="0.064"} 2508 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="0.256"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="1.024"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="4.096"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="16.384"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="65.536"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet",le="+Inf"} 2508 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet"} 0.15900465800000002 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet"} 2508 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="0.256"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet"} 0.000000333 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="0.001"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="0.004"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="0.016"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="0.064"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="0.256"} 303 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="1.024"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="4.096"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="16.384"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="65.536"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet",le="+Inf"} 303 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet"} 0.003942080999999999 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet"} 303 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="0.001"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="0.004"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="0.016"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="0.064"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="0.256"} 78 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="1.024"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="4.096"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="16.384"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="65.536"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet",le="+Inf"} 78 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet"} 0.0011237980000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet"} 78 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="1.024"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet"} 0.000000583 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="4.096"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet"} 0.000001666 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="4.096"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet"} 0.0000005 +substrate_tasks_polling_duration_count{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="0.001"} 14 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="0.004"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="0.016"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="0.064"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="0.256"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="1.024"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="4.096"} 15 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="16.384"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="65.536"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet",le="+Inf"} 15 +substrate_tasks_polling_duration_sum{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet"} 0.004975458 +substrate_tasks_polling_duration_count{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet"} 15 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="0.001"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="0.004"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="0.016"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="0.064"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="0.256"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="1.024"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="4.096"} 26 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="16.384"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="65.536"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet",le="+Inf"} 26 +substrate_tasks_polling_duration_sum{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet"} 0.00018737200000000005 +substrate_tasks_polling_duration_count{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="0.001"} 79 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="0.004"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="0.016"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="0.064"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="0.256"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="1.024"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="4.096"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="16.384"} 82 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="65.536"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet",le="+Inf"} 82 +substrate_tasks_polling_duration_sum{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet"} 0.019866666999999987 +substrate_tasks_polling_duration_count{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet"} 82 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.001"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.004"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.016"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.064"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="0.256"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="1.024"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="4.096"} 40 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="16.384"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="65.536"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet",le="+Inf"} 40 +substrate_tasks_polling_duration_sum{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 0.00043482900000000013 +substrate_tasks_polling_duration_count{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 40 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.001"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.004"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.016"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.064"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="0.256"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="1.024"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="4.096"} 115 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="16.384"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="65.536"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet",le="+Inf"} 115 +substrate_tasks_polling_duration_sum{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet"} 0.0020985429999999996 +substrate_tasks_polling_duration_count{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet"} 115 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="0.001"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="0.004"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="0.016"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="0.064"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="0.256"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="1.024"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="4.096"} 35 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="16.384"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="65.536"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet",le="+Inf"} 35 +substrate_tasks_polling_duration_sum{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet"} 0.0013596659999999994 +substrate_tasks_polling_duration_count{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet"} 35 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.001"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.004"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.016"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.064"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="0.256"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="1.024"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="4.096"} 58 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="16.384"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="65.536"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet",le="+Inf"} 58 +substrate_tasks_polling_duration_sum{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 0.00212446 +substrate_tasks_polling_duration_count{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 58 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="0.001"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="0.004"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="0.016"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="0.064"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="0.256"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="1.024"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="4.096"} 18 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="16.384"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="65.536"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet",le="+Inf"} 18 +substrate_tasks_polling_duration_sum{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet"} 0.00024496000000000007 +substrate_tasks_polling_duration_count{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet"} 18 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="0.001"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="0.004"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="0.016"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="0.064"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="0.256"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="1.024"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="4.096"} 8 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="16.384"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="65.536"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet",le="+Inf"} 8 +substrate_tasks_polling_duration_sum{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet"} 0.000021750000000000004 +substrate_tasks_polling_duration_count{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet"} 8 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="4.096"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet"} 0.000002166 +substrate_tasks_polling_duration_count{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="1.024"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet"} 0.000000708 +substrate_tasks_polling_duration_count{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.001"} 109 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.004"} 110 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.016"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.064"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="0.256"} 111 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="1.024"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="4.096"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="16.384"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="65.536"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 111 +substrate_tasks_polling_duration_sum{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 0.015694542000000002 +substrate_tasks_polling_duration_count{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 111 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="0.001"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="0.064"} 1 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet"} 0.000002542 +substrate_tasks_polling_duration_count{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="0.001"} 398 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="0.004"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="0.016"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="0.064"} 399 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="0.256"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="1.024"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="4.096"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="16.384"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="65.536"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet",le="+Inf"} 399 +substrate_tasks_polling_duration_sum{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet"} 0.014309667000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet"} 399 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="0.001"} 25 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="0.004"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="0.016"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="0.064"} 26 
+substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="0.256"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="1.024"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="4.096"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="16.384"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="65.536"} 26 +substrate_tasks_polling_duration_bucket{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet",le="+Inf"} 26 +substrate_tasks_polling_duration_sum{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet"} 0.0034132100000000003 +substrate_tasks_polling_duration_count{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.001"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.004"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.016"} 105 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.064"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="0.256"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="1.024"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="4.096"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="16.384"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="65.536"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet",le="+Inf"} 105 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 0.0036267909999999986 +substrate_tasks_polling_duration_count{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 105 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.001"} 137 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.004"} 142 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.016"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.064"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="0.256"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="1.024"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="4.096"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="16.384"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="65.536"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet",le="+Inf"} 142 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet"} 0.025761159999999998 +substrate_tasks_polling_duration_count{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet"} 142 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="0.001"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="0.004"} 
3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="0.016"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="0.064"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="0.256"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="1.024"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="4.096"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="16.384"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="65.536"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet",le="+Inf"} 3 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet"} 0.0005574579999999999 +substrate_tasks_polling_duration_count{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet"} 3 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.001"} 71 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.004"} 72 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.016"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.064"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="0.256"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="1.024"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="4.096"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="16.384"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="65.536"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet",le="+Inf"} 72 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet"} 0.004564749000000002 +substrate_tasks_polling_duration_count{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet"} 72 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.001"} 27 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.004"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.016"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.064"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="0.256"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="1.024"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="4.096"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="16.384"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="65.536"} 27 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet",le="+Inf"} 27 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 0.00077617 +substrate_tasks_polling_duration_count{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 27 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="0.001"} 10 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="0.004"} 17 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="0.016"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="0.064"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="0.256"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="1.024"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="4.096"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="16.384"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="65.536"} 19 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet",le="+Inf"} 19 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet"} 0.033308792 +substrate_tasks_polling_duration_count{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet"} 19 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="0.001"} 817 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="0.004"} 817 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="0.016"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="0.064"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="0.256"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="1.024"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="4.096"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="16.384"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="65.536"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet",le="+Inf"} 818 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet"} 0.007214685999999984 
+substrate_tasks_polling_duration_count{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet"} 818 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.001"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.004"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.016"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.064"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="0.256"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="1.024"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="4.096"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="16.384"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="65.536"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 104 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet"} 0.0029930490000000002 
+substrate_tasks_polling_duration_count{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet"} 104 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.001"} 137 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.004"} 139 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.016"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.064"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="0.256"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="1.024"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="4.096"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="16.384"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="65.536"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet",le="+Inf"} 140 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet"} 
0.024338461000000002 +substrate_tasks_polling_duration_count{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet"} 140 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="0.001"} 25 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="0.004"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="0.016"} 41 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="0.064"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="0.256"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="1.024"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="4.096"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="16.384"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="65.536"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet",le="+Inf"} 42 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet"} 0.13961191799999997 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet"} 42 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="0.001"} 0 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="0.016"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="0.064"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="0.256"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="1.024"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="4.096"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="16.384"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="65.536"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet",le="+Inf"} 5 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet"} 0.022576791000000002 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet"} 5 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="0.001"} 337 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="0.004"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="0.016"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="0.064"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="0.256"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="1.024"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="4.096"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="16.384"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="65.536"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet",le="+Inf"} 343 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet"} 0.037416493999999995 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet"} 343 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="0.001"} 608 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="0.004"} 623 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="0.016"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="0.064"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="0.256"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="1.024"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="4.096"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="16.384"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="65.536"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet",le="+Inf"} 653 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet"} 0.3292343400000003 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet"} 653 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="0.001"} 179 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="0.004"} 181 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="0.016"} 182 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="0.064"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="0.256"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="1.024"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="4.096"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="16.384"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="65.536"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet",le="+Inf"} 182 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet"} 0.01855562699999999 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet"} 182 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="0.001"} 200 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="0.004"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="0.016"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="0.064"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="0.256"} 
202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="1.024"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="4.096"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="16.384"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="65.536"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet",le="+Inf"} 202 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet"} 0.014871296999999995 +substrate_tasks_polling_duration_count{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet"} 202 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.001"} 148 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.004"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.016"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.064"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="0.256"} 150 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="1.024"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="4.096"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="16.384"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="65.536"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet",le="+Inf"} 150 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 0.008441537999999997 +substrate_tasks_polling_duration_count{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 150 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="0.001"} 262 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="0.004"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="0.016"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="0.064"} 263 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="0.256"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="1.024"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="4.096"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="16.384"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="65.536"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet",le="+Inf"} 263 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet"} 0.00968737599999999 +substrate_tasks_polling_duration_count{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet"} 263 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.001"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.004"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.016"} 29 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.064"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="0.256"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="1.024"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="4.096"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="16.384"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="65.536"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet",le="+Inf"} 29 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 0.0012090789999999998 +substrate_tasks_polling_duration_count{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 29 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.001"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.004"} 65 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.016"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.064"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="0.256"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="1.024"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="4.096"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="16.384"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="65.536"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet",le="+Inf"} 65 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 0.0015063399999999997 +substrate_tasks_polling_duration_count{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 65 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="0.001"} 23 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="0.004"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="0.016"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="0.064"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="0.256"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="1.024"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="4.096"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="16.384"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="65.536"} 23 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet",le="+Inf"} 23 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet"} 0.000358126 +substrate_tasks_polling_duration_count{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet"} 23 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="0.001"} 660 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="0.004"} 661 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="0.016"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="0.064"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="0.256"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="1.024"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="4.096"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="16.384"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="65.536"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet",le="+Inf"} 662 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet"} 0.056545653999999945 +substrate_tasks_polling_duration_count{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet"} 662 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="0.001"} 293 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="0.004"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="0.016"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="0.064"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="0.256"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="1.024"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="4.096"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="16.384"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="65.536"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet",le="+Inf"} 295 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet"} 0.08240850600000003 +substrate_tasks_polling_duration_count{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet"} 295 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.001"} 336 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.004"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.016"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.064"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="0.256"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="1.024"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="4.096"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="16.384"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="65.536"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet",le="+Inf"} 341 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet"} 0.01933953799999998 +substrate_tasks_polling_duration_count{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet"} 341 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="0.001"} 1 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet"} 0.000011375 +substrate_tasks_polling_duration_count{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="0.001"} 1 
+substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="0.004"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="0.016"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="0.064"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="0.256"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="1.024"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="4.096"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="16.384"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="65.536"} 1 +substrate_tasks_polling_duration_bucket{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet",le="+Inf"} 1 +substrate_tasks_polling_duration_sum{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet"} 0.00000225 +substrate_tasks_polling_duration_count{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet"} 1 +# HELP substrate_tasks_polling_started_total Total number of times we started invoking Future::poll +# TYPE substrate_tasks_polling_started_total counter 
+substrate_tasks_polling_started_total{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet"} 3 +substrate_tasks_polling_started_total{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet"} 287 +substrate_tasks_polling_started_total{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 104 +substrate_tasks_polling_started_total{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet"} 3 +substrate_tasks_polling_started_total{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 35 +substrate_tasks_polling_started_total{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet"} 4 +substrate_tasks_polling_started_total{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 132 +substrate_tasks_polling_started_total{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet"} 61 +substrate_tasks_polling_started_total{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 99 +substrate_tasks_polling_started_total{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 28 
+substrate_tasks_polling_started_total{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet"} 2 +substrate_tasks_polling_started_total{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_started_total{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 175 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet"} 10 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet"} 60 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet"} 4 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet"} 16 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet"} 49 +substrate_tasks_polling_started_total{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet"} 23 +substrate_tasks_polling_started_total{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 59 +substrate_tasks_polling_started_total{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet"} 30 +substrate_tasks_polling_started_total{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet"} 93 
+substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet"} 9 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet"} 2508 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet"} 303 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet"} 78 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet"} 15 +substrate_tasks_polling_started_total{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_started_total{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet"} 82 +substrate_tasks_polling_started_total{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 40 +substrate_tasks_polling_started_total{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet"} 115 
+substrate_tasks_polling_started_total{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet"} 35 +substrate_tasks_polling_started_total{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 58 +substrate_tasks_polling_started_total{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet"} 18 +substrate_tasks_polling_started_total{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet"} 8 +substrate_tasks_polling_started_total{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 111 +substrate_tasks_polling_started_total{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet"} 399 +substrate_tasks_polling_started_total{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet"} 26 +substrate_tasks_polling_started_total{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 105 +substrate_tasks_polling_started_total{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet"} 142 +substrate_tasks_polling_started_total{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet"} 3 
+substrate_tasks_polling_started_total{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet"} 72 +substrate_tasks_polling_started_total{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 27 +substrate_tasks_polling_started_total{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet"} 19 +substrate_tasks_polling_started_total{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet"} 818 +substrate_tasks_polling_started_total{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet"} 104 +substrate_tasks_polling_started_total{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet"} 140 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet"} 42 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet"} 5 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet"} 343 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet"} 653 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet"} 182 +substrate_tasks_polling_started_total{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet"} 202 +substrate_tasks_polling_started_total{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 150 
+substrate_tasks_polling_started_total{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet"} 263 +substrate_tasks_polling_started_total{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 29 +substrate_tasks_polling_started_total{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 65 +substrate_tasks_polling_started_total{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet"} 23 +substrate_tasks_polling_started_total{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet"} 662 +substrate_tasks_polling_started_total{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet"} 295 +substrate_tasks_polling_started_total{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet"} 341 +substrate_tasks_polling_started_total{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet"} 1 +substrate_tasks_polling_started_total{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet"} 1 +# HELP substrate_tasks_spawned_total Total number of tasks that have been spawned on the Service +# TYPE substrate_tasks_spawned_total counter +substrate_tasks_spawned_total{kind="async",task_group="approval-voting",task_name="approval-checks",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="authority-discovery",task_name="authority-discovery-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="availability-distribution",task_name="availability-distribution-subsystem",chain="rococo_local_testnet"} 1 
+substrate_tasks_spawned_total{kind="async",task_group="availability-distribution",task_name="chunk-fetcher",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="availability-distribution",task_name="chunk-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="availability-distribution",task_name="pov-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="availability-recovery",task_name="availability-recovery-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="availability-recovery",task_name="recovery-task",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="babe",task_name="babe-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="bitfield-distribution",task_name="bitfield-distribution-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="bitfield-signing",task_name="bitfield-signing-job",chain="rococo_local_testnet"} 14 +substrate_tasks_spawned_total{kind="async",task_group="candidate-backing",task_name="candidate-backing-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="candidate-validation",task_name="candidate-validation-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="candidate-validation",task_name="validate-from-exhaustive",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="collation-generation",task_name="collation-generation-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="collator-protocol",task_name="collator-protocol-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="default",task_name="import-queue",chain="rococo_local_testnet"} 1 
+substrate_tasks_spawned_total{kind="async",task_group="default",task_name="informant",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="default",task_name="prometheus-endpoint",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="default",task_name="storage-monitor",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="default",task_name="telemetry-periodic-send",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="default",task_name="unpin-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="dispute-distribution",task_name="dispute-distribution-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="dispute-distribution",task_name="disputes-receiver",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="dispute-distribution",task_name="get_active_disputes",chain="rococo_local_testnet"} 15 +substrate_tasks_spawned_total{kind="async",task_group="gossip-support",task_name="gossip-support-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="block-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="libp2p-node",chain="rococo_local_testnet"} 17 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="light-client-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="network-transactions-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="peer-store",chain="rococo_local_testnet"} 1 
+substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="state-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="system-rpc-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="networking",task_name="warp-sync-request-handler",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="offchain-work",task_name="offchain-workers-runner",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="offchain-worker",task_name="offchain-on-block",chain="rococo_local_testnet"} 13 +substrate_tasks_spawned_total{kind="async",task_group="overseer",task_name="metrics-metronome",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="prospective-parachains",task_name="prospective-parachains-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="provisioner",task_name="provisioner-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="provisioner",task_name="send-inherent-data",chain="rococo_local_testnet"} 5 +substrate_tasks_spawned_total{kind="async",task_group="pvf-checker",task_name="pvf-checker-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="relay-chain-selection",task_name="approval-checking-lag-update",chain="rococo_local_testnet"} 18 +substrate_tasks_spawned_total{kind="async",task_group="rpc",task_name="substrate-rpc-subscription",chain="rococo_local_testnet"} 2 +substrate_tasks_spawned_total{kind="async",task_group="statement-distribution",task_name="candidate-responder",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="statement-distribution",task_name="large-statement-responder",chain="rococo_local_testnet"} 1 
+substrate_tasks_spawned_total{kind="async",task_group="statement-distribution",task_name="statement-distribution-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="transaction-pool",task_name="on-transaction-imported",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="transaction-pool",task_name="txpool-background",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="async",task_group="transaction-pool",task_name="txpool-notifications",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="approval-distribution",task_name="approval-distribution-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="approval-voting",task_name="approval-voting-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="availability-recovery",task_name="erasure-task",chain="rococo_local_testnet"} 2 +substrate_tasks_spawned_total{kind="blocking",task_group="availability-store",task_name="availability-store-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="bitfield-signing",task_name="bitfield-signing-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="block-import",task_name="basic-block-import-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="candidate-validation",task_name="pvf-validation-host",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="chain-api",task_name="chain-api-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="chain-selection",task_name="chain-selection-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="babe",chain="rococo_local_testnet"} 
1 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="basic-authorship-proposer",chain="rococo_local_testnet"} 5 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="beefy-gadget",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="grandpa-voter",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="overseer",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="default",task_name="syncing",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="dispute-coordinator",task_name="dispute-coordinator-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-in-network-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="network-bridge-rx",task_name="network-bridge-rx-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="network-bridge-tx",task_name="network-bridge-tx-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="networking",task_name="chain-sync-network-service-provider",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="networking",task_name="network-worker",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="runtime-api",task_name="polkadot-runtime-api-request",chain="rococo_local_testnet"} 295 +substrate_tasks_spawned_total{kind="blocking",task_group="runtime-api",task_name="runtime-api-subsystem",chain="rococo_local_testnet"} 1 +substrate_tasks_spawned_total{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-0",chain="rococo_local_testnet"} 1 
+substrate_tasks_spawned_total{kind="blocking",task_group="transaction-pool",task_name="transaction-pool-task-1",chain="rococo_local_testnet"} 1 +# HELP substrate_tokio_threads_alive Number of threads alive right now +# TYPE substrate_tokio_threads_alive gauge +substrate_tokio_threads_alive{chain="rococo_local_testnet"} 42 +# HELP substrate_tokio_threads_total Total number of threads created +# TYPE substrate_tokio_threads_total counter +substrate_tokio_threads_total{chain="rococo_local_testnet"} 42 +# HELP substrate_unbounded_channel_len Items sent/received/dropped on each mpsc::unbounded instance +# TYPE substrate_unbounded_channel_len counter +substrate_unbounded_channel_len{action="received",entity="mpsc_api_protocol",chain="rococo_local_testnet"} 8 +substrate_unbounded_channel_len{action="received",entity="mpsc_beefy_best_block_notification_stream",chain="rococo_local_testnet"} 6 +substrate_unbounded_channel_len{action="received",entity="mpsc_beefy_gossip_validator",chain="rococo_local_testnet"} 3 +substrate_unbounded_channel_len{action="received",entity="mpsc_buffered_link",chain="rococo_local_testnet"} 9 +substrate_unbounded_channel_len{action="received",entity="mpsc_chain_sync",chain="rococo_local_testnet"} 119 +substrate_unbounded_channel_len{action="received",entity="mpsc_finality_notification_stream",chain="rococo_local_testnet"} 44 +substrate_unbounded_channel_len{action="received",entity="mpsc_grandpa_gossip_validator",chain="rococo_local_testnet"} 106 +substrate_unbounded_channel_len{action="received",entity="mpsc_grandpa_neighbor_packet_worker",chain="rococo_local_testnet"} 31 +substrate_unbounded_channel_len{action="received",entity="mpsc_import_notification_stream",chain="rococo_local_testnet"} 105 +substrate_unbounded_channel_len{action="received",entity="mpsc_import_queue_worker_blocks",chain="rococo_local_testnet"} 9 +substrate_unbounded_channel_len{action="received",entity="mpsc_network_service_provider",chain="rococo_local_testnet"} 22 
+substrate_unbounded_channel_len{action="received",entity="mpsc_network_worker",chain="rococo_local_testnet"} 363 +substrate_unbounded_channel_len{action="received",entity="mpsc_notifications_protocol",chain="rococo_local_testnet"} 12 +substrate_unbounded_channel_len{action="received",entity="mpsc_protocol_controllers_to_notifications",chain="rococo_local_testnet"} 20 +substrate_unbounded_channel_len{action="received",entity="mpsc_revalidation_queue",chain="rococo_local_testnet"} 13 +substrate_unbounded_channel_len{action="received",entity="mpsc_syncing_engine_protocol",chain="rococo_local_testnet"} 19 +substrate_unbounded_channel_len{action="received",entity="network-gossip",chain="rococo_local_testnet"} 4 +substrate_unbounded_channel_len{action="received",entity="transactions-handler-sync",chain="rococo_local_testnet"} 2 +substrate_unbounded_channel_len{action="received",entity="unpin-worker-channel",chain="rococo_local_testnet"} 23 +substrate_unbounded_channel_len{action="send",entity="mpsc_api_protocol",chain="rococo_local_testnet"} 8 +substrate_unbounded_channel_len{action="send",entity="mpsc_beefy_best_block_notification_stream",chain="rococo_local_testnet"} 6 +substrate_unbounded_channel_len{action="send",entity="mpsc_beefy_gossip_validator",chain="rococo_local_testnet"} 3 +substrate_unbounded_channel_len{action="send",entity="mpsc_buffered_link",chain="rococo_local_testnet"} 9 +substrate_unbounded_channel_len{action="send",entity="mpsc_chain_sync",chain="rococo_local_testnet"} 119 +substrate_unbounded_channel_len{action="send",entity="mpsc_finality_notification_stream",chain="rococo_local_testnet"} 44 +substrate_unbounded_channel_len{action="send",entity="mpsc_grandpa_gossip_validator",chain="rococo_local_testnet"} 106 +substrate_unbounded_channel_len{action="send",entity="mpsc_grandpa_neighbor_packet_worker",chain="rococo_local_testnet"} 31 +substrate_unbounded_channel_len{action="send",entity="mpsc_import_notification_stream",chain="rococo_local_testnet"} 
105 +substrate_unbounded_channel_len{action="send",entity="mpsc_import_queue_worker_blocks",chain="rococo_local_testnet"} 9 +substrate_unbounded_channel_len{action="send",entity="mpsc_network_service_provider",chain="rococo_local_testnet"} 22 +substrate_unbounded_channel_len{action="send",entity="mpsc_network_worker",chain="rococo_local_testnet"} 363 +substrate_unbounded_channel_len{action="send",entity="mpsc_notifications_protocol",chain="rococo_local_testnet"} 12 +substrate_unbounded_channel_len{action="send",entity="mpsc_protocol_controllers_to_notifications",chain="rococo_local_testnet"} 20 +substrate_unbounded_channel_len{action="send",entity="mpsc_revalidation_queue",chain="rococo_local_testnet"} 13 +substrate_unbounded_channel_len{action="send",entity="mpsc_syncing_engine_protocol",chain="rococo_local_testnet"} 19 +substrate_unbounded_channel_len{action="send",entity="network-gossip",chain="rococo_local_testnet"} 4 +substrate_unbounded_channel_len{action="send",entity="transactions-handler-sync",chain="rococo_local_testnet"} 2 +substrate_unbounded_channel_len{action="send",entity="unpin-worker-channel",chain="rococo_local_testnet"} 23 +# HELP substrate_unbounded_channel_size Size (number of messages to be processed) of each mpsc::unbounded instance +# TYPE substrate_unbounded_channel_size gauge +substrate_unbounded_channel_size{entity="mpsc_api_protocol",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_beefy_best_block_notification_stream",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_beefy_gossip_validator",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_buffered_link",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_chain_sync",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_finality_notification_stream",chain="rococo_local_testnet"} 0 
+substrate_unbounded_channel_size{entity="mpsc_grandpa_gossip_validator",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_grandpa_neighbor_packet_worker",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_import_notification_stream",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_import_queue_worker_blocks",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_network_service_provider",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_network_worker",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_notifications_protocol",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_ocw_to_api",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_ocw_to_worker",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_protocol_controllers_to_notifications",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_revalidation_queue",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="mpsc_syncing_engine_protocol",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="network-gossip",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="transactions-handler-sync",chain="rococo_local_testnet"} 0 +substrate_unbounded_channel_size{entity="unpin-worker-channel",chain="rococo_local_testnet"} 0 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/.gitignore b/vendor/pezkuwi-zombienet-sdk/crates/provider/.gitignore new file mode 100644 index 00000000..4fffb2f8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/provider/Cargo.toml new file mode 100644 index 00000000..bd9eb24c --- /dev/null 
+++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "zombienet-provider" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Zombienet provider, implement the logic to run the nodes in the native provider" +keywords = ["zombienet", "provider", "native"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +serde = { workspace = true, features = ["derive", "rc"] } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +tokio = { workspace = true, features = [ + "process", + "macros", + "fs", + "time", + "rt", +] } +tokio-util = { workspace = true, features = ["compat"] } +thiserror = { workspace = true } +anyhow = { workspace = true } +uuid = { workspace = true, features = ["v4"] } +nix = { workspace = true, features = ["signal"] } +kube = { workspace = true, features = ["ws", "runtime"] } +k8s-openapi = { workspace = true, features = ["v1_27"] } +tar = { workspace = true } +sha2 = { workspace = true } +hex = { workspace = true } +tracing = { workspace = true } +reqwest = { workspace = true } +regex = { workspace = true } +url = { workspace = true } +flate2 = { workspace = true } +erased-serde = { workspace = true } + +# Zomebienet deps +support = { workspace = true } +configuration = { workspace = true } diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker.rs new file mode 100644 index 00000000..565db021 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker.rs @@ -0,0 +1,6 @@ +mod client; +mod namespace; +mod node; +mod provider; + +pub use provider::DockerProvider; diff --git 
a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/client.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/client.rs new file mode 100644 index 00000000..5fe016ae --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/client.rs @@ -0,0 +1,596 @@ +use std::{collections::HashMap, path::Path, process::Stdio}; + +use anyhow::anyhow; +use futures::future::try_join_all; +use serde::{Deserialize, Deserializer}; +use tokio::process::Command; +use tracing::{info, trace}; + +use crate::types::{ExecutionResult, Port}; + +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +pub struct Error(#[from] anyhow::Error); + +pub type Result = core::result::Result; + +#[derive(Clone)] +pub struct DockerClient { + using_podman: bool, +} + +#[derive(Debug)] +pub struct ContainerRunOptions { + image: String, + command: Vec, + env: Option>, + volume_mounts: Option>, + name: Option, + entrypoint: Option, + port_mapping: HashMap, + rm: bool, + detach: bool, +} + +enum Container { + Docker(DockerContainer), + Podman(PodmanContainer), +} + +// TODO: we may don't need this +#[allow(dead_code)] +#[derive(Deserialize, Debug)] +struct DockerContainer { + #[serde(alias = "Names", deserialize_with = "deserialize_list")] + names: Vec, + #[serde(alias = "Ports", deserialize_with = "deserialize_list")] + ports: Vec, + #[serde(alias = "State")] + state: String, +} + +// TODO: we may don't need this +#[allow(dead_code)] +#[derive(Deserialize, Debug)] +struct PodmanPort { + host_ip: String, + container_port: u16, + host_port: u16, + range: u16, + protocol: String, +} + +// TODO: we may don't need this +#[allow(dead_code)] +#[derive(Deserialize, Debug)] +struct PodmanContainer { + #[serde(alias = "Id")] + id: String, + #[serde(alias = "Image")] + image: String, + #[serde(alias = "Mounts")] + mounts: Vec, + #[serde(alias = "Names")] + names: Vec, + #[serde(alias = "Ports", deserialize_with = "deserialize_null_as_default")] + ports: Vec, + #[serde(alias = 
"State")] + state: String, +} + +fn deserialize_list<'de, D>(deserializer: D) -> std::result::Result, D::Error> +where + D: Deserializer<'de>, +{ + let str_sequence = String::deserialize(deserializer)?; + Ok(str_sequence + .split(',') + .filter(|item| !item.is_empty()) + .map(|item| item.to_owned()) + .collect()) +} + +fn deserialize_null_as_default<'de, D, T>(deserializer: D) -> std::result::Result +where + T: Default + Deserialize<'de>, + D: Deserializer<'de>, +{ + let opt = Option::deserialize(deserializer)?; + Ok(opt.unwrap_or_default()) +} + +impl ContainerRunOptions { + pub fn new(image: &str, command: Vec) -> Self + where + S: Into + std::fmt::Debug + Send + Clone, + { + ContainerRunOptions { + image: image.to_string(), + command: command + .clone() + .into_iter() + .map(|s| s.into()) + .collect::>(), + env: None, + volume_mounts: None, + name: None, + entrypoint: None, + port_mapping: HashMap::default(), + rm: false, + detach: true, // add -d flag by default + } + } + + pub fn env(mut self, env: Vec<(S, S)>) -> Self + where + S: Into + std::fmt::Debug + Send + Clone, + { + self.env = Some( + env.into_iter() + .map(|(name, value)| (name.into(), value.into())) + .collect(), + ); + self + } + + pub fn volume_mounts(mut self, volume_mounts: HashMap) -> Self + where + S: Into + std::fmt::Debug + Send + Clone, + { + self.volume_mounts = Some( + volume_mounts + .into_iter() + .map(|(source, target)| (source.into(), target.into())) + .collect(), + ); + self + } + + pub fn name(mut self, name: S) -> Self + where + S: Into + std::fmt::Debug + Send + Clone, + { + self.name = Some(name.into()); + self + } + + pub fn entrypoint(mut self, entrypoint: S) -> Self + where + S: Into + std::fmt::Debug + Send + Clone, + { + self.entrypoint = Some(entrypoint.into()); + self + } + + pub fn port_mapping(mut self, port_mapping: &HashMap) -> Self { + self.port_mapping.clone_from(port_mapping); + self + } + + pub fn rm(mut self) -> Self { + self.rm = true; + self + } + + pub fn 
detach(mut self, choice: bool) -> Self { + self.detach = choice; + self + } +} + +impl DockerClient { + pub async fn new() -> Result { + let using_podman = Self::is_using_podman().await?; + + Ok(DockerClient { using_podman }) + } + + pub fn client_binary(&self) -> String { + String::from(if self.using_podman { + "podman" + } else { + "docker" + }) + } + + async fn is_using_podman() -> Result { + if let Ok(output) = tokio::process::Command::new("docker") + .arg("version") + .output() + .await + { + // detect whether we're actually running podman with docker emulation + return Ok(String::from_utf8_lossy(&output.stdout) + .to_lowercase() + .contains("podman")); + } + + tokio::process::Command::new("podman") + .arg("--version") + .output() + .await + .map_err(|err| anyhow!("Failed to detect container engine: {err}"))?; + + Ok(true) + } +} + +impl DockerClient { + fn client_command(&self) -> tokio::process::Command { + tokio::process::Command::new(self.client_binary()) + } + + pub async fn create_volume(&self, name: &str) -> Result<()> { + let result = self + .client_command() + .args(["volume", "create", name]) + .output() + .await + .map_err(|err| anyhow!("Failed to create volume '{name}': {err}"))?; + + if !result.status.success() { + return Err(anyhow!( + "Failed to create volume '{name}': {}", + String::from_utf8_lossy(&result.stderr) + ) + .into()); + } + + Ok(()) + } + + pub async fn container_run(&self, options: ContainerRunOptions) -> Result { + let mut cmd = self.client_command(); + cmd.args(["run", "--platform", "linux/amd64"]); + + if options.detach { + cmd.arg("-d"); + } + + Self::apply_cmd_options(&mut cmd, &options); + + trace!("cmd: {:?}", cmd); + + let result = cmd.output().await.map_err(|err| { + anyhow!( + "Failed to run container with image '{image}' and command '{command}': {err}", + image = options.image, + command = options.command.join(" "), + ) + })?; + + if !result.status.success() { + return Err(anyhow!( + "Failed to run container with image 
'{image}' and command '{command}': {err}", + image = options.image, + command = options.command.join(" "), + err = String::from_utf8_lossy(&result.stderr) + ) + .into()); + } + + Ok(String::from_utf8_lossy(&result.stdout).to_string()) + } + + pub async fn container_create(&self, options: ContainerRunOptions) -> Result { + let mut cmd = self.client_command(); + cmd.args(["container", "create"]); + + Self::apply_cmd_options(&mut cmd, &options); + + trace!("cmd: {:?}", cmd); + + let result = cmd.output().await.map_err(|err| { + anyhow!( + "Failed to run container with image '{image}' and command '{command}': {err}", + image = options.image, + command = options.command.join(" "), + ) + })?; + + if !result.status.success() { + return Err(anyhow!( + "Failed to run container with image '{image}' and command '{command}': {err}", + image = options.image, + command = options.command.join(" "), + err = String::from_utf8_lossy(&result.stderr) + ) + .into()); + } + + Ok(String::from_utf8_lossy(&result.stdout).to_string()) + } + + pub async fn container_exec( + &self, + name: &str, + command: Vec, + env: Option>, + as_user: Option, + ) -> Result + where + S: Into + std::fmt::Debug + Send + Clone, + { + let mut cmd = self.client_command(); + cmd.arg("exec"); + + if let Some(env) = env { + for env_var in env { + cmd.args(["-e", &format!("{}={}", env_var.0.into(), env_var.1.into())]); + } + } + + if let Some(user) = as_user { + cmd.args(["-u", user.into().as_ref()]); + } + + cmd.arg(name); + + cmd.args( + command + .clone() + .into_iter() + .map(|s| >::into(s)), + ); + + trace!("cmd is : {:?}", cmd); + + let result = cmd.output().await.map_err(|err| { + anyhow!( + "Failed to exec '{}' on '{}': {err}", + command + .into_iter() + .map(|s| >::into(s)) + .collect::>() + .join(" "), + name, + ) + })?; + + if !result.status.success() { + return Ok(Err(( + result.status, + String::from_utf8_lossy(&result.stderr).to_string(), + ))); + } + + 
Ok(Ok(String::from_utf8_lossy(&result.stdout).to_string())) + } + + pub async fn container_cp( + &self, + name: &str, + local_path: &Path, + remote_path: &Path, + ) -> Result<()> { + let result = self + .client_command() + .args([ + "cp", + local_path.to_string_lossy().as_ref(), + &format!("{name}:{}", remote_path.to_string_lossy().as_ref()), + ]) + .output() + .await + .map_err(|err| { + anyhow!( + "Failed copy file '{file}' to container '{name}': {err}", + file = local_path.to_string_lossy(), + ) + })?; + + if !result.status.success() { + return Err(anyhow!( + "Failed to copy file '{file}' to container '{name}': {err}", + file = local_path.to_string_lossy(), + err = String::from_utf8_lossy(&result.stderr) + ) + .into()); + } + + Ok(()) + } + + pub async fn container_rm(&self, name: &str) -> Result<()> { + let result = self + .client_command() + .args(["rm", "--force", "--volumes", name]) + .output() + .await + .map_err(|err| anyhow!("Failed do remove container '{name}: {err}"))?; + + if !result.status.success() { + return Err(anyhow!( + "Failed to remove container '{name}': {err}", + err = String::from_utf8_lossy(&result.stderr) + ) + .into()); + } + + Ok(()) + } + + pub async fn namespaced_containers_rm(&self, namespace: &str) -> Result<()> { + let container_names: Vec = self + .get_containers() + .await? 
+ .into_iter() + .filter_map(|container| match container { + Container::Docker(container) => { + if let Some(name) = container.names.first() { + if name.starts_with(namespace) { + return Some(name.to_string()); + } + } + + None + }, + Container::Podman(container) => { + if let Some(name) = container.names.first() { + if name.starts_with(namespace) { + return Some(name.to_string()); + } + } + + None + }, + }) + .collect(); + + info!("{:?}", container_names); + let futures = container_names + .iter() + .map(|name| self.container_rm(name)) + .collect::>(); + try_join_all(futures).await?; + + Ok(()) + } + + pub async fn container_ip(&self, container_name: &str) -> Result { + let ip = if self.using_podman { + "127.0.0.1".into() + } else { + let mut cmd = tokio::process::Command::new("docker"); + cmd.args(vec![ + "inspect", + "-f", + "{{ .NetworkSettings.IPAddress }}", + container_name, + ]); + + trace!("CMD: {cmd:?}"); + + let res = cmd + .output() + .await + .map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))?; + + String::from_utf8(res.stdout) + .map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))? + .trim() + .into() + }; + + trace!("IP: {ip}"); + Ok(ip) + } + + async fn get_containers(&self) -> Result> { + let containers = if self.using_podman { + self.get_podman_containers() + .await? + .into_iter() + .map(Container::Podman) + .collect() + } else { + self.get_docker_containers() + .await? 
+ .into_iter() + .map(Container::Docker) + .collect() + }; + + Ok(containers) + } + + async fn get_podman_containers(&self) -> Result> { + let res = tokio::process::Command::new("podman") + .args(vec!["ps", "--all", "--no-trunc", "--format", "json"]) + .output() + .await + .map_err(|err| anyhow!("Failed to get podman containers output: {err}"))?; + + let stdout = String::from_utf8_lossy(&res.stdout); + + let containers = serde_json::from_str(&stdout) + .map_err(|err| anyhow!("Failed to parse podman containers output: {err}"))?; + + Ok(containers) + } + + async fn get_docker_containers(&self) -> Result> { + let res = tokio::process::Command::new("docker") + .args(vec!["ps", "--all", "--no-trunc", "--format", "json"]) + .output() + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&res.stdout); + + let mut containers = vec![]; + for line in stdout.lines() { + containers.push( + serde_json::from_str::(line) + .map_err(|err| anyhow!("Failed to parse docker container output: {err}"))?, + ); + } + + Ok(containers) + } + + pub(crate) async fn container_logs(&self, container_name: &str) -> Result { + let output = Command::new("sh") + .arg("-c") + .arg(format!("docker logs -t '{container_name}' 2>&1")) + .stdout(Stdio::piped()) + .output() + .await + .map_err(|err| { + anyhow!( + "Failed to spawn docker logs command for container '{container_name}': {err}" + ) + })?; + + let logs = String::from_utf8_lossy(&output.stdout).to_string(); + + if !output.status.success() { + // stderr was redirected to stdout, so logs should contain the error message if any + return Err(anyhow!( + "Failed to get logs for container '{name}': {logs}", + name = container_name, + logs = &logs + ) + .into()); + } + + Ok(logs) + } + + fn apply_cmd_options(cmd: &mut Command, options: &ContainerRunOptions) { + if options.rm { + cmd.arg("--rm"); + } + + if let Some(entrypoint) = options.entrypoint.as_ref() { + cmd.args(["--entrypoint", entrypoint]); + } + + if let Some(volume_mounts) = 
options.volume_mounts.as_ref() { + for (source, target) in volume_mounts { + cmd.args(["-v", &format!("{source}:{target}")]); + } + } + + if let Some(env) = options.env.as_ref() { + for env_var in env { + cmd.args(["-e", &format!("{}={}", env_var.0, env_var.1)]); + } + } + + // add published ports + for (container_port, host_port) in options.port_mapping.iter() { + cmd.args(["-p", &format!("{host_port}:{container_port}")]); + } + + if let Some(name) = options.name.as_ref() { + cmd.args(["--name", name]); + } + + cmd.arg(&options.image); + + for arg in &options.command { + cmd.arg(arg); + } + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/namespace.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/namespace.rs new file mode 100644 index 00000000..5b13df13 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/namespace.rs @@ -0,0 +1,494 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + sync::{Arc, Weak}, + thread, +}; + +use async_trait::async_trait; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; +use tokio::sync::{Mutex, RwLock}; +use tracing::{debug, trace, warn}; +use uuid::Uuid; + +use super::{ + client::{ContainerRunOptions, DockerClient}, + node::DockerNode, + DockerProvider, +}; +use crate::{ + constants::NAMESPACE_PREFIX, + docker::{ + node::{DeserializableDockerNodeOptions, DockerNodeOptions}, + provider, + }, + shared::helpers::extract_execution_result, + types::{ + GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, + SpawnNodeOptions, + }, + DynNode, ProviderError, ProviderNamespace, ProviderNode, +}; + +pub struct DockerNamespace +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + #[allow(dead_code)] + provider: Weak>, + name: String, + base_dir: PathBuf, + capabilities: ProviderCapabilities, + docker_client: DockerClient, + filesystem: FS, + delete_on_drop: Arc>, + pub(super) nodes: RwLock>>>, +} + +impl DockerNamespace 
+where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + provider: &Weak>, + tmp_dir: &PathBuf, + capabilities: &ProviderCapabilities, + docker_client: &DockerClient, + filesystem: &FS, + custom_base_dir: Option<&Path>, + ) -> Result, ProviderError> { + let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4()); + let base_dir = if let Some(custom_base_dir) = custom_base_dir { + if !filesystem.exists(custom_base_dir).await { + filesystem.create_dir(custom_base_dir).await?; + } else { + warn!( + "⚠️ Using and existing directory {} as base dir", + custom_base_dir.to_string_lossy() + ); + } + PathBuf::from(custom_base_dir) + } else { + let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]); + filesystem.create_dir(&base_dir).await?; + base_dir + }; + + let namespace = Arc::new_cyclic(|weak| DockerNamespace { + weak: weak.clone(), + provider: provider.clone(), + name, + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + docker_client: docker_client.clone(), + nodes: RwLock::new(HashMap::new()), + delete_on_drop: Arc::new(Mutex::new(true)), + }); + + namespace.initialize().await?; + + Ok(namespace) + } + + pub(super) async fn attach_to_live( + provider: &Weak>, + capabilities: &ProviderCapabilities, + docker_client: &DockerClient, + filesystem: &FS, + custom_base_dir: &Path, + name: &str, + ) -> Result, ProviderError> { + let base_dir = custom_base_dir.to_path_buf(); + + let namespace = Arc::new_cyclic(|weak| DockerNamespace { + weak: weak.clone(), + provider: provider.clone(), + name: name.to_owned(), + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + docker_client: docker_client.clone(), + nodes: RwLock::new(HashMap::new()), + delete_on_drop: Arc::new(Mutex::new(false)), + }); + + Ok(namespace) + } + + async fn initialize(&self) -> Result<(), ProviderError> { + // let ns_scripts_shared = PathBuf::from_iter([&self.base_dir, 
&PathBuf::from("shared-scripts")]); + // self.filesystem.create_dir(&ns_scripts_shared).await?; + self.initialize_zombie_scripts_volume().await?; + self.initialize_helper_binaries_volume().await?; + + Ok(()) + } + + async fn initialize_zombie_scripts_volume(&self) -> Result<(), ProviderError> { + let local_zombie_wrapper_path = + PathBuf::from_iter([&self.base_dir, &PathBuf::from("zombie-wrapper.sh")]); + + self.filesystem + .write( + &local_zombie_wrapper_path, + include_str!("../shared/scripts/zombie-wrapper.sh"), + ) + .await?; + + let local_helper_binaries_downloader_path = PathBuf::from_iter([ + &self.base_dir, + &PathBuf::from("helper-binaries-downloader.sh"), + ]); + + self.filesystem + .write( + &local_helper_binaries_downloader_path, + include_str!("../shared/scripts/helper-binaries-downloader.sh"), + ) + .await?; + + let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name); + let zombie_wrapper_container_name = format!("{}-scripts", self.name); + + self.docker_client + .create_volume(&zombie_wrapper_volume_name) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + self.docker_client + .container_create( + ContainerRunOptions::new("alpine:latest", vec!["tail", "-f", "/dev/null"]) + .volume_mounts(HashMap::from([( + zombie_wrapper_volume_name.as_str(), + "/scripts", + )])) + .name(&zombie_wrapper_container_name) + .rm(), + ) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + // copy the scripts + self.docker_client + .container_cp( + &zombie_wrapper_container_name, + &local_zombie_wrapper_path, + &PathBuf::from("/scripts/zombie-wrapper.sh"), + ) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + self.docker_client + .container_cp( + &zombie_wrapper_container_name, + &local_helper_binaries_downloader_path, + &PathBuf::from("/scripts/helper-binaries-downloader.sh"), + ) + .await + .map_err(|err| 
ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + // set permissions for rwx on whole volume recursively + self.docker_client + .container_run( + ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/scripts"]) + .volume_mounts(HashMap::from([( + zombie_wrapper_volume_name.as_ref(), + "/scripts", + )])) + .rm(), + ) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + Ok(()) + } + + async fn initialize_helper_binaries_volume(&self) -> Result<(), ProviderError> { + let helper_binaries_volume_name = format!("{}-helper-binaries", self.name); + let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name); + + self.docker_client + .create_volume(&helper_binaries_volume_name) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + // download binaries to volume + self.docker_client + .container_run( + ContainerRunOptions::new( + "alpine:latest", + vec!["ash", "/scripts/helper-binaries-downloader.sh"], + ) + .volume_mounts(HashMap::from([ + ( + helper_binaries_volume_name.as_str(), + "/helpers", + ), + ( + zombie_wrapper_volume_name.as_ref(), + "/scripts", + ) + ])) + // wait until complete + .detach(false) + .rm(), + ) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + // set permissions for rwx on whole volume recursively + self.docker_client + .container_run( + ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/helpers"]) + .volume_mounts(HashMap::from([( + helper_binaries_volume_name.as_ref(), + "/helpers", + )])) + .rm(), + ) + .await + .map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?; + + Ok(()) + } + + pub async fn set_delete_on_drop(&self, delete_on_drop: bool) { + *self.delete_on_drop.lock().await = delete_on_drop; + } + + pub async fn delete_on_drop(&self) -> bool { + if let Ok(delete_on_drop) = 
self.delete_on_drop.try_lock() { + *delete_on_drop + } else { + // if we can't lock just remove the ns + true + } + } +} + +#[async_trait] +impl ProviderNamespace for DockerNamespace +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + fn provider_name(&self) -> &str { + provider::PROVIDER_NAME + } + + async fn detach(&self) { + self.set_delete_on_drop(false).await; + } + + async fn is_detached(&self) -> bool { + self.delete_on_drop().await + } + + async fn nodes(&self) -> HashMap { + self.nodes + .read() + .await + .iter() + .map(|(name, node)| (name.clone(), node.clone() as DynNode)) + .collect() + } + + async fn get_node_available_args( + &self, + (command, image): (String, Option), + ) -> Result { + let node_image = image.expect(&format!("image should be present when getting node available args with docker provider {THIS_IS_A_BUG}")); + + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "cat".to_string()) + .image(node_image.clone()), + ) + .await?; + + let available_args_output = temp_node + .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"])) + .await? 
+ .map_err(|(_exit, status)| { + ProviderError::NodeAvailableArgsError(node_image, command, status) + })?; + + temp_node.destroy().await?; + + Ok(available_args_output) + } + + async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result { + debug!("spawn option {:?}", options); + + let node = DockerNode::new(DockerNodeOptions { + namespace: &self.weak, + namespace_base_dir: &self.base_dir, + name: &options.name, + image: options.image.as_ref(), + program: &options.program, + args: &options.args, + env: &options.env, + startup_files: &options.injected_files, + db_snapshot: options.db_snapshot.as_ref(), + docker_client: &self.docker_client, + container_name: format!("{}-{}", self.name, options.name), + filesystem: &self.filesystem, + port_mapping: options.port_mapping.as_ref().unwrap_or(&HashMap::default()), + }) + .await?; + + self.nodes + .write() + .await + .insert(node.name().to_string(), node.clone()); + + Ok(node) + } + + async fn spawn_node_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let deserializable: DeserializableDockerNodeOptions = + serde_json::from_value(json_value.clone())?; + let options = DockerNodeOptions::from_deserializable( + &deserializable, + &self.weak, + &self.base_dir, + &self.docker_client, + &self.filesystem, + ); + + let node = DockerNode::attach_to_live(options).await?; + + self.nodes + .write() + .await + .insert(node.name().to_string(), node.clone()); + + Ok(node) + } + + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> { + debug!("generate files options {options:#?}"); + + let node_name = options + .temp_name + .unwrap_or_else(|| format!("temp-{}", Uuid::new_v4())); + let node_image = options.image.expect(&format!( + "image should be present when generating files with docker provider {THIS_IS_A_BUG}" + )); + + // run dummy command in a new container + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(node_name, "cat".to_string()) + 
.injected_files(options.injected_files) + .image(node_image), + ) + .await?; + + for GenerateFileCommand { + program, + args, + env, + local_output_path, + } in options.commands + { + let local_output_full_path = format!( + "{}{}{}", + self.base_dir.to_string_lossy(), + if local_output_path.starts_with("/") { + "" + } else { + "/" + }, + local_output_path.to_string_lossy() + ); + + let contents = extract_execution_result( + &temp_node, + RunCommandOptions { program, args, env }, + options.expected_path.as_ref(), + ) + .await?; + self.filesystem + .write(local_output_full_path, contents) + .await + .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?; + } + + temp_node.destroy().await + } + + async fn static_setup(&self) -> Result<(), ProviderError> { + todo!() + } + + async fn destroy(&self) -> Result<(), ProviderError> { + let _ = self + .docker_client + .namespaced_containers_rm(&self.name) + .await + .map_err(|err| ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into()))?; + + if let Some(provider) = self.provider.upgrade() { + provider.namespaces.write().await.remove(&self.name); + } + + Ok(()) + } +} + +impl Drop for DockerNamespace +where + FS: FileSystem + Send + Sync + Clone, +{ + fn drop(&mut self) { + let ns_name = self.name.clone(); + if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() { + if *delete_on_drop { + let client = self.docker_client.clone(); + let provider = self.provider.upgrade(); + + let handler = thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + trace!("🧟 deleting ns {ns_name} from cluster"); + let _ = client.namespaced_containers_rm(&ns_name).await; + trace!("✅ deleted"); + }); + }); + + if handler.join().is_ok() { + if let Some(provider) = provider { + if let Ok(mut p) = provider.namespaces.try_write() { + p.remove(&self.name); + } else { + warn!( + "⚠️ Can not acquire write lock to the provider, ns {} not removed", + self.name + ); + } + } + } + } else 
{ + trace!("⚠️ leaking ns {ns_name} in cluster"); + } + }; + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/node.rs new file mode 100644 index 00000000..72a4076c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/node.rs @@ -0,0 +1,659 @@ +use std::{ + collections::HashMap, + net::IpAddr, + path::{Component, Path, PathBuf}, + sync::{Arc, Weak}, + time::Duration, +}; + +use anyhow::anyhow; +use async_trait::async_trait; +use configuration::types::AssetLocation; +use futures::future::try_join_all; +use serde::{Deserialize, Serialize}; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; +use tokio::{time::sleep, try_join}; +use tracing::debug; + +use super::{ + client::{ContainerRunOptions, DockerClient}, + namespace::DockerNamespace, +}; +use crate::{ + constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, NODE_SCRIPTS_DIR}, + docker, + types::{ExecutionResult, Port, RunCommandOptions, RunScriptOptions, TransferedFile}, + ProviderError, ProviderNamespace, ProviderNode, +}; + +pub(super) struct DockerNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) namespace: &'a Weak>, + pub(super) namespace_base_dir: &'a PathBuf, + pub(super) name: &'a str, + pub(super) image: Option<&'a String>, + pub(super) program: &'a str, + pub(super) args: &'a [String], + pub(super) env: &'a [(String, String)], + pub(super) startup_files: &'a [TransferedFile], + pub(super) db_snapshot: Option<&'a AssetLocation>, + pub(super) docker_client: &'a DockerClient, + pub(super) container_name: String, + pub(super) filesystem: &'a FS, + pub(super) port_mapping: &'a HashMap, +} + +impl<'a, FS> DockerNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub fn from_deserializable( + deserializable: &'a DeserializableDockerNodeOptions, + namespace: &'a Weak>, + namespace_base_dir: &'a PathBuf, + 
docker_client: &'a DockerClient, + filesystem: &'a FS, + ) -> Self { + DockerNodeOptions { + namespace, + namespace_base_dir, + name: &deserializable.name, + image: deserializable.image.as_ref(), + program: &deserializable.program, + args: &deserializable.args, + env: &deserializable.env, + startup_files: &[], + db_snapshot: None, + docker_client, + container_name: deserializable.container_name.clone(), + filesystem, + port_mapping: &deserializable.port_mapping, + } + } +} + +#[derive(Deserialize)] +pub(super) struct DeserializableDockerNodeOptions { + pub(super) name: String, + pub(super) image: Option, + pub(super) program: String, + pub(super) args: Vec, + pub(super) env: Vec<(String, String)>, + pub(super) container_name: String, + pub(super) port_mapping: HashMap, +} + +#[derive(Serialize)] +pub struct DockerNode +where + FS: FileSystem + Send + Sync + Clone, +{ + #[serde(skip)] + namespace: Weak>, + name: String, + image: String, + program: String, + args: Vec, + env: Vec<(String, String)>, + base_dir: PathBuf, + config_dir: PathBuf, + data_dir: PathBuf, + relay_data_dir: PathBuf, + scripts_dir: PathBuf, + log_path: PathBuf, + #[serde(skip)] + docker_client: DockerClient, + container_name: String, + port_mapping: HashMap, + #[allow(dead_code)] + #[serde(skip)] + filesystem: FS, + provider_tag: String, +} + +impl DockerNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + options: DockerNodeOptions<'_, FS>, + ) -> Result, ProviderError> { + let image = options.image.ok_or_else(|| { + ProviderError::MissingNodeInfo(options.name.to_string(), "missing image".to_string()) + })?; + + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + filesystem.create_dir_all(&base_dir).await?; + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = 
PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = base_dir.join("node.log"); + + try_join!( + filesystem.create_dir_all(&config_dir), + filesystem.create_dir_all(&data_dir), + filesystem.create_dir_all(&relay_data_dir), + filesystem.create_dir_all(&scripts_dir), + )?; + + let node = Arc::new(DockerNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + image: image.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + filesystem: filesystem.clone(), + docker_client: options.docker_client.clone(), + container_name: options.container_name, + port_mapping: options.port_mapping.clone(), + provider_tag: docker::provider::PROVIDER_NAME.to_string(), + }); + + node.initialize_docker().await?; + + if let Some(db_snap) = options.db_snapshot { + node.initialize_db_snapshot(db_snap).await?; + } + + node.initialize_startup_files(options.startup_files).await?; + + node.start().await?; + + Ok(node) + } + + pub(super) async fn attach_to_live( + options: DockerNodeOptions<'_, FS>, + ) -> Result, ProviderError> { + let image = options.image.ok_or_else(|| { + ProviderError::MissingNodeInfo(options.name.to_string(), "missing image".to_string()) + })?; + + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + filesystem.create_dir_all(&base_dir).await?; + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + 
let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = base_dir.join("node.log"); + + let node = Arc::new(DockerNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + image: image.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + filesystem: filesystem.clone(), + docker_client: options.docker_client.clone(), + container_name: options.container_name, + port_mapping: options.port_mapping.clone(), + provider_tag: docker::provider::PROVIDER_NAME.to_string(), + }); + + Ok(node) + } + + async fn initialize_docker(&self) -> Result<(), ProviderError> { + let command = [vec![self.program.to_string()], self.args.to_vec()].concat(); + + self.docker_client + .container_run( + ContainerRunOptions::new(&self.image, command) + .name(&self.container_name) + .env(self.env.clone()) + .volume_mounts(HashMap::from([ + ( + format!("{}-zombie-wrapper", self.namespace_name(),), + "/scripts".to_string(), + ), + ( + format!("{}-helper-binaries", self.namespace_name()), + "/helpers".to_string(), + ), + ( + self.config_dir.to_string_lossy().into_owned(), + "/cfg".to_string(), + ), + ( + self.data_dir.to_string_lossy().into_owned(), + "/data".to_string(), + ), + ( + self.relay_data_dir.to_string_lossy().into_owned(), + "/relay-data".to_string(), + ), + ])) + .entrypoint("/scripts/zombie-wrapper.sh") + .port_mapping(&self.port_mapping), + ) + .await + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.clone(), err.into()))?; + + // change dirs permission + let _ = self + .docker_client + .container_exec( + &self.container_name, + ["chmod", "777", "/cfg", "/data", "/relay-data"].into(), + None, + Some("root"), + ) + .await + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.clone(), err.into()))?; + + Ok(()) + } + + async fn initialize_db_snapshot( + &self, + 
_db_snapshot: &AssetLocation, + ) -> Result<(), ProviderError> { + todo!() + // trace!("snap: {db_snapshot}"); + // let url_of_snap = match db_snapshot { + // AssetLocation::Url(location) => location.clone(), + // AssetLocation::FilePath(filepath) => self.upload_to_fileserver(filepath).await?, + // }; + + // // we need to get the snapshot from a public access + // // and extract to /data + // let opts = RunCommandOptions::new("mkdir").args([ + // "-p", + // "/data/", + // "&&", + // "mkdir", + // "-p", + // "/relay-data/", + // "&&", + // // Use our version of curl + // "/cfg/curl", + // url_of_snap.as_ref(), + // "--output", + // "/data/db.tgz", + // "&&", + // "cd", + // "/", + // "&&", + // "tar", + // "--skip-old-files", + // "-xzvf", + // "/data/db.tgz", + // ]); + + // trace!("cmd opts: {:#?}", opts); + // let _ = self.run_command(opts).await?; + + // Ok(()) + } + + async fn initialize_startup_files( + &self, + startup_files: &[TransferedFile], + ) -> Result<(), ProviderError> { + try_join_all( + startup_files + .iter() + .map(|file| self.send_file(&file.local_path, &file.remote_path, &file.mode)), + ) + .await?; + + Ok(()) + } + + pub(super) async fn start(&self) -> Result<(), ProviderError> { + self.docker_client + .container_exec( + &self.container_name, + vec!["sh", "-c", "echo start > /tmp/zombiepipe"], + None, + None, + ) + .await + .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!("failed to start pod {} after spawning", self.name), + err.into(), + ) + })? 
+ .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!("failed to start pod {} after spawning", self.name,), + anyhow!("command failed in container: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + fn get_remote_parent_dir(&self, remote_file_path: &Path) -> Option { + if let Some(remote_parent_dir) = remote_file_path.parent() { + if matches!( + remote_parent_dir.components().rev().peekable().peek(), + Some(Component::Normal(_)) + ) { + return Some(remote_parent_dir.to_path_buf()); + } + } + + None + } + + async fn create_remote_dir(&self, remote_dir: &Path) -> Result<(), ProviderError> { + let _ = self + .docker_client + .container_exec( + &self.container_name, + vec!["mkdir", "-p", &remote_dir.to_string_lossy()], + None, + None, + ) + .await + .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!( + "failed to create dir {} for container {}", + remote_dir.to_string_lossy(), + &self.name + ), + err.into(), + ) + })?; + + Ok(()) + } + + fn namespace_name(&self) -> String { + self.namespace + .upgrade() + .map(|namespace| namespace.name().to_string()) + .unwrap_or_else(|| panic!("namespace shouldn't be dropped, {THIS_IS_A_BUG}")) + } +} + +#[async_trait] +impl ProviderNode for DockerNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn args(&self) -> Vec<&str> { + self.args.iter().map(|arg| arg.as_str()).collect() + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn config_dir(&self) -> &PathBuf { + &self.config_dir + } + + fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + + fn relay_data_dir(&self) -> &PathBuf { + &self.relay_data_dir + } + + fn scripts_dir(&self) -> &PathBuf { + &self.scripts_dir + } + + fn log_path(&self) -> &PathBuf { + &self.log_path + } + + fn log_cmd(&self) -> String { + format!( + "{} logs -f {}", + self.docker_client.client_binary(), + self.container_name + ) + } + + fn path_in_node(&self, file: &Path) -> PathBuf { + // here 
is just a noop op since we will receive the path + // for the file inside the pod + PathBuf::from(file) + } + + async fn logs(&self) -> Result { + self.docker_client + .container_logs(&self.container_name) + .await + .map_err(|err| ProviderError::GetLogsFailed(self.name.to_string(), err.into())) + } + + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { + let logs = self.logs().await?; + + self.filesystem + .write(local_dest, logs) + .await + .map_err(|err| ProviderError::DumpLogsFailed(self.name.to_string(), err.into()))?; + + Ok(()) + } + + async fn run_command( + &self, + options: RunCommandOptions, + ) -> Result { + debug!( + "running command for {} with options {:?}", + self.name, options + ); + let command = [vec![options.program], options.args].concat(); + + self.docker_client + .container_exec( + &self.container_name, + vec!["sh", "-c", &command.join(" ")], + Some( + options + .env + .iter() + .map(|(k, v)| (k.as_ref(), v.as_ref())) + .collect(), + ), + None, + ) + .await + .map_err(|err| { + ProviderError::RunCommandError( + format!("sh -c {}", &command.join(" ")), + format!("in pod {}", self.name), + err.into(), + ) + }) + } + + async fn run_script( + &self, + _options: RunScriptOptions, + ) -> Result { + todo!() + } + + async fn send_file( + &self, + local_file_path: &Path, + remote_file_path: &Path, + mode: &str, + ) -> Result<(), ProviderError> { + if let Some(remote_parent_dir) = self.get_remote_parent_dir(remote_file_path) { + self.create_remote_dir(&remote_parent_dir).await?; + } + + debug!( + "starting sending file for {}: {} to {} with mode {}", + self.name, + local_file_path.to_string_lossy(), + remote_file_path.to_string_lossy(), + mode + ); + + let _ = self + .docker_client + .container_cp(&self.container_name, local_file_path, remote_file_path) + .await + .map_err(|err| { + ProviderError::SendFile( + local_file_path.to_string_lossy().to_string(), + self.name.clone(), + err.into(), + ) + }); + + let _ = self + 
.docker_client + .container_exec( + &self.container_name, + vec!["chmod", mode, &remote_file_path.to_string_lossy()], + None, + None, + ) + .await + .map_err(|err| { + ProviderError::SendFile( + self.name.clone(), + local_file_path.to_string_lossy().to_string(), + err.into(), + ) + })?; + + Ok(()) + } + + async fn receive_file( + &self, + _remote_src: &Path, + _local_dest: &Path, + ) -> Result<(), ProviderError> { + Ok(()) + } + + async fn ip(&self) -> Result { + let ip = self + .docker_client + .container_ip(&self.container_name) + .await + .map_err(|err| { + ProviderError::InvalidConfig(format!("Error getting container ip, err: {err}")) + })?; + + Ok(ip.parse::().map_err(|err| { + ProviderError::InvalidConfig(format!( + "Can not parse the container ip: {ip}, err: {err}" + )) + })?) + } + + async fn pause(&self) -> Result<(), ProviderError> { + self.docker_client + .container_exec( + &self.container_name, + vec!["sh", "-c", "echo pause > /tmp/zombiepipe"], + None, + None, + ) + .await + .map_err(|err| ProviderError::PauseNodeFailed(self.name.to_string(), err.into()))? + .map_err(|err| { + ProviderError::PauseNodeFailed( + self.name.to_string(), + anyhow!("error when pausing node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn resume(&self) -> Result<(), ProviderError> { + self.docker_client + .container_exec( + &self.container_name, + vec!["sh", "-c", "echo resume > /tmp/zombiepipe"], + None, + None, + ) + .await + .map_err(|err| ProviderError::PauseNodeFailed(self.name.to_string(), err.into()))? 
+ .map_err(|err| { + ProviderError::PauseNodeFailed( + self.name.to_string(), + anyhow!("error when pausing node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn restart(&self, after: Option) -> Result<(), ProviderError> { + if let Some(duration) = after { + sleep(duration).await; + } + + self.docker_client + .container_exec( + &self.container_name, + vec!["sh", "-c", "echo restart > /tmp/zombiepipe"], + None, + None, + ) + .await + .map_err(|err| ProviderError::PauseNodeFailed(self.name.to_string(), err.into()))? + .map_err(|err| { + ProviderError::PauseNodeFailed( + self.name.to_string(), + anyhow!("error when pausing node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn destroy(&self) -> Result<(), ProviderError> { + self.docker_client + .container_rm(&self.container_name) + .await + .map_err(|err| ProviderError::KillNodeFailed(self.name.to_string(), err.into()))?; + + if let Some(namespace) = self.namespace.upgrade() { + namespace.nodes.write().await.remove(&self.name); + } + + Ok(()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/provider.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/provider.rs new file mode 100644 index 00000000..0beaf85a --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/docker/provider.rs @@ -0,0 +1,161 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + sync::{Arc, Weak}, +}; + +use async_trait::async_trait; +use support::fs::FileSystem; +use tokio::sync::RwLock; + +use super::{client::DockerClient, namespace::DockerNamespace}; +use crate::{ + shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider, + ProviderError, ProviderNamespace, +}; + +pub const PROVIDER_NAME: &str = "docker"; + +pub struct DockerProvider +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + capabilities: ProviderCapabilities, + tmp_dir: PathBuf, + docker_client: DockerClient, + filesystem: FS, + 
pub(super) namespaces: RwLock>>>, +} + +impl DockerProvider +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub async fn new(filesystem: FS) -> Arc { + let docker_client = DockerClient::new().await.unwrap(); + + let provider = Arc::new_cyclic(|weak| DockerProvider { + weak: weak.clone(), + capabilities: ProviderCapabilities { + requires_image: true, + has_resources: false, + prefix_with_full_path: false, + use_default_ports_in_cmd: true, + }, + tmp_dir: std::env::temp_dir(), + docker_client, + filesystem, + namespaces: RwLock::new(HashMap::new()), + }); + + let cloned_provider = provider.clone(); + tokio::spawn(async move { + tokio::signal::ctrl_c().await.unwrap(); + for (_, ns) in cloned_provider.namespaces().await { + if ns.is_detached().await { + // best effort + let _ = ns.destroy().await; + } + } + + // exit the process (130, SIGINT) + std::process::exit(130) + }); + + provider + } + + pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { + self.tmp_dir = tmp_dir.into(); + self + } +} + +#[async_trait] +impl Provider for DockerProvider +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + PROVIDER_NAME + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + async fn namespaces(&self) -> HashMap { + self.namespaces + .read() + .await + .iter() + .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) + .collect() + } + + async fn create_namespace(&self) -> Result { + let namespace = DockerNamespace::new( + &self.weak, + &self.tmp_dir, + &self.capabilities, + &self.docker_client, + &self.filesystem, + None, + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_with_base_dir( + &self, + base_dir: &Path, + ) -> Result { + let namespace = DockerNamespace::new( + &self.weak, + &self.tmp_dir, + &self.capabilities, + &self.docker_client, + &self.filesystem, 
+ Some(base_dir), + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let (base_dir, name) = extract_namespace_info(json_value)?; + + let namespace = DockerNamespace::attach_to_live( + &self.weak, + &self.capabilities, + &self.docker_client, + &self.filesystem, + &base_dir, + &name, + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes.rs new file mode 100644 index 00000000..7c9e208e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes.rs @@ -0,0 +1,7 @@ +mod client; +mod namespace; +mod node; +mod pod_spec_builder; +mod provider; + +pub use provider::KubernetesProvider; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/client.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/client.rs new file mode 100644 index 00000000..41bffc11 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/client.rs @@ -0,0 +1,602 @@ +use std::{ + collections::BTreeMap, fmt::Debug, os::unix::process::ExitStatusExt, process::ExitStatus, + time::Duration, +}; + +use anyhow::anyhow; +use futures::{StreamExt, TryStreamExt}; +use k8s_openapi::api::core::v1::{ + ConfigMap, Namespace, Pod, PodSpec, PodStatus, Service, ServiceSpec, +}; +use kube::{ + api::{AttachParams, DeleteParams, ListParams, LogParams, PostParams, WatchParams}, + core::{DynamicObject, GroupVersionKind, ObjectMeta, TypeMeta, WatchEvent}, + discovery::ApiResource, + runtime::{conditions, wait::await_condition}, + Api, Resource, +}; +use serde::de::DeserializeOwned; +use support::constants::THIS_IS_A_BUG; +use tokio::{ + 
io::{AsyncRead, ErrorKind}, + net::TcpListener, + task::JoinHandle, +}; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tracing::{debug, trace}; + +use crate::{constants::LOCALHOST, types::ExecutionResult}; + +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +pub struct Error(#[from] anyhow::Error); + +pub type Result = core::result::Result; + +#[derive(Clone)] +pub struct KubernetesClient { + inner: kube::Client, +} + +impl KubernetesClient { + pub(super) async fn new() -> Result { + Ok(Self { + // TODO: make it more flexible with path to kube config + inner: kube::Client::try_default() + .await + .map_err(|err| Error::from(anyhow!("error initializing kubers client: {err}")))?, + }) + } + + #[allow(dead_code)] + pub(super) async fn get_namespace(&self, name: &str) -> Result> { + Api::::all(self.inner.clone()) + .get_opt(name.as_ref()) + .await + .map_err(|err| Error::from(anyhow!("error while getting namespace {name}: {err}"))) + } + + #[allow(dead_code)] + pub(super) async fn get_namespaces(&self) -> Result> { + Ok(Api::::all(self.inner.clone()) + .list(&ListParams::default()) + .await + .map_err(|err| Error::from(anyhow!("error while getting all namespaces: {err}")))? 
+ .into_iter() + .filter(|ns| matches!(&ns.meta().name, Some(name) if name.starts_with("zombienet"))) + .collect()) + } + + pub(super) async fn create_namespace( + &self, + name: &str, + labels: BTreeMap, + ) -> Result { + let namespaces = Api::::all(self.inner.clone()); + + let namespace = Namespace { + metadata: ObjectMeta { + name: Some(name.to_string()), + labels: Some(labels), + ..Default::default() + }, + ..Default::default() + }; + + namespaces + .create(&PostParams::default(), &namespace) + .await + .map_err(|err| Error::from(anyhow!("error while created namespace {name}: {err}")))?; + + self.wait_created(namespaces, name).await?; + + Ok(namespace) + } + + pub(super) async fn delete_namespace(&self, name: &str) -> Result<()> { + let namespaces = Api::::all(self.inner.clone()); + + namespaces + .delete(name, &DeleteParams::default()) + .await + .map_err(|err| Error::from(anyhow!("error while deleting namespace {name}: {err}")))?; + + Ok(()) + } + + pub(super) async fn create_config_map_from_file( + &self, + namespace: &str, + name: &str, + file_name: &str, + file_contents: &str, + labels: BTreeMap, + ) -> Result { + let config_maps = Api::::namespaced(self.inner.clone(), namespace); + + let config_map = ConfigMap { + metadata: ObjectMeta { + name: Some(name.to_string()), + namespace: Some(namespace.to_string()), + labels: Some(labels), + ..Default::default() + }, + data: Some(BTreeMap::from([( + file_name.to_string(), + file_contents.to_string(), + )])), + ..Default::default() + }; + + config_maps + .create(&PostParams::default(), &config_map) + .await + .map_err(|err| { + Error::from(anyhow!( + "error while creating config map {name} for {file_name}: {err}" + )) + })?; + + self.wait_created(config_maps, name).await?; + + Ok(config_map) + } + + pub(super) async fn create_pod( + &self, + namespace: &str, + name: &str, + spec: PodSpec, + labels: BTreeMap, + ) -> Result { + let pods = Api::::namespaced(self.inner.clone(), namespace); + + let pod = Pod { + 
metadata: ObjectMeta { + name: Some(name.to_string()), + namespace: Some(namespace.to_string()), + labels: Some(labels), + ..Default::default() + }, + spec: Some(spec), + ..Default::default() + }; + + pods.create(&PostParams::default(), &pod) + .await + .map_err(|err| Error::from(anyhow!("error while creating pod {name}: {err}")))?; + + trace!("Pod {name} checking for ready state!"); + let wait_ready = await_condition(pods, name, helpers::is_pod_ready()); + // TODO: we should use the `node_spawn_timeout` from global settings here. + let _ = tokio::time::timeout(Duration::from_secs(600), wait_ready) + .await + .map_err(|err| { + Error::from(anyhow!("error while awaiting pod {name} running: {err}")) + })?; + + debug!("Pod {name} is Ready!"); + Ok(pod) + } + + pub(super) async fn pod_logs(&self, namespace: &str, name: &str) -> Result { + Api::::namespaced(self.inner.clone(), namespace) + .logs( + name, + &LogParams { + pretty: true, + timestamps: true, + ..Default::default() + }, + ) + .await + .map_err(|err| Error::from(anyhow!("error while getting logs for pod {name}: {err}"))) + } + + pub(super) async fn pod_status(&self, namespace: &str, name: &str) -> Result { + let pod = Api::::namespaced(self.inner.clone(), namespace) + .get(name) + .await + .map_err(|err| Error::from(anyhow!("error while getting pod {name}: {err}")))?; + + let status = pod.status.ok_or(Error::from(anyhow!( + "error while getting status for pod {name}" + )))?; + Ok(status) + } + + #[allow(dead_code)] + pub(super) async fn create_pod_logs_stream( + &self, + namespace: &str, + name: &str, + ) -> Result> { + Ok(Box::new( + Api::::namespaced(self.inner.clone(), namespace) + .log_stream( + name, + &LogParams { + follow: true, + pretty: true, + timestamps: true, + ..Default::default() + }, + ) + .await + .map_err(|err| { + Error::from(anyhow!( + "error while getting a log stream for {name}: {err}" + )) + })? 
+ .compat(), + )) + } + + pub(super) async fn pod_exec( + &self, + namespace: &str, + name: &str, + command: Vec, + ) -> Result + where + S: Into + std::fmt::Debug + Send, + { + trace!("running command: {command:?} on pod {name} for ns {namespace}"); + let mut process = Api::::namespaced(self.inner.clone(), namespace) + .exec( + name, + command, + &AttachParams::default().stdout(true).stderr(true), + ) + .await + .map_err(|err| Error::from(anyhow!("error while exec in the pod {name}: {err}")))?; + + let stdout_stream = process.stdout().expect(&format!( + "stdout shouldn't be None when true passed to exec {THIS_IS_A_BUG}" + )); + let stdout = tokio_util::io::ReaderStream::new(stdout_stream) + .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) }) + .collect::>() + .await + .join(""); + let stderr_stream = process.stderr().expect(&format!( + "stderr shouldn't be None when true passed to exec {THIS_IS_A_BUG}" + )); + let stderr = tokio_util::io::ReaderStream::new(stderr_stream) + .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) }) + .collect::>() + .await + .join(""); + + let status = process + .take_status() + .expect(&format!( + "first call to status shouldn't fail {THIS_IS_A_BUG}" + )) + .await; + + // await process to finish + process.join().await.map_err(|err| { + Error::from(anyhow!( + "error while joining process during exec for {name}: {err}" + )) + })?; + + match status { + // command succeeded with stdout + Some(status) if status.status.as_ref().is_some_and(|s| s == "Success") => { + Ok(Ok(stdout)) + }, + // command failed + Some(status) if status.status.as_ref().is_some_and(|s| s == "Failure") => { + match status.reason { + // due to exit code + Some(reason) if reason == "NonZeroExitCode" => { + let exit_status = status + .details + .and_then(|details| { + details.causes.and_then(|causes| { + causes.first().and_then(|cause| { + cause.message.as_deref().and_then(|message| { + 
message.parse::().ok().map(ExitStatus::from_raw) + }) + }) + }) + }) + .expect( + &format!("command with non-zero exit code should have exit code present {THIS_IS_A_BUG}") + ); + + Ok(Err((exit_status, stderr))) + }, + // due to other unknown reason + Some(ref reason) => Err(Error::from(anyhow!( + format!("unhandled reason while exec for {name}: {reason}, stderr: {stderr}, status: {status:?}") + ))), + None => { + panic!("command had status but no reason was present, this is a bug") + }, + } + }, + Some(_) => { + unreachable!("command had status but it didn't matches either Success or Failure, this is a bug from the kube.rs library itself"); + }, + None => { + panic!("command has no status following its execution, this is a bug"); + }, + } + } + + pub(super) async fn delete_pod(&self, namespace: &str, name: &str) -> Result<()> { + let pods = Api::::namespaced(self.inner.clone(), namespace); + + pods.delete(name, &DeleteParams::default()) + .await + .map_err(|err| Error::from(anyhow!("error when deleting pod {name}: {err}")))?; + + await_condition(pods, name, conditions::is_deleted(name)) + .await + .map_err(|err| { + Error::from(anyhow!( + "error when waiting for pod {name} to be deleted: {err}" + )) + })?; + + Ok(()) + } + + pub(super) async fn create_service( + &self, + namespace: &str, + name: &str, + spec: ServiceSpec, + labels: BTreeMap, + ) -> Result { + let services = Api::::namespaced(self.inner.clone(), namespace); + + let service = Service { + metadata: ObjectMeta { + name: Some(name.to_string()), + namespace: Some(namespace.to_string()), + labels: Some(labels), + ..Default::default() + }, + spec: Some(spec), + ..Default::default() + }; + + services + .create(&PostParams::default(), &service) + .await + .map_err(|err| Error::from(anyhow!("error while creating service {name}: {err}")))?; + + Ok(service) + } + + pub(super) async fn create_pod_port_forward( + &self, + namespace: &str, + name: &str, + local_port: u16, + remote_port: u16, + ) -> Result<(u16, 
JoinHandle<()>)> { + let pods = Api::::namespaced(self.inner.clone(), namespace); + let bind = TcpListener::bind((LOCALHOST, local_port)) + .await + .map_err(|err| { + Error::from(anyhow!( + "error binding port {local_port} for pod {name}: {err}" + )) + })?; + let local_port = bind.local_addr().map_err(|err| Error(err.into()))?.port(); + let name = name.to_string(); + + const MAX_FAILURES: usize = 5; + let monitor_handle = tokio::spawn(async move { + let mut consecutive_failures = 0; + loop { + let (mut client_conn, _) = match bind.accept().await { + Ok(conn) => { + consecutive_failures = 0; + conn + }, + Err(e) => { + if consecutive_failures < MAX_FAILURES { + trace!("Port-forward accept error: {e:?}, retrying in 1s"); + tokio::time::sleep(Duration::from_secs(1)).await; + consecutive_failures += 1; + continue; + } else { + trace!("Port-forward accept failed too many times, giving up"); + break; + } + }, + }; + + let peer = match client_conn.peer_addr() { + Ok(addr) => addr, + Err(e) => { + trace!("Failed to get peer address: {e:?}"); + break; + }, + }; + + trace!("new connection on local_port: {local_port}, peer: {peer}"); + let (name, pods) = (name.clone(), pods.clone()); + + tokio::spawn(async move { + loop { + // Try to establish port-forward + let mut forwarder = match pods.portforward(&name, &[remote_port]).await { + Ok(f) => { + consecutive_failures = 0; + f + }, + Err(e) => { + consecutive_failures += 1; + if consecutive_failures < MAX_FAILURES { + trace!("portforward failed to establish ({}/{}): {e:?}, retrying in 1s", + consecutive_failures, MAX_FAILURES); + tokio::time::sleep(Duration::from_secs(1)).await; + continue; + } else { + trace!("portforward failed to establish after {} attempts: {e:?}, closing connection", + consecutive_failures); + break; + } + }, + }; + + let mut upstream_conn = match forwarder.take_stream(remote_port) { + Some(s) => s, + None => { + trace!("Failed to take stream for remote_port: {remote_port}, retrying in 1s"); + 
tokio::time::sleep(Duration::from_secs(1)).await; + continue; + }, + }; + + match tokio::io::copy_bidirectional(&mut client_conn, &mut upstream_conn) + .await + { + Ok((_n1, _n2)) => { + // EOF reached, close connection + trace!("copy_bidirectional finished (EOF), closing connection"); + + drop(upstream_conn); + let _ = forwarder.join().await; + + break; + }, + Err(e) => { + let kind = e.kind(); + match kind { + ErrorKind::ConnectionReset + | ErrorKind::ConnectionAborted + | ErrorKind::ConnectionRefused + | ErrorKind::TimedOut => { + consecutive_failures += 1; + if consecutive_failures < MAX_FAILURES { + trace!("Network error ({kind:?}): {e:?}, retrying port-forward for this connection"); + tokio::time::sleep(Duration::from_secs(1)).await; + continue; + } else { + trace!("portforward failed to establish after {} attempts: {e:?}, closing connection", + consecutive_failures); + break; + } + }, + _ => { + trace!("Non-network error ({kind:?}): {e:?}, closing connection"); + break; + }, + } + }, + } + } + }); + + trace!("finished forwarder process for local port: {local_port}, peer: {peer}"); + } + }); + + Ok((local_port, monitor_handle)) + } + + /// Create resources from yamls in `static-configs` directory + pub(super) async fn create_static_resource( + &self, + namespace: &str, + raw_manifest: &str, + ) -> Result<()> { + let tm: TypeMeta = serde_yaml::from_str(raw_manifest).map_err(|err| { + Error::from(anyhow!( + "error while extracting TypeMeta from manifest: {raw_manifest}: {err}" + )) + })?; + let gvk = GroupVersionKind::try_from(&tm).map_err(|err| { + Error::from(anyhow!( + "error while extracting GroupVersionKind from manifest: {raw_manifest}: {err}" + )) + })?; + + let ar = ApiResource::from_gvk(&gvk); + let api: Api = Api::namespaced_with(self.inner.clone(), namespace, &ar); + + api.create( + &PostParams::default(), + &serde_yaml::from_str(raw_manifest).unwrap(), + ) + .await + .map_err(|err| { + Error::from(anyhow!( + "error while creating static-config 
{raw_manifest}: {err}" + )) + })?; + + Ok(()) + } + + async fn wait_created(&self, api: Api, name: &str) -> Result<()> + where + K: Clone + DeserializeOwned + Debug, + { + let params = &WatchParams::default().fields(&format!("metadata.name={name}")); + let mut stream = api + .watch(params, "0") + .await + .map_err(|err| { + Error::from(anyhow!( + "error while awaiting first response when resource {name} is created: {err}" + )) + })? + .boxed(); + + while let Some(status) = stream.try_next().await.map_err(|err| { + Error::from(anyhow!( + "error while awaiting next change after resource {name} is created: {err}" + )) + })? { + match status { + WatchEvent::Added(_) => break, + WatchEvent::Error(err) => Err(Error::from(anyhow!( + "error while awaiting resource {name} is created: {err}" + )))?, + WatchEvent::Bookmark(_) => { + // bookmark events are periodically sent as keep-alive/checkpoint, we should continue waiting + } + any_other_event => panic!("Unexpected event happened while creating '{name}' {THIS_IS_A_BUG}. 
Event: {any_other_event:?}"), + } + } + + Ok(()) + } +} + +mod helpers { + use k8s_openapi::api::core::v1::Pod; + use kube::runtime::wait::Condition; + use tracing::trace; + + /// An await condition for `Pod` that returns `true` once it is ready + /// based on [`kube::runtime::wait::conditions::is_pod_running`] + pub fn is_pod_ready() -> impl Condition { + |obj: Option<&Pod>| { + if let Some(pod) = &obj { + if let Some(status) = &pod.status { + if let Some(conditions) = &status.conditions { + let ready = conditions + .iter() + .any(|cond| cond.status == "True" && cond.type_ == "Ready"); + + if ready { + trace!("{:#?}", status); + return ready; + } + } + } + } + false + } + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/namespace.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/namespace.rs new file mode 100644 index 00000000..c51d9b7d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/namespace.rs @@ -0,0 +1,600 @@ +use std::{ + collections::{BTreeMap, HashMap}, + env, + path::{Path, PathBuf}, + sync::{Arc, Weak}, +}; + +use async_trait::async_trait; +use k8s_openapi::{ + api::core::v1::{ + Container, ContainerPort, HTTPGetAction, PodSpec, Probe, ServicePort, ServiceSpec, + }, + apimachinery::pkg::util::intstr::IntOrString, +}; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem, replacer::apply_replacements}; +use tokio::sync::{Mutex, RwLock}; +use tracing::{debug, trace, warn}; +use uuid::Uuid; + +use super::{client::KubernetesClient, node::KubernetesNode}; +use crate::{ + constants::NAMESPACE_PREFIX, + kubernetes::{ + node::{DeserializableKubernetesNodeOptions, KubernetesNodeOptions}, + provider, + }, + shared::helpers::{extract_execution_result, running_in_ci}, + types::{ + GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, + SpawnNodeOptions, + }, + DynNode, KubernetesProvider, ProviderError, ProviderNamespace, ProviderNode, +}; + +const 
FILE_SERVER_IMAGE: &str = "europe-west3-docker.pkg.dev/parity-zombienet/zombienet-public-images/zombienet-file-server:latest"; + +// env var used by our internal CI to pass the namespace created and ready to use +const ZOMBIE_K8S_CI_NAMESPACE: &str = "ZOMBIE_K8S_CI_NAMESPACE"; + +pub(super) struct KubernetesNamespace +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + provider: Weak>, + name: String, + base_dir: PathBuf, + capabilities: ProviderCapabilities, + k8s_client: KubernetesClient, + filesystem: FS, + file_server_fw_task: RwLock>>, + delete_on_drop: Arc>, + pub(super) file_server_port: RwLock>, + pub(super) nodes: RwLock>>>, +} + +impl KubernetesNamespace +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + provider: &Weak>, + tmp_dir: &PathBuf, + capabilities: &ProviderCapabilities, + k8s_client: &KubernetesClient, + filesystem: &FS, + custom_base_dir: Option<&Path>, + ) -> Result, ProviderError> { + // If the namespace is already provided + let name = if let Ok(name) = env::var(ZOMBIE_K8S_CI_NAMESPACE) { + name + } else { + format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4()) + }; + + let base_dir = if let Some(custom_base_dir) = custom_base_dir { + if !filesystem.exists(custom_base_dir).await { + filesystem.create_dir(custom_base_dir).await?; + } else { + warn!( + "⚠️ Using and existing directory {} as base dir", + custom_base_dir.to_string_lossy() + ); + } + PathBuf::from(custom_base_dir) + } else { + let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]); + filesystem.create_dir(&base_dir).await?; + base_dir + }; + + let namespace = Arc::new_cyclic(|weak| KubernetesNamespace { + weak: weak.clone(), + provider: provider.clone(), + name, + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + k8s_client: k8s_client.clone(), + file_server_port: RwLock::new(None), + file_server_fw_task: RwLock::new(None), + nodes: RwLock::new(HashMap::new()), + delete_on_drop: 
Arc::new(Mutex::new(true)), + }); + + namespace.initialize().await?; + + Ok(namespace) + } + + pub(super) async fn attach_to_live( + provider: &Weak>, + capabilities: &ProviderCapabilities, + k8s_client: &KubernetesClient, + filesystem: &FS, + custom_base_dir: &Path, + name: &str, + ) -> Result, ProviderError> { + let base_dir = custom_base_dir.to_path_buf(); + + let namespace = Arc::new_cyclic(|weak| KubernetesNamespace { + weak: weak.clone(), + provider: provider.clone(), + name: name.to_owned(), + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + k8s_client: k8s_client.clone(), + file_server_port: RwLock::new(None), + file_server_fw_task: RwLock::new(None), + nodes: RwLock::new(HashMap::new()), + delete_on_drop: Arc::new(Mutex::new(false)), + }); + + namespace.setup_file_server_port_fwd("fileserver").await?; + + Ok(namespace) + } + + async fn initialize(&self) -> Result<(), ProviderError> { + // Initialize the namespace IFF + // we are not in CI or we don't have the env `ZOMBIE_NAMESPACE` set + if env::var(ZOMBIE_K8S_CI_NAMESPACE).is_err() || !running_in_ci() { + self.initialize_k8s().await?; + } + + // Ensure namespace isolation and minimal resources IFF we are running in CI + if running_in_ci() { + self.initialize_static_resources().await? + } + + self.initialize_file_server().await?; + + self.setup_script_config_map( + "zombie-wrapper", + include_str!("../shared/scripts/zombie-wrapper.sh"), + "zombie_wrapper_config_map_manifest.yaml", + // TODO: add correct labels + BTreeMap::new(), + ) + .await?; + + self.setup_script_config_map( + "helper-binaries-downloader", + include_str!("../shared/scripts/helper-binaries-downloader.sh"), + "helper_binaries_downloader_config_map_manifest.yaml", + // TODO: add correct labels + BTreeMap::new(), + ) + .await?; + + Ok(()) + } + + async fn initialize_k8s(&self) -> Result<(), ProviderError> { + // TODO (javier): check with Hamid if we are using this labels in any scheduling logic. 
+ let labels = BTreeMap::from([ + ( + "jobId".to_string(), + env::var("CI_JOB_ID").unwrap_or("".to_string()), + ), + ( + "projectName".to_string(), + env::var("CI_PROJECT_NAME").unwrap_or("".to_string()), + ), + ( + "projectId".to_string(), + env::var("CI_PROJECT_ID").unwrap_or("".to_string()), + ), + ]); + + let manifest = self + .k8s_client + .create_namespace(&self.name, labels) + .await + .map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + + let serialized_manifest = serde_yaml::to_string(&manifest).map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + + let dest_path = + PathBuf::from_iter([&self.base_dir, &PathBuf::from("namespace_manifest.yaml")]); + + self.filesystem + .write(dest_path, serialized_manifest) + .await?; + + Ok(()) + } + + async fn initialize_static_resources(&self) -> Result<(), ProviderError> { + let np_manifest = apply_replacements( + include_str!("./static-configs/namespace-network-policy.yaml"), + &HashMap::from([("namespace", self.name())]), + ); + + // Apply NetworkPolicy manifest + self.k8s_client + .create_static_resource(&self.name, &np_manifest) + .await + .map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + + // Apply LimitRange manifest + self.k8s_client + .create_static_resource( + &self.name, + include_str!("./static-configs/baseline-resources.yaml"), + ) + .await + .map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + Ok(()) + } + + async fn initialize_file_server(&self) -> Result<(), ProviderError> { + let name = "fileserver".to_string(); + let labels = BTreeMap::from([ + ("app.kubernetes.io/name".to_string(), name.clone()), + ( + "x-infra-instance".to_string(), + env::var("X_INFRA_INSTANCE").unwrap_or("ondemand".to_string()), + ), + ]); + + let pod_spec = PodSpec { + hostname: Some(name.clone()), + containers: vec![Container { + name: 
name.clone(), + image: Some(FILE_SERVER_IMAGE.to_string()), + image_pull_policy: Some("Always".to_string()), + ports: Some(vec![ContainerPort { + container_port: 80, + ..Default::default() + }]), + startup_probe: Some(Probe { + http_get: Some(HTTPGetAction { + path: Some("/".to_string()), + port: IntOrString::Int(80), + ..Default::default() + }), + initial_delay_seconds: Some(1), + period_seconds: Some(2), + failure_threshold: Some(3), + ..Default::default() + }), + ..Default::default() + }], + restart_policy: Some("OnFailure".into()), + ..Default::default() + }; + + let pod_manifest = self + .k8s_client + .create_pod(&self.name, &name, pod_spec, labels.clone()) + .await + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + // TODO: remove duplication across methods + let pod_serialized_manifest = serde_yaml::to_string(&pod_manifest) + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + let pod_dest_path = PathBuf::from_iter([ + &self.base_dir, + &PathBuf::from("file_server_pod_manifest.yaml"), + ]); + + self.filesystem + .write(pod_dest_path, pod_serialized_manifest) + .await?; + + let service_spec = ServiceSpec { + selector: Some(labels.clone()), + ports: Some(vec![ServicePort { + port: 80, + ..Default::default() + }]), + ..Default::default() + }; + + let service_manifest = self + .k8s_client + .create_service(&self.name, &name, service_spec, labels) + .await + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + let serialized_service_manifest = serde_yaml::to_string(&service_manifest) + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + let service_dest_path = PathBuf::from_iter([ + &self.base_dir, + &PathBuf::from("file_server_service_manifest.yaml"), + ]); + + self.filesystem + .write(service_dest_path, serialized_service_manifest) + .await?; + + self.setup_file_server_port_fwd(&name).await?; + + Ok(()) + } + + async fn setup_file_server_port_fwd(&self, name: &str) -> Result<(), 
ProviderError> { + let (port, task) = self + .k8s_client + .create_pod_port_forward(&self.name, name, 0, 80) + .await + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + *self.file_server_port.write().await = Some(port); + *self.file_server_fw_task.write().await = Some(task); + + Ok(()) + } + + async fn setup_script_config_map( + &self, + name: &str, + script_contents: &str, + local_manifest_name: &str, + labels: BTreeMap, + ) -> Result<(), ProviderError> { + let manifest = self + .k8s_client + .create_config_map_from_file( + &self.name, + name, + &format!("{name}.sh"), + script_contents, + labels, + ) + .await + .map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + + let serializer_manifest = serde_yaml::to_string(&manifest).map_err(|err| { + ProviderError::CreateNamespaceFailed(self.name.to_string(), err.into()) + })?; + + let dest_path = PathBuf::from_iter([&self.base_dir, &PathBuf::from(local_manifest_name)]); + + self.filesystem + .write(dest_path, serializer_manifest) + .await?; + + Ok(()) + } + + pub async fn set_delete_on_drop(&self, delete_on_drop: bool) { + *self.delete_on_drop.lock().await = delete_on_drop; + } + + pub async fn delete_on_drop(&self) -> bool { + if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() { + *delete_on_drop + } else { + // if we can't lock just remove the ns + true + } + } +} + +impl Drop for KubernetesNamespace +where + FS: FileSystem + Send + Sync + Clone, +{ + fn drop(&mut self) { + let ns_name = self.name.clone(); + if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() { + if *delete_on_drop { + let client = self.k8s_client.clone(); + let provider = self.provider.upgrade(); + futures::executor::block_on(async move { + trace!("🧟 deleting ns {ns_name} from cluster"); + let _ = client.delete_namespace(&ns_name).await; + if let Some(provider) = provider { + provider.namespaces.write().await.remove(&ns_name); + } + + trace!("✅ deleted"); + }); + } else { 
+ trace!("⚠️ leaking ns {ns_name} in cluster"); + } + }; + } +} + +#[async_trait] +impl ProviderNamespace for KubernetesNamespace +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + fn provider_name(&self) -> &str { + provider::PROVIDER_NAME + } + + async fn detach(&self) { + self.set_delete_on_drop(false).await; + } + + async fn is_detached(&self) -> bool { + self.delete_on_drop().await + } + + async fn nodes(&self) -> HashMap { + self.nodes + .read() + .await + .iter() + .map(|(name, node)| (name.clone(), node.clone() as DynNode)) + .collect() + } + + async fn get_node_available_args( + &self, + (command, image): (String, Option), + ) -> Result { + let node_image = image.expect(&format!("image should be present when getting node available args with kubernetes provider {THIS_IS_A_BUG}")); + + // run dummy command in new pod + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "cat".to_string()) + .image(node_image.clone()), + ) + .await?; + + let available_args_output = temp_node + .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"])) + .await? 
+ .map_err(|(_exit, status)| { + ProviderError::NodeAvailableArgsError(node_image, command, status) + })?; + + temp_node.destroy().await?; + + Ok(available_args_output) + } + + async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result { + trace!("spawn node options {options:?}"); + + let node = KubernetesNode::new(KubernetesNodeOptions { + namespace: &self.weak, + namespace_base_dir: &self.base_dir, + name: &options.name, + image: options.image.as_ref(), + program: &options.program, + args: &options.args, + env: &options.env, + startup_files: &options.injected_files, + resources: options.resources.as_ref(), + db_snapshot: options.db_snapshot.as_ref(), + k8s_client: &self.k8s_client, + filesystem: &self.filesystem, + }) + .await?; + + self.nodes + .write() + .await + .insert(node.name().to_string(), node.clone()); + + Ok(node) + } + + async fn spawn_node_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let deserializable: DeserializableKubernetesNodeOptions = + serde_json::from_value(json_value.clone())?; + let options = KubernetesNodeOptions::from_deserializable( + &deserializable, + &self.weak, + &self.base_dir, + &self.k8s_client, + &self.filesystem, + ); + + let node = KubernetesNode::attach_to_live(options).await?; + + self.nodes + .write() + .await + .insert(node.name().to_string(), node.clone()); + + Ok(node) + } + + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> { + debug!("generate files options {options:#?}"); + + let node_name = options + .temp_name + .unwrap_or_else(|| format!("temp-{}", Uuid::new_v4())); + let node_image = options + .image + .expect(&format!("image should be present when generating files with kubernetes provider {THIS_IS_A_BUG}")); + + // run dummy command in new pod + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(node_name, "cat".to_string()) + .injected_files(options.injected_files) + .image(node_image), + ) + .await?; + + for GenerateFileCommand 
{ + program, + args, + env, + local_output_path, + } in options.commands + { + let local_output_full_path = format!( + "{}{}{}", + self.base_dir.to_string_lossy(), + if local_output_path.starts_with("/") { + "" + } else { + "/" + }, + local_output_path.to_string_lossy() + ); + + let contents = extract_execution_result( + &temp_node, + RunCommandOptions { program, args, env }, + options.expected_path.as_ref(), + ) + .await?; + self.filesystem + .write(local_output_full_path, contents) + .await + .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?; + } + + temp_node.destroy().await + } + + async fn static_setup(&self) -> Result<(), ProviderError> { + todo!() + } + + async fn destroy(&self) -> Result<(), ProviderError> { + let _ = self + .k8s_client + .delete_namespace(&self.name) + .await + .map_err(|err| ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into()))?; + + if let Some(provider) = self.provider.upgrade() { + provider.namespaces.write().await.remove(&self.name); + } + + Ok(()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/node.rs new file mode 100644 index 00000000..422a9a82 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/node.rs @@ -0,0 +1,886 @@ +use std::{ + collections::{BTreeMap, HashMap}, + env, + net::IpAddr, + path::{Component, Path, PathBuf}, + sync::{Arc, Weak}, + time::Duration, +}; + +use anyhow::anyhow; +use async_trait::async_trait; +use configuration::{shared::resources::Resources, types::AssetLocation}; +use futures::future::try_join_all; +use k8s_openapi::api::core::v1::{ServicePort, ServiceSpec}; +use serde::{Deserialize, Serialize}; +use sha2::Digest; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; +use tokio::{sync::RwLock, task::JoinHandle, time::sleep, try_join}; +use tracing::{debug, trace, warn}; +use url::Url; + +use super::{ + client::KubernetesClient, 
namespace::KubernetesNamespace, pod_spec_builder::PodSpecBuilder, +}; +use crate::{ + constants::{ + NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, NODE_SCRIPTS_DIR, P2P_PORT, + PROMETHEUS_PORT, RPC_HTTP_PORT, RPC_WS_PORT, + }, + kubernetes, + types::{ExecutionResult, RunCommandOptions, RunScriptOptions, TransferedFile}, + ProviderError, ProviderNamespace, ProviderNode, +}; + +pub(super) struct KubernetesNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) namespace: &'a Weak>, + pub(super) namespace_base_dir: &'a PathBuf, + pub(super) name: &'a str, + pub(super) image: Option<&'a String>, + pub(super) program: &'a str, + pub(super) args: &'a [String], + pub(super) env: &'a [(String, String)], + pub(super) startup_files: &'a [TransferedFile], + pub(super) resources: Option<&'a Resources>, + pub(super) db_snapshot: Option<&'a AssetLocation>, + pub(super) k8s_client: &'a KubernetesClient, + pub(super) filesystem: &'a FS, +} + +impl<'a, FS> KubernetesNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) fn from_deserializable( + deserializable: &'a DeserializableKubernetesNodeOptions, + namespace: &'a Weak>, + namespace_base_dir: &'a PathBuf, + k8s_client: &'a KubernetesClient, + filesystem: &'a FS, + ) -> KubernetesNodeOptions<'a, FS> { + KubernetesNodeOptions { + namespace, + namespace_base_dir, + name: &deserializable.name, + image: deserializable.image.as_ref(), + program: &deserializable.program, + args: &deserializable.args, + env: &deserializable.env, + startup_files: &[], + resources: deserializable.resources.as_ref(), + db_snapshot: None, + k8s_client, + filesystem, + } + } +} + +#[derive(Deserialize)] +pub(super) struct DeserializableKubernetesNodeOptions { + pub(super) name: String, + pub(super) image: Option, + pub(super) program: String, + pub(super) args: Vec, + pub(super) env: Vec<(String, String)>, + pub(super) resources: Option, +} + +type FwdInfo = (u16, 
JoinHandle<()>); + +#[derive(Serialize)] +pub(super) struct KubernetesNode +where + FS: FileSystem + Send + Sync + Clone, +{ + #[serde(skip)] + namespace: Weak>, + name: String, + image: String, + program: String, + args: Vec, + env: Vec<(String, String)>, + resources: Option, + base_dir: PathBuf, + config_dir: PathBuf, + data_dir: PathBuf, + relay_data_dir: PathBuf, + scripts_dir: PathBuf, + log_path: PathBuf, + #[serde(skip)] + k8s_client: KubernetesClient, + #[serde(skip)] + http_client: reqwest::Client, + #[serde(skip)] + filesystem: FS, + #[serde(skip)] + port_fwds: RwLock>, + provider_tag: String, +} + +impl KubernetesNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + options: KubernetesNodeOptions<'_, FS>, + ) -> Result, ProviderError> { + let image = options.image.ok_or_else(|| { + ProviderError::MissingNodeInfo(options.name.to_string(), "missing image".to_string()) + })?; + + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + filesystem.create_dir_all(&base_dir).await?; + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = base_dir.join("node.log"); + + try_join!( + filesystem.create_dir(&config_dir), + filesystem.create_dir(&data_dir), + filesystem.create_dir(&relay_data_dir), + filesystem.create_dir(&scripts_dir), + )?; + + let node = Arc::new(KubernetesNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + image: image.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + resources: 
options.resources.cloned(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + filesystem: filesystem.clone(), + k8s_client: options.k8s_client.clone(), + http_client: reqwest::Client::new(), + port_fwds: Default::default(), + provider_tag: kubernetes::provider::PROVIDER_NAME.to_string(), + }); + + node.initialize_k8s().await?; + + if let Some(db_snap) = options.db_snapshot { + node.initialize_db_snapshot(db_snap).await?; + } + + node.initialize_startup_files(options.startup_files).await?; + + node.start().await?; + + Ok(node) + } + + pub(super) async fn attach_to_live( + options: KubernetesNodeOptions<'_, FS>, + ) -> Result, ProviderError> { + let image = options.image.ok_or_else(|| { + ProviderError::MissingNodeInfo(options.name.to_string(), "missing image".to_string()) + })?; + + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + filesystem.create_dir_all(&base_dir).await?; + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = base_dir.join("node.log"); + + let node = Arc::new(KubernetesNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + image: image.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + resources: options.resources.cloned(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + filesystem: filesystem.clone(), + k8s_client: options.k8s_client.clone(), + http_client: reqwest::Client::new(), + port_fwds: Default::default(), + provider_tag: 
kubernetes::provider::PROVIDER_NAME.to_string(), + }); + + Ok(node) + } + + async fn initialize_k8s(&self) -> Result<(), ProviderError> { + let labels = BTreeMap::from([ + ( + "app.kubernetes.io/name".to_string(), + self.name().to_string(), + ), + ( + "x-infra-instance".to_string(), + env::var("X_INFRA_INSTANCE").unwrap_or("ondemand".to_string()), + ), + ]); + + // Create pod + let pod_spec = PodSpecBuilder::build( + &self.name, + &self.image, + self.resources.as_ref(), + &self.program, + &self.args, + &self.env, + ); + + let manifest = self + .k8s_client + .create_pod(&self.namespace_name(), &self.name, pod_spec, labels.clone()) + .await + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.clone(), err.into()))?; + + let serialized_manifest = serde_yaml::to_string(&manifest) + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.to_string(), err.into()))?; + + let dest_path = PathBuf::from_iter([ + &self.base_dir, + &PathBuf::from(format!("{}_manifest.yaml", &self.name)), + ]); + + self.filesystem + .write(dest_path, serialized_manifest) + .await + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.to_string(), err.into()))?; + + // Create service for pod + let service_spec = ServiceSpec { + selector: Some(labels.clone()), + ports: Some(vec![ + ServicePort { + port: P2P_PORT.into(), + name: Some("p2p".into()), + ..Default::default() + }, + ServicePort { + port: RPC_WS_PORT.into(), + name: Some("rpc".into()), + ..Default::default() + }, + ServicePort { + port: RPC_HTTP_PORT.into(), + name: Some("rpc-http".into()), + ..Default::default() + }, + ServicePort { + port: PROMETHEUS_PORT.into(), + name: Some("prom".into()), + ..Default::default() + }, + ]), + ..Default::default() + }; + + let service_manifest = self + .k8s_client + .create_service(&self.namespace_name(), &self.name, service_spec, labels) + .await + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + let serialized_service_manifest = 
serde_yaml::to_string(&service_manifest) + .map_err(|err| ProviderError::FileServerSetupError(err.into()))?; + + let service_dest_path = PathBuf::from_iter([ + &self.base_dir, + &PathBuf::from(format!("{}_svc_manifest.yaml", &self.name)), + ]); + + self.filesystem + .write(service_dest_path, serialized_service_manifest) + .await?; + + Ok(()) + } + + async fn initialize_db_snapshot( + &self, + db_snapshot: &AssetLocation, + ) -> Result<(), ProviderError> { + trace!("snap: {db_snapshot}"); + let url_of_snap = match db_snapshot { + AssetLocation::Url(location) => location.clone(), + AssetLocation::FilePath(filepath) => { + let (url, _) = self.upload_to_fileserver(filepath).await?; + url + }, + }; + + // we need to get the snapshot from a public access + // and extract to /data + let opts = RunCommandOptions::new("mkdir").args([ + "-p", + "/data/", + "&&", + "mkdir", + "-p", + "/relay-data/", + "&&", + // Use our version of curl + "/cfg/curl", + url_of_snap.as_ref(), + "--output", + "/data/db.tgz", + "&&", + "cd", + "/", + "&&", + "tar", + "--skip-old-files", + "-xzvf", + "/data/db.tgz", + ]); + + trace!("cmd opts: {:#?}", opts); + let _ = self.run_command(opts).await?; + + Ok(()) + } + + async fn initialize_startup_files( + &self, + startup_files: &[TransferedFile], + ) -> Result<(), ProviderError> { + try_join_all( + startup_files + .iter() + .map(|file| self.send_file(&file.local_path, &file.remote_path, &file.mode)), + ) + .await?; + + Ok(()) + } + + pub(super) async fn start(&self) -> Result<(), ProviderError> { + self.k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["sh", "-c", "echo start > /tmp/zombiepipe"], + ) + .await + .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!("failed to start pod {} after spawning", self.name), + err.into(), + ) + })? 
+ .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!("failed to start pod {} after spawning", self.name,), + anyhow!("command failed in container: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + fn get_remote_parent_dir(&self, remote_file_path: &Path) -> Option { + if let Some(remote_parent_dir) = remote_file_path.parent() { + if matches!( + remote_parent_dir.components().rev().peekable().peek(), + Some(Component::Normal(_)) + ) { + return Some(remote_parent_dir.to_path_buf()); + } + } + + None + } + + async fn create_remote_dir(&self, remote_dir: &Path) -> Result<(), ProviderError> { + let _ = self + .k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["mkdir", "-p", &remote_dir.to_string_lossy()], + ) + .await + .map_err(|err| { + ProviderError::NodeSpawningFailed( + format!( + "failed to create dir {} for pod {}", + remote_dir.to_string_lossy(), + &self.name + ), + err.into(), + ) + })?; + + Ok(()) + } + + fn namespace_name(&self) -> String { + self.namespace + .upgrade() + .map(|namespace| namespace.name().to_string()) + .unwrap_or_else(|| panic!("namespace shouldn't be dropped, {THIS_IS_A_BUG}")) + } + + async fn upload_to_fileserver(&self, location: &Path) -> Result<(Url, String), ProviderError> { + let file_name = if let Some(name) = location.file_name() { + name.to_string_lossy() + } else { + "unnamed".into() + }; + + let data = self.filesystem.read(location).await?; + let content_hashed = hex::encode(sha2::Sha256::digest(&data)); + let req = self + .http_client + .head(format!( + "http://{}/{content_hashed}__{file_name}", + self.file_server_local_host().await? 
+ )) + .build() + .map_err(|err| { + ProviderError::UploadFile(location.to_string_lossy().to_string(), err.into()) + })?; + + let url = req.url().clone(); + let res = self.http_client.execute(req).await.map_err(|err| { + ProviderError::UploadFile(location.to_string_lossy().to_string(), err.into()) + })?; + + if res.status() != reqwest::StatusCode::OK { + // we need to upload the file + self.http_client + .post(url.as_ref()) + .body(data) + .send() + .await + .map_err(|err| { + ProviderError::UploadFile(location.to_string_lossy().to_string(), err.into()) + })?; + } + + Ok((url, content_hashed)) + } + + async fn file_server_local_host(&self) -> Result { + if let Some(namespace) = self.namespace.upgrade() { + if let Some(port) = *namespace.file_server_port.read().await { + return Ok(format!("localhost:{port}")); + } + } + + Err(ProviderError::FileServerSetupError(anyhow!( + "file server port not bound locally" + ))) + } + + async fn download_file( + &self, + url: &str, + remote_file_path: &Path, + hash: Option<&str>, + ) -> Result<(), ProviderError> { + let r = self + .k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec![ + "/cfg/curl", + url, + "--output", + &remote_file_path.to_string_lossy(), + ], + ) + .await + .map_err(|err| { + ProviderError::DownloadFile( + remote_file_path.to_string_lossy().to_string(), + anyhow!(format!("node: {}, err: {}", self.name(), err)), + ) + })?; + + trace!("download url {} result: {:?}", url, r); + + if r.is_err() { + return Err(ProviderError::DownloadFile( + remote_file_path.to_string_lossy().to_string(), + anyhow!(format!("node: {}, err downloading file", self.name())), + )); + } + + if let Some(hash) = hash { + // check if the hash of the file is correct + let res = self + .k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec![ + "/cfg/coreutils", + "sha256sum", + &remote_file_path.to_string_lossy(), + ], + ) + .await + .map_err(|err| { + ProviderError::DownloadFile( + 
remote_file_path.to_string_lossy().to_string(), + anyhow!(format!("node: {}, err: {}", self.name(), err)), + ) + })?; + + if let Ok(output) = res { + if !output.contains(hash) { + return Err(ProviderError::DownloadFile( + remote_file_path.to_string_lossy().to_string(), + anyhow!(format!("node: {}, invalid sha256sum hash: {hash} for file, output was {output}", self.name())), + )); + } + } else { + return Err(ProviderError::DownloadFile( + remote_file_path.to_string_lossy().to_string(), + anyhow!(format!( + "node: {}, err calculating sha256sum for file {:?}", + self.name(), + res + )), + )); + } + } + + Ok(()) + } +} + +#[async_trait] +impl ProviderNode for KubernetesNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn args(&self) -> Vec<&str> { + self.args.iter().map(|arg| arg.as_str()).collect() + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn config_dir(&self) -> &PathBuf { + &self.config_dir + } + + fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + + fn relay_data_dir(&self) -> &PathBuf { + &self.relay_data_dir + } + + fn scripts_dir(&self) -> &PathBuf { + &self.scripts_dir + } + + fn log_path(&self) -> &PathBuf { + &self.log_path + } + + fn log_cmd(&self) -> String { + format!("kubectl -n {} logs {}", self.namespace_name(), self.name) + } + + fn path_in_node(&self, file: &Path) -> PathBuf { + // here is just a noop op since we will receive the path + // for the file inside the pod + PathBuf::from(file) + } + + // TODO: handle log rotation as we do in v1 + async fn logs(&self) -> Result { + self.k8s_client + .pod_logs(&self.namespace_name(), &self.name) + .await + .map_err(|err| ProviderError::GetLogsFailed(self.name.to_string(), err.into())) + } + + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { + let logs = self.logs().await?; + + self.filesystem + .write(local_dest, logs) + .await + .map_err(|err| 
ProviderError::DumpLogsFailed(self.name.to_string(), err.into()))?; + + Ok(()) + } + + async fn create_port_forward( + &self, + local_port: u16, + remote_port: u16, + ) -> Result, ProviderError> { + // If the fwd exist just return the local port + if let Some(fwd_info) = self.port_fwds.read().await.get(&remote_port) { + return Ok(Some(fwd_info.0)); + }; + + let (port, task) = self + .k8s_client + .create_pod_port_forward(&self.namespace_name(), &self.name, local_port, remote_port) + .await + .map_err(|err| ProviderError::PortForwardError(local_port, remote_port, err.into()))?; + + self.port_fwds + .write() + .await + .insert(remote_port, (port, task)); + + Ok(Some(port)) + } + + async fn run_command( + &self, + options: RunCommandOptions, + ) -> Result { + let mut command = vec![]; + + for (name, value) in options.env { + command.push(format!("export {name}={value};")); + } + + command.push(options.program); + + for arg in options.args { + command.push(arg); + } + + self.k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["sh", "-c", &command.join(" ")], + ) + .await + .map_err(|err| { + ProviderError::RunCommandError( + format!("sh -c {}", &command.join(" ")), + format!("in pod {}", self.name), + err.into(), + ) + }) + } + + async fn run_script( + &self, + options: RunScriptOptions, + ) -> Result { + let file_name = options + .local_script_path + .file_name() + .expect(&format!( + "file name should be present at this point {THIS_IS_A_BUG}" + )) + .to_string_lossy(); + + self.run_command(RunCommandOptions { + program: format!("/tmp/{file_name}"), + args: options.args, + env: options.env, + }) + .await + .map_err(|err| ProviderError::RunScriptError(self.name.to_string(), err.into())) + } + + async fn send_file( + &self, + local_file_path: &Path, + remote_file_path: &Path, + mode: &str, + ) -> Result<(), ProviderError> { + if let Some(remote_parent_dir) = self.get_remote_parent_dir(remote_file_path) { + 
self.create_remote_dir(&remote_parent_dir).await?; + } + + debug!( + "Uploading file: {} IFF not present in the fileserver", + local_file_path.to_string_lossy() + ); + + // we need to override the url to use inside the pod + let (mut url, hash) = self.upload_to_fileserver(local_file_path).await?; + let _ = url.set_host(Some("fileserver")); + let _ = url.set_port(Some(80)); + + // Sometimes downloading the file fails (the file is corrupted) + // Add at most 5 retries + let mut last_err = None; + for i in 0..5 { + if i > 0 { + warn!("retrying number {i} download file {:?}", remote_file_path); + tokio::time::sleep(Duration::from_secs(i)).await; + } + + let res = self + .download_file(url.as_ref(), remote_file_path, Some(&hash)) + .await; + + last_err = res.err(); + + if last_err.is_none() { + // ready to continue + break; + } + } + + if let Some(last_err) = last_err { + return Err(last_err); + } + + let _ = self + .k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["chmod", mode, &remote_file_path.to_string_lossy()], + ) + .await + .map_err(|err| { + ProviderError::SendFile( + self.name.clone(), + local_file_path.to_string_lossy().to_string(), + err.into(), + ) + })?; + + Ok(()) + } + + async fn receive_file( + &self, + _remote_src: &Path, + _local_dest: &Path, + ) -> Result<(), ProviderError> { + Ok(()) + } + + async fn ip(&self) -> Result { + let status = self + .k8s_client + .pod_status(&self.namespace_name(), &self.name) + .await + .map_err(|_| ProviderError::MissingNode(self.name.clone()))?; + + if let Some(ip) = status.pod_ip { + // Pod ip should be parseable + Ok(ip.parse::().map_err(|err| { + ProviderError::InvalidConfig(format!("Can not parse the pod ip: {ip}, err: {err}")) + })?) 
+ } else { + Err(ProviderError::InvalidConfig(format!( + "Can not find ip of pod: {}", + self.name() + ))) + } + } + + async fn pause(&self) -> Result<(), ProviderError> { + self.k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["sh", "-c", "echo pause > /tmp/zombiepipe"], + ) + .await + .map_err(|err| ProviderError::PauseNodeFailed(self.name.to_string(), err.into()))? + .map_err(|err| { + ProviderError::PauseNodeFailed( + self.name.to_string(), + anyhow!("error when pausing node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn resume(&self) -> Result<(), ProviderError> { + self.k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["sh", "-c", "echo resume > /tmp/zombiepipe"], + ) + .await + .map_err(|err| ProviderError::ResumeNodeFailed(self.name.to_string(), err.into()))? + .map_err(|err| { + ProviderError::ResumeNodeFailed( + self.name.to_string(), + anyhow!("error when pausing node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn restart(&self, after: Option) -> Result<(), ProviderError> { + if let Some(duration) = after { + sleep(duration).await; + } + + self.k8s_client + .pod_exec( + &self.namespace_name(), + &self.name, + vec!["sh", "-c", "echo restart > /tmp/zombiepipe"], + ) + .await + .map_err(|err| ProviderError::RestartNodeFailed(self.name.to_string(), err.into()))? 
+ .map_err(|err| { + ProviderError::RestartNodeFailed( + self.name.to_string(), + anyhow!("error when restarting node: status {}: {}", err.0, err.1), + ) + })?; + + Ok(()) + } + + async fn destroy(&self) -> Result<(), ProviderError> { + self.k8s_client + .delete_pod(&self.namespace_name(), &self.name) + .await + .map_err(|err| ProviderError::KillNodeFailed(self.name.to_string(), err.into()))?; + + if let Some(namespace) = self.namespace.upgrade() { + namespace.nodes.write().await.remove(&self.name); + } + + Ok(()) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/pod_spec_builder.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/pod_spec_builder.rs new file mode 100644 index 00000000..76a2da77 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/pod_spec_builder.rs @@ -0,0 +1,188 @@ +use std::collections::BTreeMap; + +use configuration::shared::resources::{ResourceQuantity, Resources}; +use k8s_openapi::{ + api::core::v1::{ + ConfigMapVolumeSource, Container, EnvVar, PodSpec, ResourceRequirements, Volume, + VolumeMount, + }, + apimachinery::pkg::api::resource::Quantity, +}; + +pub(super) struct PodSpecBuilder; + +impl PodSpecBuilder { + pub(super) fn build( + name: &str, + image: &str, + resources: Option<&Resources>, + program: &str, + args: &[String], + env: &[(String, String)], + ) -> PodSpec { + PodSpec { + hostname: Some(name.to_string()), + init_containers: Some(vec![Self::build_helper_binaries_setup_container()]), + containers: vec![Self::build_main_container( + name, image, resources, program, args, env, + )], + volumes: Some(Self::build_volumes()), + ..Default::default() + } + } + + fn build_main_container( + name: &str, + image: &str, + resources: Option<&Resources>, + program: &str, + args: &[String], + env: &[(String, String)], + ) -> Container { + Container { + name: name.to_string(), + image: Some(image.to_string()), + image_pull_policy: Some("Always".to_string()), + command: 
Some( + [ + vec!["/zombie-wrapper.sh".to_string(), program.to_string()], + args.to_vec(), + ] + .concat(), + ), + env: Some( + env.iter() + .map(|(name, value)| EnvVar { + name: name.clone(), + value: Some(value.clone()), + value_from: None, + }) + .collect(), + ), + volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount { + name: "zombie-wrapper-volume".to_string(), + mount_path: "/zombie-wrapper.sh".to_string(), + sub_path: Some("zombie-wrapper.sh".to_string()), + ..Default::default() + }])), + resources: Self::build_resources_requirements(resources), + ..Default::default() + } + } + + fn build_helper_binaries_setup_container() -> Container { + Container { + name: "helper-binaries-setup".to_string(), + image: Some("europe-west3-docker.pkg.dev/parity-zombienet/zombienet-public-images/alpine:latest".to_string()), + image_pull_policy: Some("IfNotPresent".to_string()), + volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount { + name: "helper-binaries-downloader-volume".to_string(), + mount_path: "/helper-binaries-downloader.sh".to_string(), + sub_path: Some("helper-binaries-downloader.sh".to_string()), + ..Default::default() + }])), + command: Some(vec![ + "ash".to_string(), + "/helper-binaries-downloader.sh".to_string(), + ]), + ..Default::default() + } + } + + fn build_volumes() -> Vec { + vec![ + Volume { + name: "cfg".to_string(), + ..Default::default() + }, + Volume { + name: "data".to_string(), + ..Default::default() + }, + Volume { + name: "relay-data".to_string(), + ..Default::default() + }, + Volume { + name: "zombie-wrapper-volume".to_string(), + config_map: Some(ConfigMapVolumeSource { + name: Some("zombie-wrapper".to_string()), + default_mode: Some(0o755), + ..Default::default() + }), + ..Default::default() + }, + Volume { + name: "helper-binaries-downloader-volume".to_string(), + config_map: Some(ConfigMapVolumeSource { + name: Some("helper-binaries-downloader".to_string()), + default_mode: Some(0o755), + ..Default::default() + }), + 
..Default::default() + }, + ] + } + + fn build_volume_mounts(non_default_mounts: Vec) -> Vec { + [ + vec![ + VolumeMount { + name: "cfg".to_string(), + mount_path: "/cfg".to_string(), + read_only: Some(false), + ..Default::default() + }, + VolumeMount { + name: "data".to_string(), + mount_path: "/data".to_string(), + read_only: Some(false), + ..Default::default() + }, + VolumeMount { + name: "relay-data".to_string(), + mount_path: "/relay-data".to_string(), + read_only: Some(false), + ..Default::default() + }, + ], + non_default_mounts, + ] + .concat() + } + + fn build_resources_requirements(resources: Option<&Resources>) -> Option { + resources.map(|resources| ResourceRequirements { + limits: Self::build_resources_requirements_quantities( + resources.limit_cpu(), + resources.limit_memory(), + ), + requests: Self::build_resources_requirements_quantities( + resources.request_cpu(), + resources.request_memory(), + ), + ..Default::default() + }) + } + + fn build_resources_requirements_quantities( + cpu: Option<&ResourceQuantity>, + memory: Option<&ResourceQuantity>, + ) -> Option> { + let mut quantities = BTreeMap::new(); + + if let Some(cpu) = cpu { + quantities.insert("cpu".to_string(), Quantity(cpu.as_str().to_string())); + } + + if let Some(memory) = memory { + quantities.insert("memory".to_string(), Quantity(memory.as_str().to_string())); + } + + if !quantities.is_empty() { + Some(quantities) + } else { + None + } + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/provider.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/provider.rs new file mode 100644 index 00000000..2199d160 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/provider.rs @@ -0,0 +1,145 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + sync::{Arc, Weak}, +}; + +use async_trait::async_trait; +use support::fs::FileSystem; +use tokio::sync::RwLock; + +use super::{client::KubernetesClient, 
namespace::KubernetesNamespace}; +use crate::{ + shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider, + ProviderError, ProviderNamespace, +}; + +pub const PROVIDER_NAME: &str = "k8s"; + +pub struct KubernetesProvider +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + capabilities: ProviderCapabilities, + tmp_dir: PathBuf, + k8s_client: KubernetesClient, + filesystem: FS, + pub(super) namespaces: RwLock>>>, +} + +impl KubernetesProvider +where + FS: FileSystem + Send + Sync + Clone, +{ + pub async fn new(filesystem: FS) -> Arc { + let k8s_client = KubernetesClient::new().await.unwrap(); + + Arc::new_cyclic(|weak| KubernetesProvider { + weak: weak.clone(), + capabilities: ProviderCapabilities { + requires_image: true, + has_resources: true, + prefix_with_full_path: false, + use_default_ports_in_cmd: true, + }, + tmp_dir: std::env::temp_dir(), + k8s_client, + filesystem, + namespaces: RwLock::new(HashMap::new()), + }) + } + + pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { + self.tmp_dir = tmp_dir.into(); + self + } +} + +#[async_trait] +impl Provider for KubernetesProvider +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + PROVIDER_NAME + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + async fn namespaces(&self) -> HashMap { + self.namespaces + .read() + .await + .iter() + .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) + .collect() + } + + async fn create_namespace(&self) -> Result { + let namespace = KubernetesNamespace::new( + &self.weak, + &self.tmp_dir, + &self.capabilities, + &self.k8s_client, + &self.filesystem, + None, + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_with_base_dir( + &self, + base_dir: &Path, + ) -> Result { + let namespace = KubernetesNamespace::new( + 
&self.weak, + &self.tmp_dir, + &self.capabilities, + &self.k8s_client, + &self.filesystem, + Some(base_dir), + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let (base_dir, name) = extract_namespace_info(json_value)?; + + let namespace = KubernetesNamespace::attach_to_live( + &self.weak, + &self.capabilities, + &self.k8s_client, + &self.filesystem, + &base_dir, + &name, + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/baseline-resources.yaml b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/baseline-resources.yaml new file mode 100644 index 00000000..78da6d4e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/baseline-resources.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-limit-range +spec: + limits: + - defaultRequest: + memory: 1G + cpu: 0.5 + type: Container diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/namespace-network-policy.yaml b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/namespace-network-policy.yaml new file mode 100644 index 00000000..e51ee95e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/kubernetes/static-configs/namespace-network-policy.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: internal-access +spec: + podSelector: {} + ingress: + - from: + - namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - {{namespace}} + - gitlab + - arc-runner + - loki + - tempo + - monitoring + - 
parachain-exporter + - default + policyTypes: + - Ingress diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/lib.rs new file mode 100644 index 00000000..13166203 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/lib.rs @@ -0,0 +1,264 @@ +#![allow(clippy::expect_fun_call)] +mod docker; +mod kubernetes; +mod native; +pub mod shared; + +use std::{ + collections::HashMap, + net::IpAddr, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; +use shared::{ + constants::LOCALHOST, + types::{ + ExecutionResult, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, + RunScriptOptions, SpawnNodeOptions, + }, +}; +use support::fs::FileSystemError; + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ProviderError { + #[error("Failed to create client '{0}': {1}")] + CreateClientFailed(String, anyhow::Error), + + #[error("Failed to create namespace '{0}': {1}")] + CreateNamespaceFailed(String, anyhow::Error), + + #[error("Failed to spawn node '{0}': {1}")] + NodeSpawningFailed(String, anyhow::Error), + + #[error("Error running command '{0}' {1}: {2}")] + RunCommandError(String, String, anyhow::Error), + + #[error("Error running script'{0}': {1}")] + RunScriptError(String, anyhow::Error), + + #[error("Invalid network configuration field {0}")] + InvalidConfig(String), + + #[error("Failed to retrieve node available args using image {0} and command {1}: {2}")] + NodeAvailableArgsError(String, String, String), + + #[error("Can not recover node: {0}")] + MissingNode(String), + + #[error("Can not recover node: {0} info, field: {1}")] + MissingNodeInfo(String, String), + + #[error("File generation failed: {0}")] + FileGenerationFailed(anyhow::Error), + + #[error(transparent)] + FileSystemError(#[from] FileSystemError), + + #[error("Invalid script path for {0}")] + InvalidScriptPath(anyhow::Error), + + #[error("Script with path {0} 
not found")] + ScriptNotFound(PathBuf), + + #[error("Failed to retrieve process ID for node '{0}'")] + ProcessIdRetrievalFailed(String), + + #[error("Failed to pause node '{0}': {1}")] + PauseNodeFailed(String, anyhow::Error), + + #[error("Failed to resume node '{0}': {1}")] + ResumeNodeFailed(String, anyhow::Error), + + #[error("Failed to kill node '{0}': {1}")] + KillNodeFailed(String, anyhow::Error), + + #[error("Failed to restart node '{0}': {1}")] + RestartNodeFailed(String, anyhow::Error), + + #[error("Failed to destroy node '{0}': {1}")] + DestroyNodeFailed(String, anyhow::Error), + + #[error("Failed to get logs for node '{0}': {1}")] + GetLogsFailed(String, anyhow::Error), + + #[error("Failed to dump logs for node '{0}': {1}")] + DumpLogsFailed(String, anyhow::Error), + + #[error("Failed to copy file from node '{0}': {1}")] + CopyFileFromNodeError(String, anyhow::Error), + + #[error("Failed to setup fileserver: {0}")] + FileServerSetupError(anyhow::Error), + + #[error("Error uploading file: '{0}': {1}")] + UploadFile(String, anyhow::Error), + + #[error("Error downloading file: '{0}': {1}")] + DownloadFile(String, anyhow::Error), + + #[error("Error sending file '{0}' to {1}: {2}")] + SendFile(String, String, anyhow::Error), + + #[error("Error creating port-forward '{0}:{1}': {2}")] + PortForwardError(u16, u16, anyhow::Error), + + #[error("Failed to delete namespace '{0}': {1}")] + DeleteNamespaceFailed(String, anyhow::Error), + + #[error("Serialization error")] + SerializationError(#[from] serde_json::Error), + + #[error("Failed to acquire lock: {0}")] + FailedToAcquireLock(String), +} + +#[async_trait] +pub trait Provider { + fn name(&self) -> &str; + + fn capabilities(&self) -> &ProviderCapabilities; + + async fn namespaces(&self) -> HashMap; + + async fn create_namespace(&self) -> Result; + + async fn create_namespace_with_base_dir( + &self, + base_dir: &Path, + ) -> Result; + + async fn create_namespace_from_json( + &self, + json_value: 
&serde_json::Value, + ) -> Result; +} + +pub type DynProvider = Arc; + +#[async_trait] +pub trait ProviderNamespace { + fn name(&self) -> &str; + + fn base_dir(&self) -> &PathBuf; + + fn capabilities(&self) -> &ProviderCapabilities; + + fn provider_name(&self) -> &str; + + async fn detach(&self) { + // noop by default + warn!("Detach is not implemented for {}", self.name()); + } + + async fn is_detached(&self) -> bool { + // false by default + false + } + + async fn nodes(&self) -> HashMap; + + async fn get_node_available_args( + &self, + options: (String, Option), + ) -> Result; + + async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result; + + async fn spawn_node_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result; + + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError>; + + async fn destroy(&self) -> Result<(), ProviderError>; + + async fn static_setup(&self) -> Result<(), ProviderError>; +} + +pub type DynNamespace = Arc; + +#[async_trait] +pub trait ProviderNode: erased_serde::Serialize { + fn name(&self) -> &str; + + fn args(&self) -> Vec<&str>; + + fn base_dir(&self) -> &PathBuf; + + fn config_dir(&self) -> &PathBuf; + + fn data_dir(&self) -> &PathBuf; + + fn relay_data_dir(&self) -> &PathBuf; + + fn scripts_dir(&self) -> &PathBuf; + + fn log_path(&self) -> &PathBuf; + + fn log_cmd(&self) -> String; + + // Return the absolute path to the file in the `node` perspective + // TODO: purpose? 
+ fn path_in_node(&self, file: &Path) -> PathBuf; + + async fn logs(&self) -> Result; + + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError>; + + // By default return localhost, should be overrided for k8s + async fn ip(&self) -> Result { + Ok(LOCALHOST) + } + + // Noop by default (native/docker provider) + async fn create_port_forward( + &self, + _local_port: u16, + _remote_port: u16, + ) -> Result, ProviderError> { + Ok(None) + } + + async fn run_command( + &self, + options: RunCommandOptions, + ) -> Result; + + async fn run_script(&self, options: RunScriptOptions) + -> Result; + + async fn send_file( + &self, + local_file_path: &Path, + remote_file_path: &Path, + mode: &str, + ) -> Result<(), ProviderError>; + + async fn receive_file( + &self, + remote_file_path: &Path, + local_file_path: &Path, + ) -> Result<(), ProviderError>; + + async fn pause(&self) -> Result<(), ProviderError>; + + async fn resume(&self) -> Result<(), ProviderError>; + + async fn restart(&self, after: Option) -> Result<(), ProviderError>; + + async fn destroy(&self) -> Result<(), ProviderError>; +} + +pub type DynNode = Arc; + +// re-export +pub use docker::*; +pub use kubernetes::*; +pub use native::*; +pub use shared::{constants, types}; +use tracing::warn; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native.rs new file mode 100644 index 00000000..0f63b721 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native.rs @@ -0,0 +1,5 @@ +mod namespace; +mod node; +mod provider; + +pub use provider::NativeProvider; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/namespace.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/namespace.rs new file mode 100644 index 00000000..39936e5c --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/namespace.rs @@ -0,0 +1,374 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, 
+ sync::{Arc, Weak}, +}; + +use async_trait::async_trait; +use support::fs::FileSystem; +use tokio::sync::RwLock; +use tracing::{trace, warn}; +use uuid::Uuid; + +use super::node::{NativeNode, NativeNodeOptions}; +use crate::{ + constants::NAMESPACE_PREFIX, + native::{node::DeserializableNativeNodeOptions, provider}, + shared::helpers::extract_execution_result, + types::{ + GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions, + SpawnNodeOptions, + }, + DynNode, NativeProvider, ProviderError, ProviderNamespace, ProviderNode, +}; + +pub(super) struct NativeNamespace +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + name: String, + provider: Weak>, + base_dir: PathBuf, + capabilities: ProviderCapabilities, + filesystem: FS, + pub(super) nodes: RwLock>>>, +} + +impl NativeNamespace +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + provider: &Weak>, + tmp_dir: &PathBuf, + capabilities: &ProviderCapabilities, + filesystem: &FS, + custom_base_dir: Option<&Path>, + ) -> Result, ProviderError> { + let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4()); + let base_dir = if let Some(custom_base_dir) = custom_base_dir { + if !filesystem.exists(custom_base_dir).await { + filesystem.create_dir_all(custom_base_dir).await?; + } else { + warn!( + "⚠️ Using and existing directory {} as base dir", + custom_base_dir.to_string_lossy() + ); + } + PathBuf::from(custom_base_dir) + } else { + let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]); + filesystem.create_dir(&base_dir).await?; + base_dir + }; + + Ok(Arc::new_cyclic(|weak| NativeNamespace { + weak: weak.clone(), + provider: provider.clone(), + name, + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + nodes: RwLock::new(HashMap::new()), + })) + } + + pub(super) async fn attach_to_live( + provider: &Weak>, + capabilities: &ProviderCapabilities, + filesystem: &FS, + custom_base_dir: &Path, + 
name: &str, + ) -> Result, ProviderError> { + let base_dir = custom_base_dir.to_path_buf(); + + Ok(Arc::new_cyclic(|weak| NativeNamespace { + weak: weak.clone(), + provider: provider.clone(), + name: name.to_string(), + base_dir, + capabilities: capabilities.clone(), + filesystem: filesystem.clone(), + nodes: RwLock::new(HashMap::new()), + })) + } +} + +#[async_trait] +impl ProviderNamespace for NativeNamespace +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + fn provider_name(&self) -> &str { + provider::PROVIDER_NAME + } + + async fn nodes(&self) -> HashMap { + self.nodes + .read() + .await + .iter() + .map(|(name, node)| (name.clone(), node.clone() as DynNode)) + .collect() + } + + async fn get_node_available_args( + &self, + (command, _image): (String, Option), + ) -> Result { + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "bash".to_string()) + .args(vec!["-c", "while :; do sleep 1; done"]), + ) + .await?; + + let available_args_output = temp_node + .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"])) + .await? 
+ .map_err(|(_exit, status)| { + ProviderError::NodeAvailableArgsError("".to_string(), command, status) + })?; + + temp_node.destroy().await?; + + Ok(available_args_output) + } + + async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result { + trace!("spawn node options {options:?}"); + + let node = NativeNode::new(NativeNodeOptions { + namespace: &self.weak, + namespace_base_dir: &self.base_dir, + name: &options.name, + program: &options.program, + args: &options.args, + env: &options.env, + startup_files: &options.injected_files, + created_paths: &options.created_paths, + db_snapshot: options.db_snapshot.as_ref(), + filesystem: &self.filesystem, + node_log_path: options.node_log_path.as_ref(), + }) + .await?; + + self.nodes + .write() + .await + .insert(options.name.clone(), node.clone()); + + Ok(node) + } + + async fn spawn_node_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let deserializable: DeserializableNativeNodeOptions = + serde_json::from_value(json_value.clone())?; + let options = NativeNodeOptions::from_deserializable( + &deserializable, + &self.weak, + &self.base_dir, + &self.filesystem, + ); + + let pid = json_value + .get("process_handle") + .and_then(|v| v.as_i64()) + .ok_or_else(|| ProviderError::InvalidConfig("Missing pid field".to_string()))? 
+ as i32; + let node = NativeNode::attach_to_live(options, pid).await?; + + self.nodes + .write() + .await + .insert(node.name().to_string(), node.clone()); + + Ok(node) + } + + async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> { + let node_name = if let Some(name) = options.temp_name { + name + } else { + format!("temp-{}", Uuid::new_v4()) + }; + + // we spawn a node doing nothing but looping so we can execute our commands + let temp_node = self + .spawn_node( + &SpawnNodeOptions::new(node_name, "bash".to_string()) + .args(vec!["-c", "while :; do sleep 1; done"]) + .injected_files(options.injected_files), + ) + .await?; + + for GenerateFileCommand { + program, + args, + env, + local_output_path, + } in options.commands + { + trace!( + "🏗 building file {:?} in path {} with command {} {}", + local_output_path.as_os_str(), + self.base_dir.to_string_lossy(), + program, + args.join(" ") + ); + let local_output_full_path = format!( + "{}{}{}", + self.base_dir.to_string_lossy(), + if local_output_path.starts_with("/") { + "" + } else { + "/" + }, + local_output_path.to_string_lossy() + ); + + let contents = extract_execution_result( + &temp_node, + RunCommandOptions { program, args, env }, + options.expected_path.as_ref(), + ) + .await?; + self.filesystem + .write(local_output_full_path, contents) + .await + .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?; + } + + temp_node.destroy().await + } + + async fn static_setup(&self) -> Result<(), ProviderError> { + // no static setup exists for native provider + todo!() + } + + async fn destroy(&self) -> Result<(), ProviderError> { + let mut names = vec![]; + + for node in self.nodes.read().await.values() { + node.abort() + .await + .map_err(|err| ProviderError::DestroyNodeFailed(node.name().to_string(), err))?; + names.push(node.name().to_string()); + } + + let mut nodes = self.nodes.write().await; + for name in names { + nodes.remove(&name); + } + + if let 
Some(provider) = self.provider.upgrade() { + provider.namespaces.write().await.remove(&self.name); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use support::fs::local::LocalFileSystem; + + use super::*; + use crate::{ + types::{GenerateFileCommand, GenerateFilesOptions}, + NativeProvider, Provider, + }; + + fn unique_temp_dir() -> PathBuf { + let mut base = std::env::temp_dir(); + base.push(format!("znet_native_ns_test_{}", uuid::Uuid::new_v4())); + base + } + + #[tokio::test] + async fn generate_files_uses_expected_path_when_provided() { + let fs = LocalFileSystem; + let provider = NativeProvider::new(fs.clone()); + let base_dir = unique_temp_dir(); + // Namespace builder will create directory if needed + let ns = provider + .create_namespace_with_base_dir(&base_dir) + .await + .expect("namespace should be created"); + + // Create a unique on-host path that the native node will write to + let expected_path = + std::env::temp_dir().join(format!("znet_expected_{}.json", uuid::Uuid::new_v4())); + + // Command will write JSON into expected_path; stdout will be something else to ensure we don't read it + let program = "bash".to_string(); + let script = format!( + "echo -n '{{\"hello\":\"world\"}}' > {} && echo should_not_be_used", + expected_path.to_string_lossy() + ); + let args: Vec = vec!["-lc".into(), script]; + + let out_name = PathBuf::from("result_expected.json"); + let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args); + let options = GenerateFilesOptions::new(vec![cmd], None, Some(expected_path.clone())); + + ns.generate_files(options) + .await + .expect("generation should succeed"); + + // Read produced file from namespace base_dir + let produced_path = base_dir.join(out_name); + let produced = fs + .read_to_string(&produced_path) + .await + .expect("should read produced file"); + assert_eq!(produced, "{\"hello\":\"world\"}"); + } + + #[tokio::test] + async fn generate_files_uses_stdout_when_expected_path_absent() { + let fs = 
LocalFileSystem; + let provider = NativeProvider::new(fs.clone()); + let base_dir = unique_temp_dir(); + let ns = provider + .create_namespace_with_base_dir(&base_dir) + .await + .expect("namespace should be created"); + + // Command prints to stdout only + let program = "bash".to_string(); + let args: Vec = vec!["-lc".into(), "echo -n 42".into()]; + + let out_name = PathBuf::from("result_stdout.txt"); + let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args); + let options = GenerateFilesOptions::new(vec![cmd], None, None); + + ns.generate_files(options) + .await + .expect("generation should succeed"); + + let produced_path = base_dir.join(out_name); + let produced = fs + .read_to_string(&produced_path) + .await + .expect("should read produced file"); + assert_eq!(produced, "42"); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/node.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/node.rs new file mode 100644 index 00000000..fd99e098 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/node.rs @@ -0,0 +1,734 @@ +use std::{ + collections::HashMap, + env, + path::{Path, PathBuf}, + process::Stdio, + sync::{Arc, Weak}, + time::Duration, +}; + +use anyhow::anyhow; +use async_trait::async_trait; +use configuration::types::AssetLocation; +use flate2::read::GzDecoder; +use futures::future::try_join_all; +use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, +}; +use serde::{ser::Error, Deserialize, Serialize, Serializer}; +use sha2::Digest; +use support::{constants::THIS_IS_A_BUG, fs::FileSystem}; +use tar::Archive; +use tokio::{ + fs, + io::{AsyncRead, AsyncReadExt, BufReader}, + process::{Child, ChildStderr, ChildStdout, Command}, + sync::{ + mpsc::{self, Sender}, + RwLock, + }, + task::JoinHandle, + time::sleep, + try_join, +}; +use tracing::trace; + +use super::namespace::NativeNamespace; +use crate::{ + constants::{NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, 
NODE_SCRIPTS_DIR}, + native, + types::{ExecutionResult, RunCommandOptions, RunScriptOptions, TransferedFile}, + ProviderError, ProviderNamespace, ProviderNode, +}; + +pub(super) struct NativeNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) namespace: &'a Weak>, + pub(super) namespace_base_dir: &'a PathBuf, + pub(super) name: &'a str, + pub(super) program: &'a str, + pub(super) args: &'a [String], + pub(super) env: &'a [(String, String)], + pub(super) startup_files: &'a [TransferedFile], + pub(super) created_paths: &'a [PathBuf], + pub(super) db_snapshot: Option<&'a AssetLocation>, + pub(super) filesystem: &'a FS, + pub(super) node_log_path: Option<&'a PathBuf>, +} + +impl<'a, FS> NativeNodeOptions<'a, FS> +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) fn from_deserializable( + deserializable: &'a DeserializableNativeNodeOptions, + namespace: &'a Weak>, + namespace_base_dir: &'a PathBuf, + filesystem: &'a FS, + ) -> NativeNodeOptions<'a, FS> { + NativeNodeOptions { + namespace, + namespace_base_dir, + name: &deserializable.name, + program: &deserializable.program, + args: &deserializable.args, + env: &deserializable.env, + startup_files: &[], + created_paths: &[], + db_snapshot: None, + filesystem, + node_log_path: deserializable.node_log_path.as_ref(), + } + } +} + +#[derive(Deserialize)] +pub(super) struct DeserializableNativeNodeOptions { + pub name: String, + pub program: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub node_log_path: Option, +} + +enum ProcessHandle { + Spawned(Child, Pid), + Attached(Pid), +} + +#[derive(Serialize)] +pub(super) struct NativeNode +where + FS: FileSystem + Send + Sync + Clone, +{ + #[serde(skip)] + namespace: Weak>, + name: String, + program: String, + args: Vec, + env: Vec<(String, String)>, + base_dir: PathBuf, + config_dir: PathBuf, + data_dir: PathBuf, + relay_data_dir: PathBuf, + scripts_dir: PathBuf, + log_path: PathBuf, + 
#[serde(serialize_with = "serialize_process_handle")] + // using RwLock from std to serialize properly, generally using sync locks is ok in async code as long as they + // are not held across await points + process_handle: std::sync::RwLock>, + #[serde(skip)] + stdout_reading_task: RwLock>>, + #[serde(skip)] + stderr_reading_task: RwLock>>, + #[serde(skip)] + log_writing_task: RwLock>>, + #[serde(skip)] + filesystem: FS, + provider_tag: String, +} + +impl NativeNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + pub(super) async fn new( + options: NativeNodeOptions<'_, FS>, + ) -> Result, ProviderError> { + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + trace!("creating base_dir {:?}", base_dir); + options.filesystem.create_dir_all(&base_dir).await?; + trace!("created base_dir {:?}", base_dir); + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = options + .node_log_path + .cloned() + .unwrap_or_else(|| base_dir.join(format!("{}.log", options.name))); + + trace!("creating dirs {:?}", config_dir); + try_join!( + filesystem.create_dir_all(&config_dir), + filesystem.create_dir_all(&data_dir), + filesystem.create_dir_all(&relay_data_dir), + filesystem.create_dir_all(&scripts_dir), + )?; + trace!("created!"); + + let node = Arc::new(NativeNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + process_handle: 
std::sync::RwLock::new(None), + stdout_reading_task: RwLock::new(None), + stderr_reading_task: RwLock::new(None), + log_writing_task: RwLock::new(None), + filesystem: filesystem.clone(), + provider_tag: native::provider::PROVIDER_NAME.to_string(), + }); + + node.initialize_startup_paths(options.created_paths).await?; + node.initialize_startup_files(options.startup_files).await?; + + if let Some(db_snap) = options.db_snapshot { + node.initialize_db_snapshot(db_snap).await?; + } + + let (stdout, stderr) = node.initialize_process().await?; + + node.initialize_log_writing(stdout, stderr).await; + + Ok(node) + } + + pub(super) async fn attach_to_live( + options: NativeNodeOptions<'_, FS>, + pid: i32, + ) -> Result, ProviderError> { + let filesystem = options.filesystem.clone(); + + let base_dir = + PathBuf::from_iter([options.namespace_base_dir, &PathBuf::from(options.name)]); + trace!("creating base_dir {:?}", base_dir); + options.filesystem.create_dir_all(&base_dir).await?; + trace!("created base_dir {:?}", base_dir); + + let base_dir_raw = base_dir.to_string_lossy(); + let config_dir = PathBuf::from(format!("{base_dir_raw}{NODE_CONFIG_DIR}")); + let data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_DATA_DIR}")); + let relay_data_dir = PathBuf::from(format!("{base_dir_raw}{NODE_RELAY_DATA_DIR}")); + let scripts_dir = PathBuf::from(format!("{base_dir_raw}{NODE_SCRIPTS_DIR}")); + let log_path = options + .node_log_path + .cloned() + .unwrap_or_else(|| base_dir.join(format!("{}.log", options.name))); + + let pid = Pid::from_raw(pid); + + let node = Arc::new(NativeNode { + namespace: options.namespace.clone(), + name: options.name.to_string(), + program: options.program.to_string(), + args: options.args.to_vec(), + env: options.env.to_vec(), + base_dir, + config_dir, + data_dir, + relay_data_dir, + scripts_dir, + log_path, + process_handle: std::sync::RwLock::new(Some(ProcessHandle::Attached(pid))), + stdout_reading_task: RwLock::new(None), + stderr_reading_task: 
RwLock::new(None), + log_writing_task: RwLock::new(None), + filesystem: filesystem.clone(), + provider_tag: native::provider::PROVIDER_NAME.to_string(), + }); + + Ok(node) + } + + async fn initialize_startup_paths(&self, paths: &[PathBuf]) -> Result<(), ProviderError> { + trace!("creating paths {:?}", paths); + let base_dir_raw = self.base_dir.to_string_lossy(); + try_join_all(paths.iter().map(|file| { + let full_path = format!("{base_dir_raw}{}", file.to_string_lossy()); + self.filesystem.create_dir_all(full_path) + })) + .await?; + trace!("paths created!"); + + Ok(()) + } + + async fn initialize_startup_files( + &self, + startup_files: &[TransferedFile], + ) -> Result<(), ProviderError> { + trace!("creating files {:?}", startup_files); + try_join_all( + startup_files + .iter() + .map(|file| self.send_file(&file.local_path, &file.remote_path, &file.mode)), + ) + .await?; + trace!("files created!"); + + Ok(()) + } + + async fn initialize_db_snapshot( + &self, + db_snapshot: &AssetLocation, + ) -> Result<(), ProviderError> { + trace!("snap: {db_snapshot}"); + + // check if we need to get the db or is already in the ns + let ns_base_dir = self.namespace_base_dir(); + let hashed_location = match db_snapshot { + AssetLocation::Url(location) => hex::encode(sha2::Sha256::digest(location.to_string())), + AssetLocation::FilePath(filepath) => { + hex::encode(sha2::Sha256::digest(filepath.to_string_lossy().to_string())) + }, + }; + + let full_path = format!("{ns_base_dir}/{hashed_location}.tgz"); + trace!("db_snap fullpath in ns: {full_path}"); + if !self.filesystem.exists(&full_path).await { + // needs to download/copy + self.get_db_snapshot(db_snapshot, &full_path).await?; + } + + let contents = self.filesystem.read(&full_path).await.unwrap(); + let gz = GzDecoder::new(&contents[..]); + let mut archive = Archive::new(gz); + archive + .unpack(self.base_dir.to_string_lossy().as_ref()) + .unwrap(); + + if std::env::var("ZOMBIE_RM_TGZ_AFTER_EXTRACT").is_ok() { + let res = 
fs::remove_file(&full_path).await; + trace!("removing {}, result {:?}", full_path, res); + } + + Ok(()) + } + + async fn get_db_snapshot( + &self, + location: &AssetLocation, + full_path: &str, + ) -> Result<(), ProviderError> { + trace!("getting db_snapshot from: {:?} to: {full_path}", location); + match location { + AssetLocation::Url(location) => { + let res = reqwest::get(location.as_ref()) + .await + .map_err(|err| ProviderError::DownloadFile(location.to_string(), err.into()))?; + + let contents: &[u8] = &res.bytes().await.unwrap(); + trace!("writing: {full_path}"); + self.filesystem.write(full_path, contents).await?; + }, + AssetLocation::FilePath(filepath) => { + self.filesystem.copy(filepath, full_path).await?; + }, + }; + + Ok(()) + } + + async fn initialize_process(&self) -> Result<(ChildStdout, ChildStderr), ProviderError> { + let filtered_env: HashMap = env::vars() + .filter(|(k, _)| k == "TZ" || k == "LANG" || k == "PATH") + .collect(); + + let mut process = Command::new(&self.program) + .args(&self.args) + .env_clear() + .envs(&filtered_env) // minimal environment + .envs(self.env.to_vec()) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .current_dir(&self.base_dir) + .spawn() + .map_err(|err| ProviderError::NodeSpawningFailed(self.name.to_string(), err.into()))?; + let stdout = process + .stdout + .take() + .expect(&format!("infaillible, stdout is piped {THIS_IS_A_BUG}")); + let stderr = process + .stderr + .take() + .expect(&format!("infaillible, stderr is piped {THIS_IS_A_BUG}")); + + let pid = Pid::from_raw( + process + .id() + .ok_or_else(|| ProviderError::ProcessIdRetrievalFailed(self.name.to_string()))? + as i32, + ); + self.process_handle + .write() + .map_err(|_e| ProviderError::FailedToAcquireLock(self.name.clone()))? 
+ .replace(ProcessHandle::Spawned(process, pid)); + + Ok((stdout, stderr)) + } + + async fn initialize_log_writing(&self, stdout: ChildStdout, stderr: ChildStderr) { + let (stdout_tx, mut rx) = mpsc::channel(10); + let stderr_tx = stdout_tx.clone(); + + self.stdout_reading_task + .write() + .await + .replace(self.create_stream_polling_task(stdout, stdout_tx)); + self.stderr_reading_task + .write() + .await + .replace(self.create_stream_polling_task(stderr, stderr_tx)); + + let filesystem = self.filesystem.clone(); + let log_path = self.log_path.clone(); + + self.log_writing_task + .write() + .await + .replace(tokio::spawn(async move { + loop { + while let Some(Ok(data)) = rx.recv().await { + // TODO: find a better way instead of ignoring error ? + let _ = filesystem.append(&log_path, data).await; + } + sleep(Duration::from_millis(250)).await; + } + })); + } + + fn create_stream_polling_task( + &self, + stream: impl AsyncRead + Unpin + Send + 'static, + tx: Sender, std::io::Error>>, + ) -> JoinHandle<()> { + tokio::spawn(async move { + let mut reader = BufReader::new(stream); + let mut buffer = vec![0u8; 1024]; + + loop { + match reader.read(&mut buffer).await { + Ok(0) => { + let _ = tx.send(Ok(Vec::new())).await; + break; + }, + Ok(n) => { + let _ = tx.send(Ok(buffer[..n].to_vec())).await; + }, + Err(e) => { + let _ = tx.send(Err(e)).await; + break; + }, + } + } + }) + } + + fn process_id(&self) -> Result { + let pid = self + .process_handle + .read() + .map_err(|_e| ProviderError::FailedToAcquireLock(self.name.clone()))? 
+ .as_ref() + .map(|handle| match handle { + ProcessHandle::Spawned(_, pid) => *pid, + ProcessHandle::Attached(pid) => *pid, + }) + .ok_or_else(|| ProviderError::ProcessIdRetrievalFailed(self.name.to_string()))?; + + Ok(pid) + } + + pub(crate) async fn abort(&self) -> anyhow::Result<()> { + if let Some(task) = self.log_writing_task.write().await.take() { + task.abort(); + } + if let Some(task) = self.stdout_reading_task.write().await.take() { + task.abort(); + } + if let Some(task) = self.stderr_reading_task.write().await.take() { + task.abort(); + } + + let process_handle = { + let mut guard = self + .process_handle + .write() + .map_err(|_e| ProviderError::FailedToAcquireLock(self.name.clone()))?; + guard + .take() + .ok_or_else(|| anyhow!("no process was attached for the node"))? + }; + + match process_handle { + ProcessHandle::Spawned(mut child, _pid) => { + child.kill().await?; + }, + ProcessHandle::Attached(pid) => { + kill(pid, Signal::SIGKILL) + .map_err(|err| anyhow!("Failed to kill attached process {pid}: {err}"))?; + }, + } + + Ok(()) + } + + fn namespace_base_dir(&self) -> String { + self.namespace + .upgrade() + .map(|namespace| namespace.base_dir().to_string_lossy().to_string()) + .unwrap_or_else(|| panic!("namespace shouldn't be dropped, {THIS_IS_A_BUG}")) + } +} + +#[async_trait] +impl ProviderNode for NativeNode +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + &self.name + } + + fn args(&self) -> Vec<&str> { + self.args.iter().map(|arg| arg.as_str()).collect() + } + + fn base_dir(&self) -> &PathBuf { + &self.base_dir + } + + fn config_dir(&self) -> &PathBuf { + &self.config_dir + } + + fn data_dir(&self) -> &PathBuf { + &self.data_dir + } + + fn relay_data_dir(&self) -> &PathBuf { + &self.relay_data_dir + } + + fn scripts_dir(&self) -> &PathBuf { + &self.scripts_dir + } + + fn log_path(&self) -> &PathBuf { + &self.log_path + } + + fn log_cmd(&self) -> String { + format!("tail -f {}", 
self.log_path().to_string_lossy()) + } + + fn path_in_node(&self, file: &Path) -> PathBuf { + let full_path = format!( + "{}/{}", + self.base_dir.to_string_lossy(), + file.to_string_lossy() + ); + PathBuf::from(full_path) + } + + async fn logs(&self) -> Result { + Ok(self.filesystem.read_to_string(&self.log_path).await?) + } + + async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError> { + Ok(self.filesystem.copy(&self.log_path, local_dest).await?) + } + + async fn run_command( + &self, + options: RunCommandOptions, + ) -> Result { + let result = Command::new(options.program.clone()) + .args(options.args.clone()) + .envs(options.env) + .current_dir(&self.base_dir) + .output() + .await + .map_err(|err| { + ProviderError::RunCommandError( + format!("{} {}", &options.program, &options.args.join(" ")), + "locally".to_string(), + err.into(), + ) + })?; + + if result.status.success() { + Ok(Ok(String::from_utf8_lossy(&result.stdout).to_string())) + } else { + Ok(Err(( + result.status, + String::from_utf8_lossy(&result.stderr).to_string(), + ))) + } + } + + async fn run_script( + &self, + options: RunScriptOptions, + ) -> Result { + let local_script_path = PathBuf::from(&options.local_script_path); + + if !self.filesystem.exists(&local_script_path).await { + return Err(ProviderError::ScriptNotFound(local_script_path)); + } + + // extract file name and build remote file path + let script_file_name = local_script_path + .file_name() + .map(|file_name| file_name.to_string_lossy().to_string()) + .ok_or(ProviderError::InvalidScriptPath(anyhow!( + "Can't retrieve filename from script with path: {:?}", + options.local_script_path + )))?; + let remote_script_path = format!( + "{}/{}", + self.scripts_dir.to_string_lossy(), + script_file_name + ); + + // copy and set script's execute permission + self.filesystem + .copy(local_script_path, &remote_script_path) + .await?; + self.filesystem.set_mode(&remote_script_path, 0o744).await?; + + // execute script + 
self.run_command(RunCommandOptions { + program: remote_script_path, + args: options.args, + env: options.env, + }) + .await + } + + async fn send_file( + &self, + local_file_path: &Path, + remote_file_path: &Path, + mode: &str, + ) -> Result<(), ProviderError> { + let namespaced_remote_file_path = PathBuf::from(format!( + "{}{}", + &self.base_dir.to_string_lossy(), + remote_file_path.to_string_lossy() + )); + + self.filesystem + .copy(local_file_path, &namespaced_remote_file_path) + .await?; + + self.run_command( + RunCommandOptions::new("chmod") + .args(vec![mode, &namespaced_remote_file_path.to_string_lossy()]), + ) + .await? + .map_err(|(_, err)| { + ProviderError::SendFile( + self.name.clone(), + local_file_path.to_string_lossy().to_string(), + anyhow!("{err}"), + ) + })?; + + Ok(()) + } + + async fn receive_file( + &self, + remote_file_path: &Path, + local_file_path: &Path, + ) -> Result<(), ProviderError> { + let namespaced_remote_file_path = PathBuf::from(format!( + "{}{}", + &self.base_dir.to_string_lossy(), + remote_file_path.to_string_lossy() + )); + + self.filesystem + .copy(namespaced_remote_file_path, local_file_path) + .await?; + + Ok(()) + } + + async fn pause(&self) -> Result<(), ProviderError> { + let process_id = self.process_id()?; + + kill(process_id, Signal::SIGSTOP) + .map_err(|err| ProviderError::PauseNodeFailed(self.name.clone(), err.into()))?; + + Ok(()) + } + + async fn resume(&self) -> Result<(), ProviderError> { + let process_id = self.process_id()?; + + nix::sys::signal::kill(process_id, Signal::SIGCONT) + .map_err(|err| ProviderError::ResumeNodeFailed(self.name.clone(), err.into()))?; + + Ok(()) + } + + async fn restart(&self, after: Option) -> Result<(), ProviderError> { + if let Some(duration) = after { + sleep(duration).await; + } + + self.abort() + .await + .map_err(|err| ProviderError::RestartNodeFailed(self.name.clone(), err))?; + + let (stdout, stderr) = self + .initialize_process() + .await + .map_err(|err| 
ProviderError::RestartNodeFailed(self.name.clone(), err.into()))?; + + self.initialize_log_writing(stdout, stderr).await; + + Ok(()) + } + + async fn destroy(&self) -> Result<(), ProviderError> { + self.abort() + .await + .map_err(|err| ProviderError::DestroyNodeFailed(self.name.clone(), err))?; + + if let Some(namespace) = self.namespace.upgrade() { + namespace.nodes.write().await.remove(&self.name); + } + + Ok(()) + } +} + +fn serialize_process_handle( + process_handle: &std::sync::RwLock>, + serializer: S, +) -> Result +where + S: Serializer, +{ + let pid = process_handle + .read() + .map_err(|_e| S::Error::custom("failed to acquire read lock"))? + .as_ref() + .map(|handle| match handle { + ProcessHandle::Spawned(_, pid) => pid.as_raw(), + ProcessHandle::Attached(pid) => pid.as_raw(), + }); + pid.serialize(serializer) +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/provider.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/provider.rs new file mode 100644 index 00000000..7a943973 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/native/provider.rs @@ -0,0 +1,142 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + sync::{Arc, Weak}, +}; + +use async_trait::async_trait; +use support::fs::FileSystem; +use tokio::sync::RwLock; + +use super::namespace::NativeNamespace; +use crate::{ + shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider, + ProviderError, ProviderNamespace, +}; + +pub const PROVIDER_NAME: &str = "native"; + +pub struct NativeProvider +where + FS: FileSystem + Send + Sync + Clone, +{ + weak: Weak>, + capabilities: ProviderCapabilities, + tmp_dir: PathBuf, + filesystem: FS, + pub(super) namespaces: RwLock>>>, +} + +impl NativeProvider +where + FS: FileSystem + Send + Sync + Clone, +{ + pub fn new(filesystem: FS) -> Arc { + Arc::new_cyclic(|weak| NativeProvider { + weak: weak.clone(), + capabilities: ProviderCapabilities { + has_resources: false, + 
requires_image: false, + prefix_with_full_path: true, + use_default_ports_in_cmd: false, + }, + // NOTE: temp_dir in linux return `/tmp` but on mac something like + // `/var/folders/rz/1cyx7hfj31qgb98d8_cg7jwh0000gn/T/`, having + // one `trailing slash` and the other no can cause issues if + // you try to build a fullpath by concatenate. Use Pathbuf to prevent the issue. + tmp_dir: std::env::temp_dir(), + filesystem, + namespaces: RwLock::new(HashMap::new()), + }) + } + + pub fn tmp_dir(mut self, tmp_dir: impl Into) -> Self { + self.tmp_dir = tmp_dir.into(); + self + } +} + +#[async_trait] +impl Provider for NativeProvider +where + FS: FileSystem + Send + Sync + Clone + 'static, +{ + fn name(&self) -> &str { + PROVIDER_NAME + } + + fn capabilities(&self) -> &ProviderCapabilities { + &self.capabilities + } + + async fn namespaces(&self) -> HashMap { + self.namespaces + .read() + .await + .iter() + .map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace)) + .collect() + } + + async fn create_namespace(&self) -> Result { + let namespace = NativeNamespace::new( + &self.weak, + &self.tmp_dir, + &self.capabilities, + &self.filesystem, + None, + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_with_base_dir( + &self, + base_dir: &Path, + ) -> Result { + let namespace = NativeNamespace::new( + &self.weak, + &self.tmp_dir, + &self.capabilities, + &self.filesystem, + Some(base_dir), + ) + .await?; + + self.namespaces + .write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } + + async fn create_namespace_from_json( + &self, + json_value: &serde_json::Value, + ) -> Result { + let (base_dir, name) = extract_namespace_info(json_value)?; + + let namespace = NativeNamespace::attach_to_live( + &self.weak, + &self.capabilities, + &self.filesystem, + &base_dir, + &name, + ) + .await?; + + self.namespaces + 
.write() + .await + .insert(namespace.name().to_string(), namespace.clone()); + + Ok(namespace) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared.rs new file mode 100644 index 00000000..d6f7eab3 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared.rs @@ -0,0 +1,3 @@ +pub mod constants; +pub mod helpers; +pub mod types; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/constants.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/constants.rs new file mode 100644 index 00000000..65dc583e --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/constants.rs @@ -0,0 +1,22 @@ +use std::net::{IpAddr, Ipv4Addr}; + +/// Namespace prefix +pub const NAMESPACE_PREFIX: &str = "zombie-"; +/// Directory for node configuration +pub const NODE_CONFIG_DIR: &str = "/cfg"; +/// Directory for node data dir +pub const NODE_DATA_DIR: &str = "/data"; +/// Directory for node relay data dir +pub const NODE_RELAY_DATA_DIR: &str = "/relay-data"; +/// Directory for node scripts +pub const NODE_SCRIPTS_DIR: &str = "/scripts"; +/// Localhost ip +pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +/// The port substrate listens for p2p connections on +pub const P2P_PORT: u16 = 30333; +/// The remote port Prometheus can be accessed with +pub const PROMETHEUS_PORT: u16 = 9615; +/// The remote port websocket to access the RPC +pub const RPC_WS_PORT: u16 = 9944; +/// The remote port HTTP to access the RPC +pub const RPC_HTTP_PORT: u16 = 9933; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/helpers.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/helpers.rs new file mode 100644 index 00000000..d12c2caa --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/helpers.rs @@ -0,0 +1,79 @@ +use std::{env, path::PathBuf}; + +use anyhow::anyhow; + +use 
crate::{types::RunCommandOptions, DynNode, ProviderError}; + +/// Check if we are running in `CI` by checking the 'RUN_IN_CI' env var +pub fn running_in_ci() -> bool { + env::var("RUN_IN_CI").unwrap_or_default() == "1" +} + +/// Executes a command on a temporary node and extracts the execution result either from the +/// standard output or a file. +pub async fn extract_execution_result( + temp_node: &DynNode, + options: RunCommandOptions, + expected_path: Option<&PathBuf>, +) -> Result { + let output_contents = temp_node + .run_command(options) + .await? + .map_err(|(_, msg)| ProviderError::FileGenerationFailed(anyhow!("{msg}")))?; + + // If an expected_path is provided, read the file contents from inside the container + if let Some(expected_path) = expected_path.as_ref() { + Ok(temp_node + .run_command( + RunCommandOptions::new("cat") + .args(vec![expected_path.to_string_lossy().to_string()]), + ) + .await? + .map_err(|(_, msg)| { + ProviderError::FileGenerationFailed(anyhow!(format!( + "failed reading expected_path {}: {}", + expected_path.display(), + msg + ))) + })?) 
+ } else { + Ok(output_contents) + } +} + +pub fn extract_namespace_info( + json_value: &serde_json::Value, +) -> Result<(PathBuf, String), ProviderError> { + let base_dir = json_value + .get("local_base_dir") + .and_then(|v| v.as_str()) + .map(PathBuf::from) + .ok_or(ProviderError::InvalidConfig( + "`field local_base_dir` is missing from zombie.json".to_string(), + ))?; + + let name = + json_value + .get("ns") + .and_then(|v| v.as_str()) + .ok_or(ProviderError::InvalidConfig( + "field `ns` is missing from zombie.json".to_string(), + ))?; + + Ok((base_dir, name.to_string())) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn check_runing_in_ci_env_var() { + assert!(!running_in_ci()); + // now set the env var + env::set_var("RUN_IN_CI", "1"); + assert!(running_in_ci()); + // reset + env::set_var("RUN_IN_CI", ""); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/helper-binaries-downloader.sh b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/helper-binaries-downloader.sh new file mode 100644 index 00000000..9d12bca8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/helper-binaries-downloader.sh @@ -0,0 +1,34 @@ +#!/bin/ash + +log() { + echo "$(date +"%F %T") $1" +} + +# used to handle the distinction where /cfg is used for k8s and /helpers for docker/podman +# to share a volume across nodes containing helper binaries and independent from /cfg +# where some node files are stored +OUTDIR=$([ -d /helpers ] && echo "/helpers" || echo "/cfg") + +# Allow to use our image and just cp'd the binaries. 
+ +if [ -f /tmp/curl ]; then + cp /tmp/curl $OUTDIR/curl + log "curl copied" +else + wget github.com/moparisthebest/static-curl/releases/download/v7.83.1/curl-amd64 -O "$OUTDIR/curl" + log "curl downloaded" +fi; + +chmod +x "$OUTDIR/curl" +log "curl chmoded" + +if [ -f /tmp/coreutils ]; then + cp /tmp/coreutils $OUTDIR/coreutils + log "coreutils copied" +else + wget -qO- github.com/uutils/coreutils/releases/download/0.0.17/coreutils-0.0.17-x86_64-unknown-linux-musl.tar.gz | tar -xz -C $OUTDIR --strip-components=1 coreutils-0.0.17-x86_64-unknown-linux-musl/coreutils + log "coreutils downloaded" +fi; + +chmod +x "$OUTDIR/coreutils" +log "coreutils chmoded" diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/zombie-wrapper.sh b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/zombie-wrapper.sh new file mode 100755 index 00000000..656bf455 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/scripts/zombie-wrapper.sh @@ -0,0 +1,178 @@ +#!/bin/bash +set -uxo pipefail + +if [ -f /cfg/coreutils ]; then + RM="/cfg/coreutils rm" + MKFIFO="/cfg/coreutils mkfifo" + MKNOD="/cfg/coreutils mknod" + LS="/cfg/coreutils ls" + KILL="/cfg/coreutils kill" + SLEEP="/cfg/coreutils sleep" + ECHO="/cfg/coreutils echo" +elif [ -f /helpers/coreutils ]; then +# used for docker/podman to have a single volume sharing helper binaries +# across nodes independent from the /cfg where some files are stored +# by the node itself + RM="/helpers/coreutils rm" + MKFIFO="/helpers/coreutils mkfifo" + MKNOD="/helpers/coreutils mknod" + LS="/helpers/coreutils ls" + KILL="/helpers/coreutils kill" + SLEEP="/helpers/coreutils sleep" + ECHO="/helpers/coreutils echo" +else + RM="rm" + MKFIFO="mkfifo" + MKNOD="mknod" + LS="ls" + KILL="kill" + SLEEP="sleep" + ECHO="echo" +fi + +echo "COMMANDS DEFINED" + +# add /cfg as first `looking dir` to allow to overrides commands. 
+export PATH="/cfg":$PATH + +echo "EXPORT PATH" + +# setup pipe +pipe=/tmp/zombiepipe +trap "$RM -f $pipe" EXIT + +# try mkfifo first and allow to fail +if [[ ! -p $pipe ]]; then + $MKFIFO $pipe +fi + +# set immediately exit on any non 0 exit code +set -e + +# if fails try mknod +if [[ ! -p $pipe ]]; then + $MKNOD $pipe p +fi + +echo "PIPE CREATED" + +# init empty +child_pid="" + +# get the command to exec +CMD=($@) + +# File to store CMD (and update from there) +ZOMBIE_CMD_FILE=/tmp/zombie.cmd +ZOMBIE_CMD_PID=/tmp/zombie.pid + +# Store the cmd and make it available to later usage +# NOTE: echo without new line to allow to customize the cmd later +$ECHO -n "${CMD[@]}" > $ZOMBIE_CMD_FILE + +echo "COMMAND TO RUN IS: $CMD" + +start() { + # redirect the output to be expored to loki + "${CMD[@]}" >> /proc/1/fd/1 2>> /proc/1/fd/2 & + if [[ "$CMD" != "cat" ]]; then + child_pid="$!" + + $ECHO $(cat $ZOMBIE_CMD_FILE) + # store pid + $ECHO ${child_pid} > $ZOMBIE_CMD_PID + + # sleep a few secs to detect errors bootstraping the node + sleep 3 + + # check if the process is running + if ! $LS /proc/$child_pid > /dev/null 2>&1 ; then + echo "child process doesn't exist, quiting..."; + exit 1; + else + echo "PID: $child_pid alive"; + fi; + else + echo "Process not started, PID not stored, since was 'cat'"; + fi; + +} + +restart() { + if [ ! -z "${child_pid}" ]; then + $KILL -9 "$child_pid" + fi + + # check if we have timeout + if [[ "$1" ]]; then + $SLEEP "$1" + fi + + start +} + +pause() { + if [ ! -z "${child_pid}" ]; then + echo "send -STOP to process $child_pid" + $KILL -STOP "$child_pid" + echo "result $?" + + # Wait until the process is actually stopped (state 'T') + for i in {1..10}; do + local state + state=$(awk '{print $3}' /proc/$child_pid/stat 2>/dev/null) + if [ "$state" = "T" ]; then + echo "Process $child_pid is paused (state: $state)" + return + fi + $SLEEP 0.2 + done + + echo "Warning: Process $child_pid not paused after SIGSTOP" + fi +} + +resume() { + if [ ! 
-z "${child_pid}" ]; then + echo "send -CONT to process $child_pid" + $KILL -CONT "$child_pid" + echo "result $?" + + # Wait until the process is actually resumed (state not 'T') + for i in {1..10}; do + local state + state=$(awk '{print $3}' /proc/$child_pid/stat 2>/dev/null) + if [ "$state" != "T" ] && [ -n "$state" ]; then + echo "Process $child_pid is resumed (state: $state)" + return + fi + $SLEEP 0.2 + done + + echo "Warning: Process $child_pid not resumed after SIGCONT" + fi +} + +# keep listening from the pipe +while read line <$pipe +echo "read line: ${line}" +do + if [[ "$line" == "start" ]]; then + start + elif [[ "$line" == "quit" ]]; then + break + elif [[ "$line" =~ "restart" ]]; then + # check if we have timeout between restart + if [[ $line =~ [^0-9]+([0-9]+) ]]; then + restart "${BASH_REMATCH[1]}" + else + restart 0 + fi; + elif [[ "$line" == "pause" ]]; then + pause + elif [[ "$line" == "resume" ]]; then + resume + fi +done + +exit 0 diff --git a/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/types.rs b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/types.rs new file mode 100644 index 00000000..82d53b04 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/provider/src/shared/types.rs @@ -0,0 +1,375 @@ +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + process::ExitStatus, +}; + +use configuration::{shared::resources::Resources, types::AssetLocation}; +use serde::{Deserialize, Serialize}; + +pub type Port = u16; + +pub type ExecutionResult = Result; + +#[derive(Debug, Clone, PartialEq)] +pub struct ProviderCapabilities { + // default ports internal + /// Ensure that we have an image for each node (k8s/podman/docker) + pub requires_image: bool, + /// Allow to customize the resources through manifest (k8s). + pub has_resources: bool, + /// Used in native to prefix filepath with fullpath + pub prefix_with_full_path: bool, + /// Use default ports in node cmd/args. 
+ /// NOTE: generally used in k8s/dockers since the images expose those ports. + pub use_default_ports_in_cmd: bool, +} + +#[derive(Debug, Clone)] +pub struct SpawnNodeOptions { + /// Name of the node + pub name: String, + /// Image of the node (IFF is supported by the provider) + pub image: Option, + /// Resources to apply to the node (IFF is supported by the provider) + pub resources: Option, + /// Main command to execute + pub program: String, + /// Arguments to pass to the main command + pub args: Vec, + /// Environment to set when running the `program` + pub env: Vec<(String, String)>, + // TODO: rename startup_files + /// Files to inject at startup + pub injected_files: Vec, + /// Paths to create before start the node (e.g keystore) + /// should be created with `create_dir_all` in order + /// to create the full path even when we have missing parts + pub created_paths: Vec, + /// Database snapshot to be injected (should be a tgz file) + /// Could be a local or remote asset + pub db_snapshot: Option, + pub port_mapping: Option>, + /// Optionally specify a log path for the node + pub node_log_path: Option, +} + +impl SpawnNodeOptions { + pub fn new(name: S, program: S) -> Self + where + S: AsRef, + { + Self { + name: name.as_ref().to_string(), + image: None, + resources: None, + program: program.as_ref().to_string(), + args: vec![], + env: vec![], + injected_files: vec![], + created_paths: vec![], + db_snapshot: None, + port_mapping: None, + node_log_path: None, + } + } + + pub fn image(mut self, image: S) -> Self + where + S: AsRef, + { + self.image = Some(image.as_ref().to_string()); + self + } + + pub fn resources(mut self, resources: Resources) -> Self { + self.resources = Some(resources); + self + } + + pub fn db_snapshot(mut self, db_snap: Option) -> Self { + self.db_snapshot = db_snap; + self + } + + pub fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + 
self + } + + pub fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } + + pub fn injected_files(mut self, injected_files: I) -> Self + where + I: IntoIterator, + { + self.injected_files = injected_files.into_iter().collect(); + self + } + + pub fn created_paths(mut self, created_paths: I) -> Self + where + P: AsRef, + I: IntoIterator, + { + self.created_paths = created_paths + .into_iter() + .map(|path| path.as_ref().into()) + .collect(); + self + } + + pub fn port_mapping(mut self, ports: HashMap) -> Self { + self.port_mapping = Some(ports); + self + } + + pub fn node_log_path(mut self, path: Option) -> Self { + self.node_log_path = path; + self + } +} + +#[derive(Debug)] +pub struct GenerateFileCommand { + pub program: String, + pub args: Vec, + pub env: Vec<(String, String)>, + pub local_output_path: PathBuf, +} + +impl GenerateFileCommand { + pub fn new(program: S, local_output_path: P) -> Self + where + S: AsRef, + P: AsRef, + { + Self { + program: program.as_ref().to_string(), + args: vec![], + env: vec![], + local_output_path: local_output_path.as_ref().into(), + } + } + + pub fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + pub fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +#[derive(Debug)] +pub struct GenerateFilesOptions { + pub commands: Vec, + pub image: Option, + pub injected_files: Vec, + // Allow to control the name of the node used to create the files. 
+ pub temp_name: Option, + pub expected_path: Option, +} + +impl GenerateFilesOptions { + pub fn new(commands: I, image: Option, expected_path: Option) -> Self + where + I: IntoIterator, + { + Self { + commands: commands.into_iter().collect(), + injected_files: vec![], + image, + temp_name: None, + expected_path, + } + } + + pub fn with_files( + commands: I, + image: Option, + injected_files: &[TransferedFile], + expected_path: Option, + ) -> Self + where + I: IntoIterator, + { + Self { + commands: commands.into_iter().collect(), + injected_files: injected_files.into(), + image, + temp_name: None, + expected_path, + } + } + + pub fn image(mut self, image: S) -> Self + where + S: AsRef, + { + self.image = Some(image.as_ref().to_string()); + self + } + + pub fn injected_files(mut self, injected_files: I) -> Self + where + I: IntoIterator, + { + self.injected_files = injected_files.into_iter().collect(); + self + } + + pub fn temp_name(mut self, name: impl Into) -> Self { + self.temp_name = Some(name.into()); + self + } +} + +#[derive(Debug)] +pub struct RunCommandOptions { + pub program: String, + pub args: Vec, + pub env: Vec<(String, String)>, +} + +impl RunCommandOptions { + pub fn new(program: S) -> Self + where + S: AsRef, + { + Self { + program: program.as_ref().to_string(), + args: vec![], + env: vec![], + } + } + + pub fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + pub fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +pub struct RunScriptOptions { + pub local_script_path: PathBuf, + pub args: Vec, + pub env: Vec<(String, String)>, +} + +impl RunScriptOptions { + pub fn new

(local_script_path: P) -> Self + where + P: AsRef, + { + Self { + local_script_path: local_script_path.as_ref().into(), + args: vec![], + env: vec![], + } + } + + pub fn args(mut self, args: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect(); + self + } + + pub fn env(mut self, env: I) -> Self + where + S: AsRef, + I: IntoIterator, + { + self.env = env + .into_iter() + .map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string())) + .collect(); + self + } +} + +// TODO(team): I think we can rename it to FileMap? +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransferedFile { + pub local_path: PathBuf, + pub remote_path: PathBuf, + // TODO: Can be narrowed to have strict typing on this? + pub mode: String, +} + +impl TransferedFile { + pub fn new

(local_path: P, remote_path: P) -> Self + where + P: AsRef, + { + Self { + local_path: local_path.as_ref().into(), + remote_path: remote_path.as_ref().into(), + mode: "0644".to_string(), // default to rw-r--r-- + } + } + + pub fn mode(mut self, mode: S) -> Self + where + S: AsRef, + { + self.mode = mode.as_ref().to_string(); + self + } +} + +impl std::fmt::Display for TransferedFile { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "File to transfer (local: {}, remote: {})", + self.local_path.display(), + self.remote_path.display() + ) + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/sdk/Cargo.toml new file mode 100644 index 00000000..9a5bc46d --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "zombienet-sdk" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Zombienet SDK, entrypoint for using zombienet" +keywords = ["zombienet", "sdk"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = { workspace = true } +tokio = { workspace = true } +futures = { workspace = true } +lazy_static = { workspace = true } +pezkuwi-subxt = { workspace = true } +pezkuwi-subxt-signer = { workspace = true, features = ["subxt"] } + +# Zombienet deps +configuration = { workspace = true } +orchestrator = { workspace = true } +provider = { workspace = true } +support = { workspace = true } + +[dev-dependencies] +tracing-subscriber = { workspace = true } +kube = { workspace = true, features = ["ws", "runtime"] } +k8s-openapi = { workspace = true, features = ["v1_27"] } +serde_json = {workspace = true } diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/environment.rs 
b/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/environment.rs new file mode 100644 index 00000000..21eda890 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/environment.rs @@ -0,0 +1,90 @@ +//! Helpers functions to get configuration (e.g. Provider and images) from the env vars +use std::{env, future::Future, path::PathBuf, pin::Pin}; + +use crate::{ + AttachToLive, AttachToLiveNetwork, LocalFileSystem, Network, NetworkConfig, NetworkConfigExt, + OrchestratorError, +}; + +const DEFAULT_POLKADOT_IMAGE: &str = "docker.io/parity/polkadot:latest"; +const DEFAULT_CUMULUS_IMAGE: &str = "docker.io/parity/polkadot-parachain:latest"; + +#[derive(Debug, Default)] +pub struct Images { + pub polkadot: String, + pub cumulus: String, +} + +impl Images { + /// Alias for polkadot field - returns reference to pezkuwi/polkadot image + pub fn pezkuwi(&self) -> &str { + &self.polkadot + } + + /// Alias for cumulus field - returns reference to pezcumulus/cumulus image + pub fn pezcumulus(&self) -> &str { + &self.cumulus + } +} + +pub enum Provider { + Native, + K8s, + Docker, +} + +impl Provider { + pub fn get_spawn_fn( + &self, + ) -> fn(NetworkConfig) -> Pin + Send>> { + match self { + Provider::Native => NetworkConfigExt::spawn_native, + Provider::K8s => NetworkConfigExt::spawn_k8s, + Provider::Docker => NetworkConfigExt::spawn_docker, + } + } +} + +// Use `docker` as default provider +impl From for Provider { + fn from(value: String) -> Self { + match value.to_ascii_lowercase().as_ref() { + "native" => Provider::Native, + "k8s" => Provider::K8s, + _ => Provider::Docker, // default provider + } + } +} + +pub fn get_images_from_env() -> Images { + let polkadot = env::var("POLKADOT_IMAGE").unwrap_or(DEFAULT_POLKADOT_IMAGE.into()); + let cumulus = env::var("CUMULUS_IMAGE").unwrap_or(DEFAULT_CUMULUS_IMAGE.into()); + Images { polkadot, cumulus } +} + +pub fn get_provider_from_env() -> Provider { + env::var("ZOMBIE_PROVIDER").unwrap_or_default().into() +} + +pub type 
SpawnResult = Result, OrchestratorError>; +pub fn get_spawn_fn() -> fn(NetworkConfig) -> Pin + Send>> { + let provider = get_provider_from_env(); + + match provider { + Provider::Native => NetworkConfigExt::spawn_native, + Provider::K8s => NetworkConfigExt::spawn_k8s, + Provider::Docker => NetworkConfigExt::spawn_docker, + } +} + +pub type AttachResult = Result, OrchestratorError>; + +pub fn get_attach_fn() -> fn(PathBuf) -> Pin + Send>> { + let provider = get_provider_from_env(); + + match provider { + Provider::Native => AttachToLiveNetwork::attach_native, + Provider::K8s => AttachToLiveNetwork::attach_k8s, + Provider::Docker => AttachToLiveNetwork::attach_docker, + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/lib.rs new file mode 100644 index 00000000..ce0cb212 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/src/lib.rs @@ -0,0 +1,131 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +pub use configuration::{ + GlobalSettings, GlobalSettingsBuilder, NetworkConfig, NetworkConfigBuilder, + RegistrationStrategy, WithRelaychain, +}; +pub use orchestrator::{ + errors::OrchestratorError, + network::{node::NetworkNode, Network}, + sc_chain_spec, AddCollatorOptions, AddNodeOptions, Orchestrator, +}; + +// Helpers used for interact with the network +pub mod tx_helper { + pub use orchestrator::{ + network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions, + }; +} + +use provider::{DockerProvider, KubernetesProvider, NativeProvider}; +pub use support::fs::local::LocalFileSystem; + +pub mod environment; +pub const PROVIDERS: [&str; 3] = ["k8s", "native", "docker"]; + +// re-export pezkuwi-subxt (with subxt alias for backwards compatibility) +pub use pezkuwi_subxt; +pub use pezkuwi_subxt as subxt; +pub use pezkuwi_subxt_signer; +pub use pezkuwi_subxt_signer as subxt_signer; + +#[async_trait] +pub trait NetworkConfigExt { + /// Spawns a network using the native or k8s 
provider. + /// + /// # Example: + /// ```rust + /// # use zombienet_sdk::{NetworkConfig, NetworkConfigExt}; + /// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> { + /// let network = NetworkConfig::load_from_toml("config.toml")? + /// .spawn_native() + /// .await?; + /// # Ok(()) + /// # } + /// ``` + async fn spawn_native(self) -> Result, OrchestratorError>; + async fn spawn_k8s(self) -> Result, OrchestratorError>; + async fn spawn_docker(self) -> Result, OrchestratorError>; +} + +#[async_trait] +pub trait AttachToLive { + /// Attaches to a running live network using the native, docker or k8s provider. + /// + /// # Example: + /// ```rust + /// # use zombienet_sdk::{AttachToLive, AttachToLiveNetwork}; + /// # use std::path::PathBuf; + /// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> { + /// let zombie_json_path = PathBuf::from("some/path/zombie.json"); + /// let network = AttachToLiveNetwork::attach_native(zombie_json_path).await?; + /// # Ok(()) + /// # } + /// ``` + async fn attach_native( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError>; + async fn attach_k8s( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError>; + async fn attach_docker( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError>; +} + +#[async_trait] +impl NetworkConfigExt for NetworkConfig { + async fn spawn_native(self) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = NativeProvider::new(filesystem.clone()); + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.spawn(self).await + } + + async fn spawn_k8s(self) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = KubernetesProvider::new(filesystem.clone()).await; + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.spawn(self).await + } + + async fn spawn_docker(self) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = 
DockerProvider::new(filesystem.clone()).await; + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.spawn(self).await + } +} + +pub struct AttachToLiveNetwork; + +#[async_trait] +impl AttachToLive for AttachToLiveNetwork { + async fn attach_native( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = NativeProvider::new(filesystem.clone()); + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.attach_to_live(zombie_json_path.as_ref()).await + } + + async fn attach_k8s( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = KubernetesProvider::new(filesystem.clone()).await; + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.attach_to_live(zombie_json_path.as_ref()).await + } + + async fn attach_docker( + zombie_json_path: PathBuf, + ) -> Result, OrchestratorError> { + let filesystem = LocalFileSystem; + let provider = DockerProvider::new(filesystem.clone()).await; + let orchestrator = Orchestrator::new(filesystem, provider); + orchestrator.attach_to_live(zombie_json_path.as_ref()).await + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_omni_node.rs b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_omni_node.rs new file mode 100644 index 00000000..6a4f5106 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_omni_node.rs @@ -0,0 +1,75 @@ +use futures::StreamExt; +use zombienet_sdk::{environment::get_spawn_fn, NetworkConfigBuilder}; + +const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; + +#[tokio::test(flavor = "multi_thread")] +async fn rococo_local_with_omni_node_and_wasm_runtime() { + let _ = tracing_subscriber::fmt::try_init(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("rococo-local") + 
.with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|parachain| { + parachain + .with_id(100).cumulus_based(true) + .with_chain("asset-hub-rococo-local") + .with_default_command("polkadot-omni-node") + .with_default_image("docker.io/parity/polkadot-omni-node:latest") + .with_chain_spec_runtime("https://github.com/polkadot-fellows/runtimes/releases/download/v1.9.2/asset-hub-polkadot_runtime-v1009002.compact.compressed.wasm", None ) + .with_collator(|collator| collator.with_name("omni-collator-1")) + .with_collator(|collator| collator.with_name("omni-collator-2")) + }) + .build() + .unwrap(); + + let spawn_fn = get_spawn_fn(); + let network = spawn_fn(config).await.unwrap(); + + println!("🚀🚀🚀🚀 network deployed"); + + // wait 2 blocks + let alice = network.get_node("alice").unwrap(); + assert!(alice + .wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64) + .await + .is_ok()); + + // omni-collator-1 + let collator = network.get_node("omni-collator-1").unwrap(); + let client = collator + .wait_client::() + .await + .unwrap(); + + // wait 1 blocks + let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1); + while let Some(block) = blocks.next().await { + println!( + "Block (omni-collator-1) #{}", + block.unwrap().header().number + ); + } + + // omni-collator-2 + let collator = network.get_node("omni-collator-2").unwrap(); + let client = collator + .wait_client::() + .await + .unwrap(); + + // wait 1 blocks + let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1); + while let Some(block) = blocks.next().await { + println!( + "Block (omni-collator-2) #{}", + block.unwrap().header().number + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_polkadot.rs b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_polkadot.rs new file 
mode 100644 index 00000000..1aba06d5 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/chain_spec_runtime_polkadot.rs @@ -0,0 +1,76 @@ +use futures::StreamExt; +use zombienet_sdk::{environment::get_spawn_fn, NetworkConfigBuilder}; + +const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; + +#[tokio::test(flavor = "multi_thread")] +async fn polkadot_local_with_chain_spec_runtime() { + let _ = tracing_subscriber::fmt::try_init(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|relaychain| { + relaychain + .with_chain("polkadot-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:latest") + .with_chain_spec_runtime("https://github.com/polkadot-fellows/runtimes/releases/download/v1.9.3/polkadot_runtime-v1009003.compact.compressed.wasm", None) + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|parachain| { + parachain + .with_id(100) + .with_chain("asset-hub-polkadot-local") + .with_default_command("polkadot-parachain") + .with_default_image("docker.io/parity/polkadot-parachain:latest") + .with_chain_spec_runtime("https://github.com/polkadot-fellows/runtimes/releases/download/v1.9.2/asset-hub-polkadot_runtime-v1009002.compact.compressed.wasm", None) + .with_collator(|collator| collator.with_name("asset-hub-collator-1")) + .with_collator(|collator| collator.with_name("asset-hub-collator-2")) + }) + .build() + .unwrap(); + + let spawn_fn = get_spawn_fn(); + let network = spawn_fn(config).await.unwrap(); + + println!("🚀🚀🚀🚀 network deployed"); + + // wait 2 blocks + let alice = network.get_node("alice").unwrap(); + assert!(alice + .wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64) + .await + .is_ok()); + + // asset-hub-collator-1 + let collator = network.get_node("asset-hub-collator-1").unwrap(); + let client = collator + .wait_client::() + .await + .unwrap(); + + // wait 1 blocks + let mut blocks = 
client.blocks().subscribe_finalized().await.unwrap().take(1); + while let Some(block) = blocks.next().await { + println!( + "Block (asset-hub-collator-1) #{}", + block.unwrap().header().number + ); + } + + // asset-hub-collator-2 + let collator = network.get_node("asset-hub-collator-2").unwrap(); + let client = collator + .wait_client::() + .await + .unwrap(); + + // wait 1 blocks + let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1); + while let Some(block) = blocks.next().await { + println!( + "Block (asset-hub-collator-2) #{}", + block.unwrap().header().number + ); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke-native.rs b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke-native.rs new file mode 100644 index 00000000..b21f0195 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke-native.rs @@ -0,0 +1,57 @@ +use std::time::Instant; + +use configuration::{NetworkConfig, NetworkConfigBuilder}; +use zombienet_sdk::environment::get_spawn_fn; + +fn small_network() -> NetworkConfig { + NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:v1.20.2") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(2000) + .cumulus_based(true) + .with_default_image("docker.io/parity/polkadot-parachain:v1.20.2") + .with_collator(|n| n.with_name("collator").with_command("polkadot-parachain")) + }) + .with_parachain(|p| { + p.with_id(3000) + .cumulus_based(true) + .with_default_image("docker.io/parity/polkadot-omni-node:v1.20.2") + .with_chain_spec_runtime("https://github.com/polkadot-fellows/runtimes/releases/download/v1.9.2/asset-hub-polkadot_runtime-v1009002.compact.compressed.wasm", None) + .with_collator(|n| n.with_name("collator-omni").with_command("polkadot-omni-node")) + }) + .build() + .unwrap() +} + 
+#[tokio::test(flavor = "multi_thread")] +async fn ci_native_smoke_should_works() { + tracing_subscriber::fmt::init(); + const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; + let now = Instant::now(); + let config = small_network(); + let spawn_fn = get_spawn_fn(); + + let network = spawn_fn(config).await.unwrap(); + + let elapsed = now.elapsed(); + println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}"); + + network.wait_until_is_up(20).await.unwrap(); + + let elapsed = now.elapsed(); + println!("✅✅✅✅ network is up in {elapsed:.2?}"); + + // Get a ref to the node + let alice = network.get_node("alice").unwrap(); + // wait 10 blocks + alice + .wait_metric(BEST_BLOCK_METRIC, |x| x > 9_f64) + .await + .unwrap(); +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke.rs b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke.rs new file mode 100644 index 00000000..4c7ac263 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/smoke.rs @@ -0,0 +1,179 @@ +use std::{path::PathBuf, time::Instant}; + +use configuration::{NetworkConfig, NetworkConfigBuilder}; +use futures::{stream::StreamExt, try_join}; +use orchestrator::{AddCollatorOptions, AddNodeOptions}; +use zombienet_sdk::environment::{get_attach_fn, get_spawn_fn}; + +fn small_network() -> NetworkConfig { + NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:v1.20.2") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(2000).cumulus_based(true).with_collator(|n| { + n.with_name("collator") + .with_command("polkadot-parachain") + .with_image("docker.io/parity/polkadot-parachain:1.7.0") + }) + }) + .with_parachain(|p| { + p.with_id(3000).cumulus_based(true).with_collator(|n| { + n.with_name("collator-new") + .with_command("polkadot-parachain") + 
.with_image("docker.io/parity/polkadot-parachain:v1.20.2") + }) + }) + .with_global_settings(|g| { + g.with_base_dir(PathBuf::from("/tmp/zombie-1")) + .with_tear_down_on_failure(false) + }) + .build() + .unwrap() +} + +#[tokio::test(flavor = "multi_thread")] +async fn ci_k8s_basic_functionalities_should_works() { + let _ = tracing_subscriber::fmt::try_init(); + + const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; + let now = Instant::now(); + + let config = small_network(); + let spawn_fn = get_spawn_fn(); + + let network = spawn_fn(config).await.unwrap(); + + let elapsed = now.elapsed(); + println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}"); + + // detach and attach to running + network.detach().await; + drop(network); + let attach_fn = get_attach_fn(); + let zombie_path = PathBuf::from("/tmp/zombie-1/zombie.json"); + let mut network = attach_fn(zombie_path).await.unwrap(); + + // Get a ref to the node + let alice = network.get_node("alice").unwrap(); + + let (_best_block_pass, client) = try_join!( + alice.wait_metric(BEST_BLOCK_METRIC, |x| x > 5_f64), + alice.wait_client::() + ) + .unwrap(); + + alice + .wait_log_line_count("*rted #1*", true, 10) + .await + .unwrap(); + + // check best block through metrics with timeout + assert!(alice + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 10_f64, 45_u32) + .await + .is_ok()); + + // ensure timeout error + let best_block = alice.reports(BEST_BLOCK_METRIC).await.unwrap(); + let res = alice + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > (best_block * 2_f64), 10_u32) + .await; + + assert!(res.is_err()); + + // get single metric + let role = alice.reports("node_roles").await.unwrap(); + println!("Role is {role}"); + assert_eq!(role, 4.0); + + // subxt + // wait 3 blocks + let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3); + while let Some(block) = blocks.next().await { + println!("Block #{}", block.unwrap().header().number); + } + + // drop the client + drop(client); + 
+ // check best block through metrics + let best_block = alice + .reports("block_height{status=\"best\"}") + .await + .unwrap(); + + assert!(best_block >= 2.0, "Current best {best_block}"); + + // collator + let collator = network.get_node("collator").unwrap(); + let client = collator + .wait_client::() + .await + .unwrap(); + + // wait 3 blocks + let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3); + while let Some(block) = blocks.next().await { + println!("Block (para) #{}", block.unwrap().header().number); + } + + // add node + let opts = AddNodeOptions { + rpc_port: Some(9444), + is_validator: true, + ..Default::default() + }; + + network.add_node("new1", opts).await.unwrap(); + + // add collator + let col_opts = AddCollatorOptions { + command: Some("polkadot-parachain".try_into().unwrap()), + image: Some( + "docker.io/parity/polkadot-parachain:1.7.0" + .try_into() + .unwrap(), + ), + ..Default::default() + }; + + network + .add_collator("new-col-1", col_opts, 2000) + .await + .unwrap(); + + // pause / resume + let alice = network.get_node("alice").unwrap(); + alice.pause().await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let res_err = alice + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32) + .await; + + assert!(res_err.is_err()); + + alice.resume().await.unwrap(); + alice + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32) + .await + .unwrap(); + + // timeout connecting ws + let collator = network.get_node("collator").unwrap(); + collator.pause().await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let r = collator + .wait_client_with_timeout::(1_u32) + .await; + assert!(r.is_err()); + + // tear down (optional if you don't detach the network) + network.destroy().await.unwrap(); +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/two-paras-same-id.rs 
b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/two-paras-same-id.rs new file mode 100644 index 00000000..9273dadf --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/sdk/tests/two-paras-same-id.rs @@ -0,0 +1,44 @@ +use zombienet_sdk::{environment::get_spawn_fn, NetworkConfigBuilder}; + +#[tokio::test(flavor = "multi_thread")] +async fn two_paras_same_id() { + tracing_subscriber::fmt::init(); + let spawn_fn = get_spawn_fn(); + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image("docker.io/parity/polkadot:v1.7.0") + .with_validator(|node| node.with_name("alice")) + .with_validator(|node| node.with_name("bob")) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("polkadot-parachain") + .with_default_image("docker.io/parity/polkadot-parachain:1.7.0") + .with_collator(|n| n.with_name("collator")) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("polkadot-parachain") + .with_default_image("docker.io/parity/polkadot-parachain:1.7.0") + .with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual) + .with_collator(|n| n.with_name("collator1")) + }) + .build() + .unwrap(); + + let network = spawn_fn(config).await.unwrap(); + + assert!(network.get_node("collator").is_ok()); + assert!(network.get_node("collator1").is_ok()); + + // First parachain (out of two) is fetched + assert_eq!(network.parachain(2000).unwrap().unique_id(), "2000"); + + // First and second parachain hav the same para_id + assert_eq!( + network.parachain_by_unique_id("2000").unwrap().para_id(), + network.parachain_by_unique_id("2000-1").unwrap().para_id(), + ); +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/.gitignore b/vendor/pezkuwi-zombienet-sdk/crates/support/.gitignore new file mode 100644 index 00000000..4fffb2f8 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff 
--git a/vendor/pezkuwi-zombienet-sdk/crates/support/Cargo.toml b/vendor/pezkuwi-zombienet-sdk/crates/support/Cargo.toml new file mode 100644 index 00000000..40f979eb --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "zombienet-support" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +license.workspace = true +repository.workspace = true +description = "Support crates with common traits/structs and helpers" +keywords = ["zombienet"] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +thiserror = { workspace = true } +anyhow = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +reqwest = { workspace = true } +tokio = { workspace = true, features = ["full"] } +uuid = { workspace = true, features = ["v4"] } +nix = { workspace = true, features = ["signal"] } +rand = { workspace = true } +regex = { workspace = true } +tracing = { workspace = true } +lazy_static = { workspace = true } +serde_json = { workspace = true } diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/constants.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/constants.rs new file mode 100644 index 00000000..94203589 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/constants.rs @@ -0,0 +1,24 @@ +pub const VALID_REGEX: &str = "regex should be valid "; +pub const BORROWABLE: &str = "must be borrowable as mutable "; +pub const RELAY_NOT_NONE: &str = "typestate should ensure the relaychain isn't None at this point "; +pub const SHOULD_COMPILE: &str = "should compile with success "; +pub const INFAILABLE: &str = "infaillible "; +pub const NO_ERR_DEF_BUILDER: &str = "should have no errors for default builder "; +pub const RW_FAILED: &str = "should be able to read/write - failed "; +pub const DEFAULT_TYPESTATE: &str = "'default' overriding 
should be ensured by typestate "; +pub const VALIDATION_CHECK: &str = "validation failed "; + +pub const PREFIX_CANT_BE_NONE: &str = "name prefix can't be None if a value exists "; + +pub const GRAPH_CONTAINS_NAME: &str = + "graph contains node name; we initialize it with all node names"; +pub const GRAPH_CONTAINS_DEP: &str = "graph contains dep_name; we filter out deps not contained in by_name and populate the graph with all nodes"; +pub const INDEGREE_CONTAINS_NAME: &str = + "indegree contains node name; we initialize it with all node names"; +pub const QUEUE_NOT_EMPTY: &str = "queue is not empty; we're looping over its length"; + +pub const THIS_IS_A_BUG: &str = + "- this is a bug please report it: https://github.com/paritytech/zombienet-sdk/issues"; + +/// environment variable which can be used to override node spawn timeout +pub const ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS: &str = "ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS"; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs.rs new file mode 100644 index 00000000..952486e0 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs.rs @@ -0,0 +1,60 @@ +use std::path::Path; + +use async_trait::async_trait; + +pub mod in_memory; +pub mod local; + +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct FileSystemError(#[from] anyhow::Error); + +impl From for FileSystemError { + fn from(error: std::io::Error) -> Self { + Self(error.into()) + } +} + +pub type FileSystemResult = Result; + +#[async_trait] +pub trait FileSystem { + async fn create_dir

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send; + + async fn create_dir_all

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send; + + async fn read

(&self, path: P) -> FileSystemResult> + where + P: AsRef + Send; + + async fn read_to_string

(&self, path: P) -> FileSystemResult + where + P: AsRef + Send; + + async fn write(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send; + + async fn append(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send; + + async fn copy(&self, from: P1, to: P2) -> FileSystemResult<()> + where + P1: AsRef + Send, + P2: AsRef + Send; + + async fn set_mode

(&self, path: P, perm: u32) -> FileSystemResult<()> + where + P: AsRef + Send; + + async fn exists

(&self, path: P) -> bool + where + P: AsRef + Send; +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs/in_memory.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs/in_memory.rs new file mode 100644 index 00000000..727b14a7 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/fs/in_memory.rs @@ -0,0 +1,879 @@ +use std::{collections::HashMap, ffi::OsString, path::Path, sync::Arc}; + +use anyhow::anyhow; +use async_trait::async_trait; +use tokio::sync::RwLock; + +use super::{FileSystem, FileSystemResult}; + +#[derive(Debug, Clone, PartialEq)] +pub enum InMemoryFile { + File { mode: u32, contents: Vec }, + Directory { mode: u32 }, +} + +impl InMemoryFile { + pub fn file(contents: C) -> Self + where + C: AsRef, + { + Self::file_raw(contents.as_ref()) + } + + pub fn file_raw(contents: C) -> Self + where + C: AsRef<[u8]>, + { + Self::File { + mode: 0o664, + contents: contents.as_ref().to_vec(), + } + } + + pub fn empty() -> Self { + Self::file_raw(vec![]) + } + + pub fn dir() -> Self { + Self::Directory { mode: 0o775 } + } + + pub fn mode(&self) -> u32 { + match *self { + Self::File { mode, .. } => mode, + Self::Directory { mode, .. } => mode, + } + } + + pub fn contents_raw(&self) -> Option> { + match self { + Self::File { contents, .. } => Some(contents.to_vec()), + Self::Directory { .. } => None, + } + } + + pub fn contents(&self) -> Option { + match self { + Self::File { contents, .. } => Some(String::from_utf8_lossy(contents).to_string()), + Self::Directory { .. } => None, + } + } +} + +#[derive(Default, Debug, Clone)] +pub struct InMemoryFileSystem { + pub files: Arc>>, +} + +impl InMemoryFileSystem { + pub fn new(files: HashMap) -> Self { + Self { + files: Arc::new(RwLock::new(files)), + } + } +} + +#[async_trait] +impl FileSystem for InMemoryFileSystem { + async fn create_dir

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send, + { + let path = path.as_ref(); + let os_path = path.as_os_str(); + match self.files.read().await.get(os_path) { + Some(InMemoryFile::File { .. }) => { + Err(anyhow!("file {:?} already exists", os_path.to_owned(),))? + }, + Some(InMemoryFile::Directory { .. }) => { + Err(anyhow!("directory {:?} already exists", os_path.to_owned(),))? + }, + None => {}, + }; + + for path in path.ancestors().skip(1) { + match self.files.read().await.get(path.as_os_str()) { + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. }) => Err(anyhow!( + "ancestor {:?} is not a directory", + path.as_os_str(), + ))?, + None => Err(anyhow!("ancestor {:?} doesn't exists", path.as_os_str(),))?, + }; + } + + self.files + .write() + .await + .insert(os_path.to_owned(), InMemoryFile::dir()); + + Ok(()) + } + + async fn create_dir_all

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send, + { + let path = path.as_ref(); + let mut files = self.files.write().await; + let ancestors = path + .ancestors() + .collect::>() + .into_iter() + .rev() + .skip(1); + + for path in ancestors { + match files.get(path.as_os_str()) { + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. }) => Err(anyhow!( + "ancestor {:?} is not a directory", + path.as_os_str().to_owned(), + ))?, + None => files.insert(path.as_os_str().to_owned(), InMemoryFile::dir()), + }; + } + + Ok(()) + } + + async fn read

(&self, path: P) -> FileSystemResult> + where + P: AsRef + Send, + { + let os_path = path.as_ref().as_os_str(); + + match self.files.read().await.get(os_path) { + Some(InMemoryFile::File { contents, .. }) => Ok(contents.clone()), + Some(InMemoryFile::Directory { .. }) => { + Err(anyhow!("file {os_path:?} is a directory").into()) + }, + None => Err(anyhow!("file {os_path:?} not found").into()), + } + } + + async fn read_to_string

(&self, path: P) -> FileSystemResult + where + P: AsRef + Send, + { + let os_path = path.as_ref().as_os_str().to_owned(); + let content = self.read(path).await?; + + String::from_utf8(content) + .map_err(|_| anyhow!("invalid utf-8 encoding for file {os_path:?}").into()) + } + + async fn write(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send, + { + let path = path.as_ref(); + let os_path = path.as_os_str(); + let mut files = self.files.write().await; + + for path in path.ancestors().skip(1) { + match files.get(path.as_os_str()) { + Some(InMemoryFile::Directory { .. }) => continue, + Some(InMemoryFile::File { .. }) => Err(anyhow!( + "ancestor {:?} is not a directory", + path.as_os_str() + ))?, + None => Err(anyhow!("ancestor {:?} doesn't exists", path.as_os_str()))?, + }; + } + + if let Some(InMemoryFile::Directory { .. }) = files.get(os_path) { + return Err(anyhow!("file {os_path:?} is a directory").into()); + } + + files.insert(os_path.to_owned(), InMemoryFile::file_raw(contents)); + + Ok(()) + } + + async fn append(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send, + { + let path = path.as_ref(); + let mut existing_contents = match self.read(path).await { + Ok(existing_contents) => existing_contents, + Err(err) if err.to_string() == format!("file {:?} not found", path.as_os_str()) => { + vec![] + }, + Err(err) => Err(err)?, + }; + existing_contents.append(&mut contents.as_ref().to_vec()); + + self.write(path, existing_contents).await + } + + async fn copy(&self, from: P1, to: P2) -> FileSystemResult<()> + where + P1: AsRef + Send, + P2: AsRef + Send, + { + let from_ref = from.as_ref(); + let to_ref = to.as_ref(); + let content = self.read(from_ref).await?; + + self.write(to_ref, content).await + } + + async fn set_mode

(&self, path: P, mode: u32) -> FileSystemResult<()> + where + P: AsRef + Send, + { + let os_path = path.as_ref().as_os_str(); + if let Some(file) = self.files.write().await.get_mut(os_path) { + match file { + InMemoryFile::File { mode: old_mode, .. } => { + *old_mode = mode; + }, + InMemoryFile::Directory { mode: old_mode, .. } => { + *old_mode = mode; + }, + }; + Ok(()) + } else { + Err(anyhow!("file {os_path:?} not found").into()) + } + } + + async fn exists

// Unit tests for the in-memory filesystem: one test per operation/outcome,
// asserting both the resulting map state and the exact error messages.
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;

    #[tokio::test]
    async fn create_dir_should_create_a_directory_at_root() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));

        fs.create_dir("/dir").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 2);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/dir").unwrap()).unwrap(),
            InMemoryFile::Directory { mode } if *mode == 0o775
        ));
    }

    #[tokio::test]
    async fn create_dir_should_return_an_error_if_directory_already_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/dir").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.create_dir("/dir").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "directory \"/dir\" already exists");
    }

    #[tokio::test]
    async fn create_dir_should_return_an_error_if_file_already_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/dir").unwrap(), InMemoryFile::empty()),
        ]));

        let err = fs.create_dir("/dir").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "file \"/dir\" already exists");
    }

    #[tokio::test]
    async fn create_dir_should_create_a_directory_if_all_ancestors_exist() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to/my").unwrap(), InMemoryFile::dir()),
        ]));

        fs.create_dir("/path/to/my/dir").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 5);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/path/to/my/dir").unwrap()).unwrap(),
            InMemoryFile::Directory { mode } if *mode == 0o775
        ));
    }

    #[tokio::test]
    async fn create_dir_should_return_an_error_if_some_directory_ancestor_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.create_dir("/path/to/my/dir").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 3);
        assert_eq!(err.to_string(), "ancestor \"/path/to/my\" doesn't exists");
    }

    #[tokio::test]
    async fn create_dir_should_return_an_error_if_some_ancestor_is_not_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::empty()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to/my").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.create_dir("/path/to/my/dir").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 4);
        assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory");
    }

    #[tokio::test]
    async fn create_dir_all_should_create_a_directory_and_all_its_ancestors_if_they_dont_exist() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));

        fs.create_dir_all("/path/to/my/dir").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 5);
        for created in ["/path", "/path/to", "/path/to/my", "/path/to/my/dir"] {
            assert!(matches!(
                fs.files.read().await.get(&OsString::from_str(created).unwrap()).unwrap(),
                InMemoryFile::Directory { mode } if *mode == 0o775
            ));
        }
    }

    #[tokio::test]
    async fn create_dir_all_should_create_a_directory_and_some_of_its_ancestors_if_they_dont_exist()
    {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        fs.create_dir_all("/path/to/my/dir").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 5);
        for created in ["/path/to/my", "/path/to/my/dir"] {
            assert!(matches!(
                fs.files.read().await.get(&OsString::from_str(created).unwrap()).unwrap(),
                InMemoryFile::Directory { mode } if *mode == 0o775
            ));
        }
    }

    #[tokio::test]
    async fn create_dir_all_should_return_an_error_if_some_ancestor_is_not_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::empty()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.create_dir_all("/path/to/my/dir").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 3);
        assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory");
    }

    #[tokio::test]
    async fn read_should_return_the_file_content() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::file("content"),
        )]));

        let content = fs.read("/myfile").await.unwrap();

        assert_eq!(content, "content".as_bytes().to_vec());
    }

    #[tokio::test]
    async fn read_should_return_an_error_if_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::new());

        let err = fs.read("/myfile").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" not found");
    }

    #[tokio::test]
    async fn read_should_return_an_error_if_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::dir(),
        )]));

        let err = fs.read("/myfile").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" is a directory");
    }

    #[tokio::test]
    async fn read_to_string_should_return_the_file_content_as_a_string() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::file("content"),
        )]));

        let content = fs.read_to_string("/myfile").await.unwrap();

        assert_eq!(content, "content");
    }

    #[tokio::test]
    async fn read_to_string_should_return_an_error_if_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::new());

        let err = fs.read_to_string("/myfile").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" not found");
    }

    #[tokio::test]
    async fn read_to_string_should_return_an_error_if_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::dir(),
        )]));

        let err = fs.read_to_string("/myfile").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" is a directory");
    }

    #[tokio::test]
    async fn read_to_string_should_return_an_error_if_file_isnt_utf8_encoded() {
        // 0xC3 0x28 is an invalid UTF-8 byte sequence.
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::file_raw(vec![0xC3, 0x28]),
        )]));

        let err = fs.read_to_string("/myfile").await.unwrap_err();

        assert_eq!(err.to_string(), "invalid utf-8 encoding for file \"/myfile\"");
    }

    #[tokio::test]
    async fn write_should_create_file_with_content_if_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));

        fs.write("/myfile", "my file content").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 2);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()),
            Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my file content".as_bytes()
        ));
    }

    #[tokio::test]
    async fn write_should_overwrite_file_content_if_file_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
        ]));

        fs.write("/myfile", "my new file content").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 2);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()),
            Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my new file content".as_bytes()
        ));
    }

    #[tokio::test]
    async fn write_should_return_an_error_if_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.write("/myfile", "my file content").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "file \"/myfile\" is a directory");
    }

    #[tokio::test]
    async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.write("/path/to/myfile", "my file content").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "ancestor \"/path\" doesn't exists");
    }

    #[tokio::test]
    async fn write_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::empty()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.write("/path/to/myfile", "my file content").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 3);
        assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory");
    }

    #[tokio::test]
    async fn append_should_update_file_content_if_file_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
        ]));

        fs.append("/myfile", " has been updated with new things").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 2);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()),
            Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my file content has been updated with new things".as_bytes()
        ));
    }

    #[tokio::test]
    async fn append_should_create_file_with_content_if_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));

        fs.append("/myfile", "my file content").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 2);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()),
            Some(InMemoryFile::File { mode, contents, .. }) if *mode == 0o664 && contents == "my file content".as_bytes()
        ));
    }

    #[tokio::test]
    async fn append_should_return_an_error_if_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/myfile").unwrap(),
            InMemoryFile::dir(),
        )]));

        let err = fs.append("/myfile", "my file content").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" is a directory");
    }

    #[tokio::test]
    async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.append("/path/to/myfile", "my file content").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "ancestor \"/path\" doesn't exists");
    }

    #[tokio::test]
    async fn append_should_return_an_error_if_file_is_new_and_some_ancestor_is_not_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/path").unwrap(), InMemoryFile::empty()),
            (OsString::from_str("/path/to").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.append("/path/to/myfile", "my file content").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 3);
        assert_eq!(err.to_string(), "ancestor \"/path\" is not a directory");
    }

    #[tokio::test]
    async fn copy_should_creates_new_destination_file_if_it_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
        ]));

        fs.copy("/myfile", "/myfilecopy").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 3);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(),
            InMemoryFile::File { mode, contents, .. } if *mode == 0o664 && contents == "my file content".as_bytes()
        ));
    }

    #[tokio::test]
    async fn copy_should_updates_the_file_content_of_the_destination_file_if_it_already_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my new file content")),
            (OsString::from_str("/myfilecopy").unwrap(), InMemoryFile::file("my file content")),
        ]));

        fs.copy("/myfile", "/myfilecopy").await.unwrap();

        assert_eq!(fs.files.read().await.len(), 3);
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfilecopy").unwrap()).unwrap(),
            InMemoryFile::File { mode, contents, .. } if *mode == 0o664 && contents == "my new file content".as_bytes()
        ));
    }

    #[tokio::test]
    async fn copy_should_returns_an_error_if_source_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));

        let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" not found");
    }

    #[tokio::test]
    async fn copy_should_returns_an_error_if_source_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.copy("/myfile", "/mfilecopy").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" is a directory");
    }

    #[tokio::test]
    async fn copy_should_returns_an_error_if_destination_file_is_a_directory() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
            (OsString::from_str("/myfilecopy").unwrap(), InMemoryFile::dir()),
        ]));

        let err = fs.copy("/myfile", "/myfilecopy").await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfilecopy\" is a directory");
    }

    #[tokio::test]
    async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_doesnt_exists(
    ) {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
        ]));

        let err = fs.copy("/myfile", "/somedir/myfilecopy").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 2);
        assert_eq!(err.to_string(), "ancestor \"/somedir\" doesn't exists");
    }

    #[tokio::test]
    async fn copy_should_returns_an_error_if_destination_file_is_new_and_some_ancestor_is_not_a_directory(
    ) {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
            (OsString::from_str("/mypath").unwrap(), InMemoryFile::empty()),
        ]));

        let err = fs.copy("/myfile", "/mypath/myfilecopy").await.unwrap_err();

        assert_eq!(fs.files.read().await.len(), 3);
        assert_eq!(err.to_string(), "ancestor \"/mypath\" is not a directory");
    }

    #[tokio::test]
    async fn set_mode_should_update_the_file_mode_at_path() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/myfile").unwrap(), InMemoryFile::file("my file content")),
        ]));
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()).unwrap(),
            InMemoryFile::File { mode, .. } if *mode == 0o664
        ));

        fs.set_mode("/myfile", 0o400).await.unwrap();

        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/myfile").unwrap()).unwrap(),
            InMemoryFile::File { mode, .. } if *mode == 0o400
        ));
    }

    #[tokio::test]
    async fn set_mode_should_update_the_directory_mode_at_path() {
        let fs = InMemoryFileSystem::new(HashMap::from([
            (OsString::from_str("/").unwrap(), InMemoryFile::dir()),
            (OsString::from_str("/mydir").unwrap(), InMemoryFile::dir()),
        ]));
        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/mydir").unwrap()).unwrap(),
            InMemoryFile::Directory { mode } if *mode == 0o775
        ));

        fs.set_mode("/mydir", 0o700).await.unwrap();

        assert!(matches!(
            fs.files.read().await.get(&OsString::from_str("/mydir").unwrap()).unwrap(),
            InMemoryFile::Directory { mode } if *mode == 0o700
        ));
    }

    #[tokio::test]
    async fn set_mode_should_returns_an_error_if_file_doesnt_exists() {
        let fs = InMemoryFileSystem::new(HashMap::from([(
            OsString::from_str("/").unwrap(),
            InMemoryFile::dir(),
        )]));
        // intentionally forget to create file

        let err = fs.set_mode("/myfile", 0o400).await.unwrap_err();

        assert_eq!(err.to_string(), "file \"/myfile\" not found");
    }
}

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send, + { + tokio::fs::create_dir(path).await.map_err(Into::into) + } + + async fn create_dir_all

(&self, path: P) -> FileSystemResult<()> + where + P: AsRef + Send, + { + tokio::fs::create_dir_all(path).await.map_err(Into::into) + } + + async fn read

(&self, path: P) -> FileSystemResult> + where + P: AsRef + Send, + { + tokio::fs::read(path).await.map_err(Into::into) + } + + async fn read_to_string

(&self, path: P) -> FileSystemResult + where + P: AsRef + Send, + { + tokio::fs::read_to_string(path).await.map_err(Into::into) + } + + async fn write(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send, + { + tokio::fs::write(path, contents).await.map_err(Into::into) + } + + async fn append(&self, path: P, contents: C) -> FileSystemResult<()> + where + P: AsRef + Send, + C: AsRef<[u8]> + Send, + { + let contents = contents.as_ref(); + let mut file = tokio::fs::OpenOptions::new() + .create(true) + .append(true) + .open(path) + .await + .map_err(Into::::into)?; + + file.write_all(contents) + .await + .map_err(Into::::into)?; + + file.flush().await.and(Ok(())).map_err(Into::into) + } + + async fn copy(&self, from: P1, to: P2) -> FileSystemResult<()> + where + P1: AsRef + Send, + P2: AsRef + Send, + { + tokio::fs::copy(from, to) + .await + .and(Ok(())) + .map_err(Into::into) + } + + async fn set_mode

(&self, path: P, mode: u32) -> FileSystemResult<()> + where + P: AsRef + Send, + { + tokio::fs::set_permissions(path, Permissions::from_mode(mode)) + .await + .map_err(Into::into) + } + + async fn exists

// Unit tests for LocalFileSystem: each test works inside a unique /tmp
// directory created by `setup()` and removed by `teardown()`. Error-message
// assertions are Linux-specific (errno strings).
#[cfg(test)]
mod tests {
    use uuid::Uuid;

    use super::*;

    // st_mode type bits for regular files and directories (Unix).
    const FILE_BITS: u32 = 0o100000;
    const DIR_BITS: u32 = 0o40000;

    fn setup() -> String {
        let test_dir = format!("/tmp/unit_test_{}", Uuid::new_v4());
        std::fs::create_dir(&test_dir).unwrap();
        test_dir
    }

    fn teardown(test_dir: String) {
        std::fs::remove_dir_all(test_dir).unwrap();
    }

    #[tokio::test]
    async fn create_dir_should_create_a_new_directory_at_path() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let new_dir = format!("{test_dir}/mynewdir");
        fs.create_dir(&new_dir).await.unwrap();

        let new_dir_path = Path::new(&new_dir);
        assert!(new_dir_path.exists() && new_dir_path.is_dir());
        teardown(test_dir);
    }

    #[tokio::test]
    async fn create_dir_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let new_dir = format!("{test_dir}/mynewdir");
        // intentionally create new dir before calling function to force error
        std::fs::create_dir(&new_dir).unwrap();
        let err = fs.create_dir(&new_dir).await.unwrap_err();

        assert_eq!(err.to_string(), "File exists (os error 17)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn create_dir_all_should_create_a_new_directory_and_all_of_it_ancestors_at_path() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let new_dir = format!("{test_dir}/the/path/to/mynewdir");
        fs.create_dir_all(&new_dir).await.unwrap();

        let new_dir_path = Path::new(&new_dir);
        assert!(new_dir_path.exists() && new_dir_path.is_dir());
        teardown(test_dir);
    }

    #[tokio::test]
    async fn create_dir_all_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let new_dir = format!("{test_dir}/the/path/to/mynewdir");
        // intentionally create new file as ancestor before calling function to force error
        std::fs::write(format!("{test_dir}/the"), b"test").unwrap();
        let err = fs.create_dir_all(&new_dir).await.unwrap_err();

        assert_eq!(err.to_string(), "Not a directory (os error 20)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn read_should_return_the_contents_of_the_file_at_path() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        std::fs::write(&file_path, b"Test").unwrap();
        let contents = fs.read(file_path).await.unwrap();

        assert_eq!(contents, b"Test");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn read_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        // intentionally forget to create file to force error
        let err = fs.read(file_path).await.unwrap_err();

        assert_eq!(err.to_string(), "No such file or directory (os error 2)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn read_to_string_should_return_the_contents_of_the_file_at_path_as_string() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        std::fs::write(&file_path, b"Test").unwrap();
        let contents = fs.read_to_string(file_path).await.unwrap();

        assert_eq!(contents, "Test");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn read_to_string_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        // intentionally forget to create file to force error
        let err = fs.read_to_string(file_path).await.unwrap_err();

        assert_eq!(err.to_string(), "No such file or directory (os error 2)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn write_should_create_a_new_file_at_path_with_contents() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        fs.write(&file_path, "Test").await.unwrap();

        assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn write_should_overwrite_an_existing_file_with_contents() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        std::fs::write(&file_path, "Test").unwrap();
        assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
        fs.write(&file_path, "Test updated").await.unwrap();

        assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn write_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        // intentionally create directory instead of file to force error
        std::fs::create_dir(&file_path).unwrap();
        let err = fs.write(&file_path, "Test").await.unwrap_err();

        assert_eq!(err.to_string(), "Is a directory (os error 21)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn append_should_create_a_new_file_at_path_with_contents() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        fs.append(&file_path, "Test").await.unwrap();

        assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn append_should_updates_an_existing_file_by_appending_contents() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        std::fs::write(&file_path, "Test").unwrap();
        assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
        fs.append(&file_path, " updated").await.unwrap();

        assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn append_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let file_path = format!("{test_dir}/myfile");
        // intentionally create directory instead of file to force error
        std::fs::create_dir(&file_path).unwrap();
        let err = fs.append(&file_path, "Test").await.unwrap_err();

        assert_eq!(err.to_string(), "Is a directory (os error 21)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn copy_should_create_a_duplicate_of_source() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let from_path = format!("{test_dir}/myfile");
        std::fs::write(&from_path, "Test").unwrap();
        let to_path = format!("{test_dir}/mycopy");
        fs.copy(&from_path, &to_path).await.unwrap();

        assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Test");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn copy_should_ovewrite_destination_if_alread_exists() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let from_path = format!("{test_dir}/myfile");
        std::fs::write(&from_path, "Test").unwrap();
        let to_path = format!("{test_dir}/mycopy");
        std::fs::write(&from_path, "Some content").unwrap();
        fs.copy(&from_path, &to_path).await.unwrap();

        assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Some content");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn copy_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;

        let from_path = format!("{test_dir}/nonexistentfile");
        let to_path = format!("{test_dir}/mycopy");
        let err = fs.copy(&from_path, &to_path).await.unwrap_err();

        assert_eq!(err.to_string(), "No such file or directory (os error 2)");
        teardown(test_dir);
    }

    #[tokio::test]
    async fn set_mode_should_update_the_file_mode_at_path() {
        let test_dir = setup();
        let fs = LocalFileSystem;
        let path = format!("{test_dir}/myfile");
        std::fs::write(&path, "Test").unwrap();
        assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0o400));

        fs.set_mode(&path, 0o400).await.unwrap();

        assert_eq!(
            std::fs::metadata(&path).unwrap().permissions().mode(),
            FILE_BITS + 0o400
        );
        teardown(test_dir);
    }

    #[tokio::test]
    async fn set_mode_should_update_the_directory_mode_at_path() {
        let test_dir = setup();
        let fs = LocalFileSystem;
        let path = format!("{test_dir}/mydir");
        std::fs::create_dir(&path).unwrap();
        assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700));

        fs.set_mode(&path, 0o700).await.unwrap();

        assert_eq!(
            std::fs::metadata(&path).unwrap().permissions().mode(),
            DIR_BITS + 0o700
        );
        teardown(test_dir);
    }

    #[tokio::test]
    async fn set_mode_should_bubble_up_error_if_some_happens() {
        let test_dir = setup();
        let fs = LocalFileSystem;
        let path = format!("{test_dir}/somemissingfile");
        // intentionnally don't create file

        let err = fs.set_mode(&path, 0o400).await.unwrap_err();

        assert_eq!(err.to_string(), "No such file or directory (os error 2)");
        teardown(test_dir);
    }
}
+ async fn set_mode_should_update_the_directory_mode_at_path() { + let test_dir = setup(); + let fs = LocalFileSystem; + let path = format!("{test_dir}/mydir"); + std::fs::create_dir(&path).unwrap(); + assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700)); + + fs.set_mode(&path, 0o700).await.unwrap(); + + assert_eq!( + std::fs::metadata(&path).unwrap().permissions().mode(), + DIR_BITS + 0o700 + ); + teardown(test_dir); + } + + #[tokio::test] + async fn set_mode_should_bubble_up_error_if_some_happens() { + let test_dir = setup(); + let fs = LocalFileSystem; + let path = format!("{test_dir}/somemissingfile"); + // intentionnally don't create file + + let err = fs.set_mode(&path, 0o400).await.unwrap_err(); + + assert_eq!(err.to_string(), "No such file or directory (os error 2)"); + teardown(test_dir); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/lib.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/lib.rs new file mode 100644 index 00000000..b0b8e81b --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/lib.rs @@ -0,0 +1,4 @@ +pub mod constants; +pub mod fs; +pub mod net; +pub mod replacer; diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/net.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/net.rs new file mode 100644 index 00000000..7869b024 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/net.rs @@ -0,0 +1,60 @@ +use std::{io::Cursor, str::FromStr, time::Duration}; + +use reqwest::{Method, Request, StatusCode, Url}; +use tracing::trace; + +use crate::constants::THIS_IS_A_BUG; + +type Result = std::result::Result>; + +pub async fn download_file(url: String, dest: String) -> Result<()> { + let response = reqwest::get(url).await?; + let mut file = std::fs::File::create(dest)?; + let mut content = Cursor::new(response.bytes().await?); + std::io::copy(&mut content, &mut file)?; + Ok(()) +} + +pub async fn wait_ws_ready(url: &str) -> Result<()> { + let mut parsed = 
Url::from_str(url)?; + parsed + .set_scheme("http") + .map_err(|_| anyhow::anyhow!("Can not set the scheme, {THIS_IS_A_BUG}"))?; + + let http_client = reqwest::Client::new(); + loop { + let req = Request::new(Method::OPTIONS, parsed.clone()); + let res = http_client.execute(req).await; + match res { + Ok(res) => { + if res.status() == StatusCode::OK { + // ready to go! + break; + } + + trace!("http_client status: {}, continuing...", res.status()); + }, + Err(e) => { + if !skip_err_while_waiting(&e) { + return Err(e.into()); + } + + trace!("http_client err: {}, continuing... ", e.to_string()); + }, + } + + tokio::time::sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + +pub fn skip_err_while_waiting(e: &reqwest::Error) -> bool { + // if the error is connecting/request could be the case that the node + // is not listening yet, so we keep waiting + // Skipped errs like: + // 'tcp connect error: Connection refused (os error 61)' + // 'operation was canceled: connection closed before message completed' + // 'connection error: Connection reset by peer (os error 54)' + e.is_connect() || e.is_request() +} diff --git a/vendor/pezkuwi-zombienet-sdk/crates/support/src/replacer.rs b/vendor/pezkuwi-zombienet-sdk/crates/support/src/replacer.rs new file mode 100644 index 00000000..4f5b1b56 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/crates/support/src/replacer.rs @@ -0,0 +1,197 @@ +use std::collections::{HashMap, HashSet}; + +use lazy_static::lazy_static; +use regex::{Captures, Regex}; +use tracing::{trace, warn}; + +use crate::constants::{SHOULD_COMPILE, THIS_IS_A_BUG}; + +lazy_static! 
{ + static ref RE: Regex = Regex::new(r#"\{\{([a-zA-Z0-9_]*)\}\}"#) + .unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}")); + static ref TOKEN_PLACEHOLDER: Regex = Regex::new(r#"\{\{ZOMBIE:(.*?):(.*?)\}\}"#) + .unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}")); + static ref PLACEHOLDER_COMPAT: HashMap<&'static str, &'static str> = { + let mut m = HashMap::new(); + m.insert("multiAddress", "multiaddr"); + m.insert("wsUri", "ws_uri"); + m.insert("prometheusUri", "prometheus_uri"); + + m + }; +} + +/// Return true if the text contains any TOKEN_PLACEHOLDER +pub fn has_tokens(text: &str) -> bool { + TOKEN_PLACEHOLDER.is_match(text) +} + +pub fn apply_replacements(text: &str, replacements: &HashMap<&str, &str>) -> String { + let augmented_text = RE.replace_all(text, |caps: &Captures| { + if let Some(replacements_value) = replacements.get(&caps[1]) { + replacements_value.to_string() + } else { + caps[0].to_string() + } + }); + + augmented_text.to_string() +} + +pub fn apply_env_replacements(text: &str) -> String { + let augmented_text = RE.replace_all(text, |caps: &Captures| { + if let Ok(replacements_value) = std::env::var(&caps[1]) { + replacements_value + } else { + caps[0].to_string() + } + }); + + augmented_text.to_string() +} + +pub fn apply_running_network_replacements(text: &str, network: &serde_json::Value) -> String { + let augmented_text = TOKEN_PLACEHOLDER.replace_all(text, |caps: &Captures| { + trace!("appling replacements for caps: {caps:#?}"); + if let Some(node) = network.get(&caps[1]) { + trace!("caps1 {} - node: {node}", &caps[1]); + let field = *PLACEHOLDER_COMPAT.get(&caps[2]).unwrap_or(&&caps[2]); + if let Some(val) = node.get(field) { + trace!("caps2 {} - node: {node}", field); + val.as_str().unwrap_or("Invalid string").to_string() + } else { + warn!( + "⚠️ The node with name {} doesn't have the value {} in context", + &caps[1], &caps[2] + ); + caps[0].to_string() + } + } else { + warn!("⚠️ No node with name {} in context", 
&caps[1]); + caps[0].to_string() + } + }); + + augmented_text.to_string() +} + +pub fn get_tokens_to_replace(text: &str) -> HashSet { + let mut tokens = HashSet::new(); + + TOKEN_PLACEHOLDER + .captures_iter(text) + .for_each(|caps: Captures| { + tokens.insert(caps[1].to_string()); + }); + + tokens +} + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn replace_should_works() { + let text = "some {{namespace}}"; + let mut replacements = HashMap::new(); + replacements.insert("namespace", "demo-123"); + let res = apply_replacements(text, &replacements); + assert_eq!("some demo-123".to_string(), res); + } + + #[test] + fn replace_env_should_works() { + let text = "some {{namespace}}"; + std::env::set_var("namespace", "demo-123"); + // let mut replacements = HashMap::new(); + // replacements.insert("namespace", "demo-123"); + let res = apply_env_replacements(text); + assert_eq!("some demo-123".to_string(), res); + } + + #[test] + fn replace_multiple_should_works() { + let text = r#"some {{namespace}} + other is {{other}}"#; + let augmented_text = r#"some demo-123 + other is other-123"#; + + let mut replacements = HashMap::new(); + replacements.insert("namespace", "demo-123"); + replacements.insert("other", "other-123"); + let res = apply_replacements(text, &replacements); + assert_eq!(augmented_text, res); + } + + #[test] + fn replace_multiple_with_missing_should_works() { + let text = r#"some {{namespace}} + other is {{other}}"#; + let augmented_text = r#"some demo-123 + other is {{other}}"#; + + let mut replacements = HashMap::new(); + replacements.insert("namespace", "demo-123"); + + let res = apply_replacements(text, &replacements); + assert_eq!(augmented_text, res); + } + + #[test] + fn replace_without_replacement_should_leave_text_unchanged() { + let text = "some {{namespace}}"; + let mut replacements = HashMap::new(); + replacements.insert("other", "demo-123"); + let res = apply_replacements(text, &replacements); + 
assert_eq!(text.to_string(), res); + } + + #[test] + fn replace_running_network_should_work() { + let network = json!({ + "alice" : { + "multiaddr": "some/demo/127.0.0.1" + } + }); + + let res = apply_running_network_replacements("{{ZOMBIE:alice:multiaddr}}", &network); + assert_eq!(res.as_str(), "some/demo/127.0.0.1"); + } + + #[test] + fn replace_running_network_with_compat_should_work() { + let network = json!({ + "alice" : { + "multiaddr": "some/demo/127.0.0.1" + } + }); + + let res = apply_running_network_replacements("{{ZOMBIE:alice:multiAddress}}", &network); + assert_eq!(res.as_str(), "some/demo/127.0.0.1"); + } + + #[test] + fn replace_running_network_with_missing_field_should_not_replace_nothing() { + let network = json!({ + "alice" : { + "multiaddr": "some/demo/127.0.0.1" + } + }); + + let res = apply_running_network_replacements("{{ZOMBIE:alice:someField}}", &network); + assert_eq!(res.as_str(), "{{ZOMBIE:alice:someField}}"); + } + + #[test] + fn get_tokens_to_replace_should_work() { + let res = get_tokens_to_replace("{{ZOMBIE:alice:multiaddr}} {{ZOMBIE:bob:multiaddr}}"); + let mut expected = HashSet::new(); + expected.insert("alice".to_string()); + expected.insert("bob".to_string()); + + assert_eq!(res, expected); + } +} diff --git a/vendor/pezkuwi-zombienet-sdk/rustfmt.toml b/vendor/pezkuwi-zombienet-sdk/rustfmt.toml new file mode 100644 index 00000000..5b09d4d6 --- /dev/null +++ b/vendor/pezkuwi-zombienet-sdk/rustfmt.toml @@ -0,0 +1,26 @@ +# https://rust-lang.github.io/rustfmt/?version=v1.7.0 + +# general +indent_style = "Block" + +# rewriting +condense_wildcard_suffixes = true +match_block_trailing_comma = true +use_field_init_shorthand = true +use_try_shorthand = true + +# normalization +normalize_comments = true +normalize_doc_attributes = true + +# reordering +reorder_impl_items = true +reorder_imports = true +reorder_modules = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" + +# additional formating 
+format_code_in_doc_comments = true +format_macro_matchers = true +format_macro_bodies = true \ No newline at end of file

, +) -> Result, RpcError> { + use jsonrpsee::wasm_client::WasmClientBuilder; + + let RpcClientBuilder { + id_kind, + max_concurrent_requests, + max_log_len, + request_timeout, + .. + } = builder; + + let ws_client_builder = WasmClientBuilder::new() + .max_buffer_capacity_per_subscription(tokio::sync::Semaphore::MAX_PERMITS) + .max_concurrent_requests(*max_concurrent_requests as usize) + .set_max_logging_length(*max_log_len) + .request_timeout(*request_timeout) + .id_format(*id_kind); + + let client = ws_client_builder.build(url.as_str()).await?; + + Ok(Arc::new(client)) +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/tests.rs b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/tests.rs new file mode 100644 index 00000000..cada9df0 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/tests.rs @@ -0,0 +1,271 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use super::*; +use futures::{future::Either, FutureExt}; + +use jsonrpsee::core::BoxError; +use jsonrpsee::server::{ + http, stop_channel, ws, ConnectionGuard, ConnectionState, HttpRequest, HttpResponse, RpcModule, + RpcServiceBuilder, ServerConfig, SubscriptionMessage, +}; + +#[tokio::test] +async fn call_works() { + let (_handle, addr) = run_server().await.unwrap(); + let client = RpcClient::builder().build(addr).await.unwrap(); + assert!(client.request("say_hello".to_string(), None).await.is_ok(),) +} + +#[tokio::test] +async fn sub_works() { + let (_handle, addr) = run_server().await.unwrap(); + + let client = RpcClient::builder() + .retry_policy(ExponentialBackoff::from_millis(50)) + .build(addr) + .await + .unwrap(); + + let mut sub = client + .subscribe( + "subscribe_lo".to_string(), + None, + "unsubscribe_lo".to_string(), + ) + .await + .unwrap(); + + assert!(sub.next().await.is_some()); +} + +#[tokio::test] +async fn sub_with_reconnect() { + let (handle, addr) = run_server().await.unwrap(); + let client = RpcClient::builder().build(addr.clone()).await.unwrap(); + + let sub = client + .subscribe( + "subscribe_lo".to_string(), + None, + "unsubscribe_lo".to_string(), + ) + .await + .unwrap(); + + // Tell server to shut down. + let _ = handle.send(()); + + // Drain any values from the subscription. We should end with a DisconnectedWillReconnect error, + // so that subscriptions have the opportunity to react to the fact that we were disconnected. + let sub_ended_with_disconnect_err = sub.fold(false, async |_, next| matches!(next, Err(DisconnectedWillReconnect(_)))); + let sub_ended_with_disconnect_err = tokio::time::timeout(tokio::time::Duration::from_secs(5), sub_ended_with_disconnect_err) + .await + .expect("timeout should not be hit"); + + assert!(sub_ended_with_disconnect_err, "DisconnectedWillReconnect err was last message in sub"); + + // Start a new server at the same address as the old one. 
(This will wait a bit for the addr to be free) + let (_handle, _) = run_server_with_settings(Some(&addr), false).await.unwrap(); + + // Hack to wait for the server to restart. + tokio::time::sleep(Duration::from_millis(100)).await; + + // We can subscribe again on the same client and it should work. + let mut sub = client + .subscribe( + "subscribe_lo".to_string(), + None, + "unsubscribe_lo".to_string(), + ) + .await + .unwrap(); + + assert!(matches!(sub.next().await, Some(Ok(_)))); +} + +#[tokio::test] +async fn call_with_reconnect() { + let (handle, addr) = run_server_with_settings(None, true).await.unwrap(); + + let client = Arc::new(RpcClient::builder().build(addr.clone()).await.unwrap()); + + let req_fut = client.request("say_hello".to_string(), None).boxed(); + let timeout_fut = tokio::time::sleep(Duration::from_secs(5)); + + // If the call isn't replied in 5 secs then it's regarded as it's still pending. + let req_fut = match futures::future::select(Box::pin(timeout_fut), req_fut).await { + Either::Left((_, f)) => f, + Either::Right(_) => panic!("RPC call finished"), + }; + + // Close the connection with a pending call. + let _ = handle.send(()); + + // Restart the server + let (_handle, _) = run_server_with_settings(Some(&addr), false).await.unwrap(); + + // Hack to wait for the server to restart. + tokio::time::sleep(Duration::from_millis(100)).await; + + // This call should fail because reconnect. + assert!(req_fut.await.is_err()); + // Future call should work after reconnect. 
+ assert!(client.request("say_hello".to_string(), None).await.is_ok()); +} + +async fn run_server() -> Result<(tokio::sync::broadcast::Sender<()>, String), BoxError> { + run_server_with_settings(None, false).await +} + +async fn run_server_with_settings( + url: Option<&str>, + dont_respond_to_method_calls: bool, +) -> Result<(tokio::sync::broadcast::Sender<()>, String), BoxError> { + use jsonrpsee::server::HttpRequest; + + let sockaddr = match url { + Some(url) => url.strip_prefix("ws://").unwrap(), + None => "127.0.0.1:0", + }; + + let mut i = 0; + + let listener = loop { + if let Ok(l) = tokio::net::TcpListener::bind(sockaddr).await { + break l; + } + tokio::time::sleep(Duration::from_millis(100)).await; + + if i >= 100 { + panic!("Addr already in use"); + } + + i += 1; + }; + + let mut module = RpcModule::new(()); + + if dont_respond_to_method_calls { + module.register_async_method("say_hello", |_, _, _| async { + futures::future::pending::<()>().await; + "timeout" + })?; + } else { + module.register_async_method("say_hello", |_, _, _| async { "lo" })?; + } + + module.register_subscription( + "subscribe_lo", + "subscribe_lo", + "unsubscribe_lo", + |_params, pending, _ctx, _| async move { + let sink = pending.accept().await.unwrap(); + let i = 0; + + loop { + if sink + .send(SubscriptionMessage::from_json(&i).unwrap()) + .await + .is_err() + { + break; + } + tokio::time::sleep(std::time::Duration::from_secs(6)).await; + } + }, + )?; + + let (tx, mut rx) = tokio::sync::broadcast::channel(4); + let tx2 = tx.clone(); + let (stop_handle, server_handle) = stop_channel(); + let addr = listener.local_addr().expect("Could not find local addr"); + + tokio::spawn(async move { + loop { + let sock = tokio::select! 
{ + res = listener.accept() => { + match res { + Ok((stream, _remote_addr)) => stream, + Err(e) => { + tracing::error!("Failed to accept connection: {:?}", e); + continue; + } + } + } + _ = rx.recv() => { + break + } + }; + + let module = module.clone(); + let rx2 = tx2.subscribe(); + let tx2 = tx2.clone(); + let stop_handle2 = stop_handle.clone(); + + let svc = tower::service_fn(move |req: HttpRequest| { + let module = module.clone(); + let tx = tx2.clone(); + let stop_handle = stop_handle2.clone(); + + let conn_permit = ConnectionGuard::new(1).try_acquire().unwrap(); + + if ws::is_upgrade_request(&req) { + let rpc_service = RpcServiceBuilder::new(); + let conn = ConnectionState::new(stop_handle, 1, conn_permit); + + async move { + let mut rx = tx.subscribe(); + + let (rp, conn_fut) = + ws::connect(req, ServerConfig::default(), module, conn, rpc_service) + .await + .unwrap(); + + tokio::spawn(async move { + tokio::select! { + _ = conn_fut => (), + _ = rx.recv() => {}, + } + }); + + Ok::<_, BoxError>(rp) + } + .boxed() + } else { + async { Ok(http::response::denied()) }.boxed() + } + }); + + tokio::spawn(serve_with_graceful_shutdown(sock, svc, rx2)); + } + + drop(server_handle); + }); + + Ok((tx, format!("ws://{addr}"))) +} + +async fn serve_with_graceful_shutdown( + io: I, + service: S, + mut rx: tokio::sync::broadcast::Receiver<()>, +) where + S: tower::Service, Response = HttpResponse> + + Clone + + Send + + 'static, + S::Future: Send, + S::Response: Send, + S::Error: Into, + B: http_body::Body + Send + 'static, + B::Error: Into, + I: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Unpin + 'static, +{ + if let Err(e) = + jsonrpsee::server::serve_with_graceful_shutdown(io, service, rx.recv().map(|_| ())).await + { + tracing::error!("Error while serving: {:?}", e); + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/utils.rs b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/utils.rs new file mode 100644 index 
00000000..54304036 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/reconnecting_rpc_client/utils.rs @@ -0,0 +1,14 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Utils. + +use super::RpcError; + +pub fn display_close_reason(err: &RpcError) -> String { + match err { + RpcError::RestartNeeded(e) => e.to_string(), + other => other.to_string(), + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/round_robin_rpc_client.rs b/vendor/pezkuwi-subxt/rpcs/src/client/round_robin_rpc_client.rs new file mode 100644 index 00000000..95cebecc --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/round_robin_rpc_client.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a [`RoundRobinRpcClient`], which is useful for load balancing +//! requests across multiple RPC clients. +//! +//! # Example +//! +//! ```rust,no_run +//! # async fn foo() -> Result<(), Box> { +//! use pezkuwi_subxt_rpcs::client::{RpcClient, RoundRobinRpcClient, jsonrpsee_client}; +//! +//! // Construct some RpcClients (we'll make some jsonrpsee clients here, but +//! // you could use anything which implements `RpcClientT`). +//! let client1 = jsonrpsee_client("http://localhost:8080").await.unwrap(); +//! let client2 = jsonrpsee_client("http://localhost:8081").await.unwrap(); +//! let client3 = jsonrpsee_client("http://localhost:8082").await.unwrap(); +//! +//! let round_robin_client = RoundRobinRpcClient::new(vec![client1, client2, client3]); +//! +//! // Build an RPC Client that can be used in Subxt or in conjunction with +//! // the RPC methods provided in this crate. +//! let rpc_client = RpcClient::new(round_robin_client); +//! # Ok(()) +//! # } +//! 
``` + +use super::{RawRpcFuture, RawRpcSubscription, RpcClientT}; +use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, +}; + +/// A simple RPC client which is provided a set of clients on initialization and +/// will round-robin through them for each request. +#[derive(Clone, Debug)] +pub struct RoundRobinRpcClient { + inner: Arc>, +} + +#[derive(Debug)] +struct RoundRobinRpcClientInner { + clients: Vec, + next_index: AtomicUsize, +} + +impl RoundRobinRpcClient { + /// Create a new `RoundRobinRpcClient` with the given clients. + /// + /// # Panics + /// + /// Panics if the `clients` vector is empty. + pub fn new(clients: Vec) -> Self { + assert!(!clients.is_empty(), "At least one client must be provided"); + Self { + inner: Arc::new(RoundRobinRpcClientInner { clients, next_index: AtomicUsize::new(0) }), + } + } + + fn next_client(&self) -> &Client { + let idx = self.next_index(); + &self.inner.clients[idx] + } + + fn next_index(&self) -> usize { + // Note: fetch_add wraps on overflow so no need to handle this. + self.inner.next_index.fetch_add(1, Ordering::Relaxed) % self.inner.clients.len() + } +} + +impl RpcClientT for RoundRobinRpcClient { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + let client = self.next_client(); + client.request_raw(method, params) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + let client = self.next_client(); + client.subscribe_raw(sub, params, unsub) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client.rs b/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client.rs new file mode 100644 index 00000000..9df6219f --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client.rs @@ -0,0 +1,237 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use super::{RawRpcSubscription, RpcClientT}; +use crate::Error; +use futures::{Stream, StreamExt}; +use serde::{Serialize, de::DeserializeOwned}; +use serde_json::value::RawValue; +use std::{pin::Pin, sync::Arc, task::Poll}; + +/// A concrete wrapper around an [`RpcClientT`] which provides some higher level helper methods +/// and is cheaply cloneable. +#[derive(Clone)] +pub struct RpcClient { + client: Arc, +} + +impl RpcClient { + #[cfg(feature = "jsonrpsee")] + #[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] + /// Create a default RPC client pointed at some URL, currently based on [`jsonrpsee`]. + /// + /// Errors if an insecure URL is provided. In this case, use [`RpcClient::from_insecure_url`] + /// instead. + pub async fn from_url>(url: U) -> Result { + crate::utils::validate_url_is_secure(url.as_ref())?; + RpcClient::from_insecure_url(url).await + } + + #[cfg(feature = "jsonrpsee")] + /// Create a default RPC client pointed at some URL, currently based on [`jsonrpsee`]. + /// + /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). + pub async fn from_insecure_url>(url: U) -> Result { + let client = super::jsonrpsee_client(url.as_ref()) + .await + .map_err(|e| Error::Client(Box::new(e)))?; + Ok(Self::new(client)) + } + + /// Create a new [`RpcClient`] from an arbitrary [`RpcClientT`] implementation. + pub fn new(client: R) -> Self { + RpcClient { client: Arc::new(client) } + } + + /// Make an RPC request, given a method name and some parameters. + /// + /// See [`RpcParams`] and the [`rpc_params!`] macro for an example of how to + /// construct the parameters. + pub async fn request( + &self, + method: &str, + params: RpcParams, + ) -> Result { + let res = self.client.request_raw(method, params.build()).await?; + let val = serde_json::from_str(res.get()).map_err(Error::Deserialization)?; + Ok(val) + } + + /// Subscribe to an RPC endpoint, providing the parameters and the method to call to + /// unsubscribe from it again. 
+ /// + /// See [`RpcParams`] and the [`rpc_params!`] macro for an example of how to + /// construct the parameters. + pub async fn subscribe( + &self, + sub: &str, + params: RpcParams, + unsub: &str, + ) -> Result, Error> { + let sub = self.client.subscribe_raw(sub, params.build(), unsub).await?; + Ok(RpcSubscription::new(sub)) + } +} + +impl From for RpcClient { + fn from(client: C) -> Self { + RpcClient::new(client) + } +} + +impl std::fmt::Debug for RpcClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("RpcClient").finish() + } +} + +impl std::ops::Deref for RpcClient { + type Target = dyn RpcClientT; + fn deref(&self) -> &Self::Target { + &*self.client + } +} + +/// Create some [`RpcParams`] to pass to our [`RpcClient`]. [`RpcParams`] +/// simply enforces that parameters handed to our [`RpcClient`] methods +/// are the correct shape. +/// +/// As with the [`serde_json::json!`] macro, this will panic if you provide +/// parameters which cannot successfully be serialized to JSON. +/// +/// # Example +/// +/// ```rust,standalone_crate +/// use pezkuwi_subxt_rpcs::client::{ rpc_params, RpcParams }; +/// +/// // If you provide no params you get `None` back +/// let params: RpcParams = rpc_params![]; +/// assert!(params.build().is_none()); +/// +/// // If you provide params you get `Some>` back. +/// let params: RpcParams = rpc_params![1, true, "foo"]; +/// assert_eq!(params.build().unwrap().get(), "[1,true,\"foo\"]"); +/// ``` +#[macro_export] +macro_rules! rpc_params { + ($($p:expr), *) => {{ + // May be unused if empty; no params. + #[allow(unused_mut)] + let mut params = $crate::client::RpcParams::new(); + $( + params.push($p).expect("values passed to rpc_params! must be serializable to JSON"); + )* + params + }} +} +pub use rpc_params; + +/// This represents the parameters passed to an [`RpcClient`], and exists to +/// enforce that parameters are provided in the correct format. 
+/// +/// Prefer to use the [`rpc_params!`] macro for simpler creation of these. +/// +/// # Example +/// +/// ```rust,standalone_crate +/// use pezkuwi_subxt_rpcs::client::RpcParams; +/// +/// let mut params = RpcParams::new(); +/// params.push(1).unwrap(); +/// params.push(true).unwrap(); +/// params.push("foo").unwrap(); +/// +/// assert_eq!(params.build().unwrap().get(), "[1,true,\"foo\"]"); +/// ``` +#[derive(Debug, Clone, Default)] +pub struct RpcParams(Vec); + +impl RpcParams { + /// Create a new empty set of [`RpcParams`]. + pub fn new() -> Self { + Self(Vec::new()) + } + /// Push a parameter into our [`RpcParams`]. This serializes it to JSON + /// in the process, and so will return an error if this is not possible. + pub fn push(&mut self, param: P) -> Result<(), Error> { + if self.0.is_empty() { + self.0.push(b'['); + } else { + self.0.push(b',') + } + serde_json::to_writer(&mut self.0, ¶m).map_err(Error::Deserialization)?; + Ok(()) + } + /// Build a [`RawValue`] from our params, returning `None` if no parameters + /// were provided. + pub fn build(mut self) -> Option> { + if self.0.is_empty() { + None + } else { + self.0.push(b']'); + let s = unsafe { String::from_utf8_unchecked(self.0) }; + Some(RawValue::from_string(s).expect("Should be valid JSON")) + } + } +} + +/// A generic RPC Subscription. This implements [`Stream`], and so most of +/// the functionality you'll need to interact with it comes from the +/// [`StreamExt`] extension trait. +pub struct RpcSubscription { + inner: RawRpcSubscription, + _marker: std::marker::PhantomData, +} + +impl std::fmt::Debug for RpcSubscription { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RpcSubscription") + .field("inner", &"RawRpcSubscription") + .field("_marker", &self._marker) + .finish() + } +} + +impl RpcSubscription { + /// Creates a new [`RpcSubscription`]. 
+ pub fn new(inner: RawRpcSubscription) -> Self { + Self { inner, _marker: std::marker::PhantomData } + } + + /// Obtain the ID associated with this subscription. + pub fn subscription_id(&self) -> Option<&str> { + self.inner.id.as_deref() + } +} + +impl RpcSubscription { + /// Returns the next item in the stream. This is just a wrapper around + /// [`StreamExt::next()`] so that you can avoid the extra import. + pub async fn next(&mut self) -> Option> { + StreamExt::next(self).await + } +} + +impl std::marker::Unpin for RpcSubscription {} + +impl Stream for RpcSubscription { + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + let res = futures::ready!(self.inner.stream.poll_next_unpin(cx)); + + // Decode the inner RawValue to the type we're expecting and map + // any errors to the right shape: + let res = res.map(|r| { + r.and_then(|raw_val| { + serde_json::from_str(raw_val.get()).map_err(Error::Deserialization) + }) + }); + + Poll::Ready(res) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client_t.rs b/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client_t.rs new file mode 100644 index 00000000..78e68af1 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/client/rpc_client_t.rs @@ -0,0 +1,103 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::Error; +use futures::Stream; +use std::{future::Future, pin::Pin}; + +// Re-exporting for simplicity since it's used a bunch in the trait definition. +pub use serde_json::value::RawValue; + +/// A trait describing low level JSON-RPC interactions. Implementations of this can be +/// used to instantiate a [`super::RpcClient`], used for lower level RPC calls via eg +/// [`crate::methods::LegacyRpcMethods`] and [`crate::methods::ChainHeadRpcMethods`]. 
+/// +/// This is a low level interface whose methods expect an already-serialized set of params, +/// and return an owned but still-serialized [`RawValue`], deferring deserialization to +/// the caller. This is the case because we want the methods to be object-safe (which prohibits +/// generics), and want to avoid any unnecessary allocations in serializing/deserializing +/// parameters. +/// +/// # Panics +/// +/// Implementations are free to panic if the `RawValue`'s passed to `request_raw` or +/// `subscribe_raw` are not JSON arrays. Internally, we ensure that this is always the case. +pub trait RpcClientT: Send + Sync + 'static { + /// Make a raw request for which we expect a single response back from. Implementations + /// should expect that the params will either be `None`, or be an already-serialized + /// JSON array of parameters. + /// + /// See [`super::RpcParams`] and the [`super::rpc_params!`] macro for an example of how to + /// construct the parameters. + /// + /// Prefer to use the interface provided on [`super::RpcClient`] where possible. + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box>; + + /// Subscribe to some method. Implementations should expect that the params will + /// either be `None`, or be an already-serialized JSON array of parameters. + /// + /// See [`super::RpcParams`] and the [`super::rpc_params!`] macro for an example of how to + /// construct the parameters. + /// + /// Prefer to use the interface provided on [`super::RpcClient`] where possible. + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription>; +} + +/// A boxed future that is returned from the [`RpcClientT`] methods. +pub type RawRpcFuture<'a, T> = Pin> + Send + 'a>>; + +/// The RPC subscription returned from [`RpcClientT`]'s `subscription` method. +pub struct RawRpcSubscription { + /// The subscription stream. 
+ pub stream: Pin, Error>> + Send + 'static>>, + /// The ID associated with the subscription. + pub id: Option, +} + +impl RpcClientT for std::sync::Arc { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + (**self).request_raw(method, params) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + (**self).subscribe_raw(sub, params, unsub) + } +} + +impl RpcClientT for Box { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + (**self).request_raw(method, params) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + (**self).subscribe_raw(sub, params, unsub) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/lib.rs b/vendor/pezkuwi-subxt/rpcs/src/lib.rs new file mode 100644 index 00000000..ddd26717 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/lib.rs @@ -0,0 +1,148 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This crate provides a low level RPC interface to Bizinikiwi based nodes. +//! +//! See the [`client`] module for a [`client::RpcClient`] which is driven by implementations +//! of [`client::RpcClientT`] (several of which are provided behind feature flags). +//! +//! See the [`methods`] module for structs which implement sets of concrete RPC calls for +//! communicating with Bizinikiwi based nodes. These structs are all driven by a +//! [`client::RpcClient`]. +//! +//! The RPC clients/methods here are made use of in `subxt`. Enabling the `subxt` feature flag +//! ensures that all Subxt configurations are also valid RPC configurations. +//! +//! The provided RPC client implementations can be used natively (with the default `native` feature +//! 
flag) or in WASM based web apps (with the `web` feature flag). + +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(any( + all(feature = "web", feature = "native"), + not(any(feature = "web", feature = "native")) +))] +compile_error!("subxt-rpcs: exactly one of the 'web' and 'native' features should be used."); + +mod macros; + +pub mod client; +pub mod methods; +pub mod utils; + +// Used to enable the js feature for wasm. +#[cfg(feature = "web")] +#[allow(unused_imports)] +pub use getrandom as _; + +// Expose the most common things at the top level: +pub use client::{RpcClient, RpcClientT}; +pub use methods::{ChainHeadRpcMethods, LegacyRpcMethods}; + +/// Configuration used by some of the RPC methods to determine the shape of +/// some of the inputs or responses. +pub trait RpcConfig { + /// The block header type. + type Header: Header; + /// The block hash type. + type Hash: Hash; + /// The Account ID type. + type AccountId: AccountId; +} + +/// A trait which is applied to any type that is a valid block header. +pub trait Header: std::fmt::Debug + codec::Decode + serde::de::DeserializeOwned {} +impl Header for T where T: std::fmt::Debug + codec::Decode + serde::de::DeserializeOwned {} + +/// A trait which is applied to any type that is a valid block hash. +pub trait Hash: serde::de::DeserializeOwned + serde::Serialize {} +impl Hash for T where T: serde::de::DeserializeOwned + serde::Serialize {} + +/// A trait which is applied to any type that is a valid Account ID. +pub trait AccountId: serde::Serialize {} +impl AccountId for T where T: serde::Serialize {} + +// When the subxt feature is enabled, ensure that any valid `pezkuwi_subxt::Config` +// is also a valid `RpcConfig`. 
+#[cfg(feature = "subxt")] +mod impl_config { + use super::*; + use pezkuwi_subxt_core::config::HashFor; + + impl RpcConfig for T + where + T: pezkuwi_subxt_core::Config, + { + type Header = T::Header; + type Hash = HashFor; + type AccountId = T::AccountId; + } +} + +/// This encapsulates any errors that could be emitted in this crate. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum Error { + /// An error which indicates a user fault. + #[error("User error: {0}")] + User(#[from] UserError), + // Dev note: We need the error to be safely sent between threads + // for `subscribe_to_block_headers_filling_in_gaps` and friends. + /// An error coming from the underlying RPC Client. + #[error("RPC error: client error: {0}")] + Client(Box), + /// The connection was lost and the client will automatically reconnect. Clients + /// should only emit this if they are internally reconnecting, and will buffer any + /// calls made to them in the meantime until the connection is re-established. + #[error("RPC error: the connection was lost ({0}); reconnect automatically initiated")] + DisconnectedWillReconnect(String), + /// Cannot deserialize the response. + #[error("RPC error: cannot deserialize response: {0}")] + Deserialization(serde_json::Error), + /// Cannot SCALE decode some part of the response. + #[error("RPC error: cannot SCALE decode some part of the response: {0}")] + Decode(codec::Error), + /// The requested URL is insecure. + #[error("RPC error: insecure URL: {0}")] + InsecureUrl(String), +} + +impl Error { + /// Is the error the `DisconnectedWillReconnect` variant? This should be true + /// only if the underlying `RpcClient` implementation was disconnected and is + /// automatically reconnecting behind the scenes. 
+ pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!(self, Error::DisconnectedWillReconnect(_)) + } +} + +/// This error should be returned when the user is at fault making a call, +/// for instance because the method name was wrong, parameters invalid or some +/// invariant not upheld. Implementations of [`RpcClientT`] should turn any such +/// errors into this, so that they can be handled appropriately. By contrast, +/// [`Error::Client`] is emitted when the underlying RPC Client implementation +/// has some problem that isn't user specific (eg network issues or similar). +#[derive(Debug, Clone, serde::Deserialize, thiserror::Error)] +#[serde(deny_unknown_fields)] +pub struct UserError { + /// Code + pub code: i32, + /// Message + pub message: String, + /// Optional data + pub data: Option>, +} + +impl UserError { + /// Returns a standard JSON-RPC "method not found" error. + pub fn method_not_found() -> UserError { + UserError { code: -32601, message: "Method not found".to_owned(), data: None } + } +} + +impl core::fmt::Display for UserError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{} ({})", &self.message, &self.code) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/macros.rs b/vendor/pezkuwi-subxt/rpcs/src/macros.rs new file mode 100644 index 00000000..2641fc27 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/macros.rs @@ -0,0 +1,47 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +macro_rules! cfg_feature { + ($feature:literal, $($item:item)*) => { + $( + #[cfg(feature = $feature)] + #[cfg_attr(docsrs, doc(cfg(feature = $feature)))] + $item + )* + } +} + +macro_rules! cfg_unstable_light_client { + ($($item:item)*) => { + crate::macros::cfg_feature!("unstable-light-client", $($item)*); + }; +} + +macro_rules! 
cfg_jsonrpsee { + ($($item:item)*) => { + crate::macros::cfg_feature!("jsonrpsee", $($item)*); + }; +} + +macro_rules! cfg_reconnecting_rpc_client { + ($($item:item)*) => { + $( + #[cfg(all(feature = "reconnecting-rpc-client", any(feature = "native", feature = "web")))] + #[cfg_attr(docsrs, doc(cfg(feature = "reconnecting-rpc-client")))] + $item + )* + } +} + +macro_rules! cfg_mock_rpc_client { + ($($item:item)*) => { + crate::macros::cfg_feature!("mock-rpc-client", $($item)*); + }; +} + +pub(crate) use cfg_feature; +pub(crate) use cfg_jsonrpsee; +pub(crate) use cfg_mock_rpc_client; +pub(crate) use cfg_reconnecting_rpc_client; +pub(crate) use cfg_unstable_light_client; diff --git a/vendor/pezkuwi-subxt/rpcs/src/methods/chain_head.rs b/vendor/pezkuwi-subxt/rpcs/src/methods/chain_head.rs new file mode 100644 index 00000000..0fe45ed1 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/methods/chain_head.rs @@ -0,0 +1,1380 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An interface to call the API methods. See +//! for details of the API +//! methods exposed here. + +use crate::{ + Error, Hash, RpcConfig, + client::{RpcClient, RpcSubscription, rpc_params}, +}; +use derive_where::derive_where; +use futures::{Stream, StreamExt}; +use serde::{Deserialize, Deserializer, Serialize}; +use std::{ + collections::{HashMap, VecDeque}, + task::Poll, +}; + +/// An interface to call the unstable RPC methods. This interface is instantiated with +/// some `T: Config` trait which determines some of the types that the RPC methods will +/// take or hand back. +#[derive_where(Clone, Debug)] +pub struct ChainHeadRpcMethods { + client: RpcClient, + _marker: std::marker::PhantomData, +} + +impl ChainHeadRpcMethods { + /// Instantiate the legacy RPC method interface. 
+ pub fn new(client: RpcClient) -> Self { + ChainHeadRpcMethods { client, _marker: std::marker::PhantomData } + } + + /// Subscribe to `chainHead_v1_follow` to obtain all reported blocks by the chain. + /// + /// The subscription ID can be used to make queries for the + /// block's body ([`chainHead_v1_body`](ChainHeadRpcMethods::chainhead_v1_follow)), + /// block's header ([`chainHead_v1_header`](ChainHeadRpcMethods::chainhead_v1_header)), + /// block's storage ([`chainHead_v1_storage`](ChainHeadRpcMethods::chainhead_v1_storage)) and + /// submitting runtime API calls at this block + /// ([`chainHead_v1_call`](ChainHeadRpcMethods::chainhead_v1_call)). + /// + /// # Note + /// + /// When the user is no longer interested in a block, the user is responsible + /// for calling the [`chainHead_v1_unpin`](ChainHeadRpcMethods::chainhead_v1_unpin) method. + /// Failure to do so will result in the subscription being stopped by generating the `Stop` + /// event. + pub async fn chainhead_v1_follow( + &self, + with_runtime: bool, + ) -> Result, Error> { + let sub = self + .client + .subscribe("chainHead_v1_follow", rpc_params![with_runtime], "chainHead_v1_unfollow") + .await?; + + Ok(FollowSubscription { sub, done: false }) + } + + /// Resumes a storage fetch started with chainHead_v1_storage after it has generated an + /// `operationWaitingForContinue` event. + /// + /// Has no effect if the operationId is invalid or refers to an operation that has emitted a + /// `{"event": "operationInaccessible"` event, or if the followSubscription is invalid or stale. + pub async fn chainhead_v1_continue( + &self, + follow_subscription: &str, + operation_id: &str, + ) -> Result<(), Error> { + self.client + .request("chainHead_v1_continue", rpc_params![follow_subscription, operation_id]) + .await + } + + /// Stops an operation started with `chainHead_v1_body`, `chainHead_v1_call`, or + /// `chainHead_v1_storage¦. If the operation was still in progress, this interrupts it. 
+ /// If the operation was already finished, this call has no effect. + /// + /// Has no effect if the `followSubscription` is invalid or stale. + pub async fn chainhead_v1_stop_operation( + &self, + follow_subscription: &str, + operation_id: &str, + ) -> Result<(), Error> { + self.client + .request("chainHead_v1_stopOperation", rpc_params![follow_subscription, operation_id]) + .await + } + + /// Call the `chainHead_v1_body` method and return an operation ID to obtain the block's body. + /// + /// The response events are provided on the `chainHead_follow` subscription and identified by + /// the returned operation ID. + /// + /// # Note + /// + /// The subscription ID is obtained from an open subscription created by + /// [`chainHead_v1_follow`](ChainHeadRpcMethods::chainhead_v1_follow). + pub async fn chainhead_v1_body( + &self, + subscription_id: &str, + hash: T::Hash, + ) -> Result { + let response = self + .client + .request("chainHead_v1_body", rpc_params![subscription_id, hash]) + .await?; + + Ok(response) + } + + /// Get the block's header using the `chainHead_v1_header` method. + /// + /// # Note + /// + /// The subscription ID is obtained from an open subscription created by + /// [`chainHead_v1_follow`](ChainHeadRpcMethods::chainhead_v1_follow). + pub async fn chainhead_v1_header( + &self, + subscription_id: &str, + hash: T::Hash, + ) -> Result, Error> { + // header returned as hex encoded SCALE encoded bytes. + let header: Option = self + .client + .request("chainHead_v1_header", rpc_params![subscription_id, hash]) + .await?; + + let header = header + .map(|h| codec::Decode::decode(&mut &*h.0)) + .transpose() + .map_err(Error::Decode)?; + Ok(header) + } + + /// Call the `chainHead_v1_storage` method and return an operation ID to obtain the block's + /// storage. + /// + /// The response events are provided on the `chainHead_follow` subscription and identified by + /// the returned operation ID. 
+ /// + /// # Note + /// + /// The subscription ID is obtained from an open subscription created by + /// [`chainHead_v1_follow`](ChainHeadRpcMethods::chainhead_v1_follow). + pub async fn chainhead_v1_storage( + &self, + subscription_id: &str, + hash: T::Hash, + items: impl IntoIterator>, + child_key: Option<&[u8]>, + ) -> Result { + let items: Vec> = items + .into_iter() + .map(|item| StorageQuery { key: to_hex(item.key), query_type: item.query_type }) + .collect(); + + let response = self + .client + .request( + "chainHead_v1_storage", + rpc_params![subscription_id, hash, items, child_key.map(to_hex)], + ) + .await?; + + Ok(response) + } + + /// Call the `chainHead_v1_call` method and return an operation ID to obtain the runtime API + /// result. + /// + /// The response events are provided on the `chainHead_follow` subscription and identified by + /// the returned operation ID. + /// + /// # Note + /// + /// The subscription ID is obtained from an open subscription created by + /// [`chainHead_v1_follow`](ChainHeadRpcMethods::chainhead_v1_follow). + pub async fn chainhead_v1_call( + &self, + subscription_id: &str, + hash: T::Hash, + function: &str, + call_parameters: &[u8], + ) -> Result { + let response = self + .client + .request( + "chainHead_v1_call", + rpc_params![subscription_id, hash, function, to_hex(call_parameters)], + ) + .await?; + + Ok(response) + } + + /// Unpin a block reported by the `chainHead_follow` subscription. + /// + /// # Note + /// + /// The subscription ID is obtained from an open subscription created by + /// [`chainHead_v1_follow`](ChainHeadRpcMethods::chainhead_v1_follow). + pub async fn chainhead_v1_unpin( + &self, + subscription_id: &str, + hash: T::Hash, + ) -> Result<(), Error> { + self.client + .request("chainHead_v1_unpin", rpc_params![subscription_id, hash]) + .await + } + + /// Return the genesis hash. 
+ pub async fn chainspec_v1_genesis_hash(&self) -> Result { + self.client.request("chainSpec_v1_genesisHash", rpc_params![]).await + } + + /// Return a string containing the human-readable name of the chain. + pub async fn chainspec_v1_chain_name(&self) -> Result { + self.client.request("chainSpec_v1_chainName", rpc_params![]).await + } + + /// Returns the JSON payload found in the chain specification under the key properties. + /// No guarantee is offered about the content of this object, and so it's up to the caller + /// to decide what to deserialize it into. + pub async fn chainspec_v1_properties( + &self, + ) -> Result { + self.client.request("chainSpec_v1_properties", rpc_params![]).await + } + + /// Returns an array of strings indicating the names of all the JSON-RPC functions supported by + /// the JSON-RPC server. + pub async fn rpc_methods(&self) -> Result, Error> { + self.client.request("rpc_methods", rpc_params![]).await + } + + /// Attempt to submit a transaction, returning events about its progress. + pub async fn transactionwatch_v1_submit_and_watch( + &self, + tx: &[u8], + ) -> Result, Error> { + let sub = self + .client + .subscribe( + "transactionWatch_v1_submitAndWatch", + rpc_params![to_hex(tx)], + "transactionWatch_v1_unwatch", + ) + .await?; + + Ok(TransactionSubscription { sub, done: false }) + } + + /// Broadcast the transaction on the p2p network until the + /// [`Self::transaction_v1_stop`] is called. + /// + /// Returns an operation ID that can be used to stop the broadcasting process. + /// Returns `None` if the server cannot handle the request at the moment. + pub async fn transaction_v1_broadcast(&self, tx: &[u8]) -> Result, Error> { + self.client.request("transaction_v1_broadcast", rpc_params![to_hex(tx)]).await + } + + /// Stop the broadcasting process of the transaction. + /// + /// The operation ID is obtained from the [`Self::transaction_v1_broadcast`] method. 
+ /// + /// Returns an error if the operation ID does not correspond to any active transaction for this + /// connection. + pub async fn transaction_v1_stop(&self, operation_id: &str) -> Result<(), Error> { + self.client.request("transaction_v1_stop", rpc_params![operation_id]).await + } + + /// Fetch the block body (ie the extrinsics in the block) given its hash. + /// + /// Returns an array of the hexadecimal-encoded scale-encoded extrinsics found in the block, + /// or `None` if the block wasn't found. + pub async fn archive_v1_body(&self, block_hash: T::Hash) -> Result>, Error> { + self.client.request("archive_v1_body", rpc_params![block_hash]).await + } + + /// Call the `archive_v1_call` method and return the response. + pub async fn archive_v1_call( + &self, + block_hash: T::Hash, + function: &str, + call_parameters: &[u8], + ) -> Result { + use serde::de::Error as _; + + // We deserialize to this intermediate shape, since + // we can't have a boolean tag to denote variants. + #[derive(Deserialize)] + struct Response { + success: bool, + value: Option, + error: Option, + // This was accidentally used instead of value in Bizinikiwi, + // so to support those impls we try it here if needed: + result: Option, + } + + let res: Response = self + .client + .request("archive_v1_call", rpc_params![block_hash, function, to_hex(call_parameters)]) + .await?; + + let value = res.value.or(res.result); + match (res.success, value, res.error) { + (true, Some(value), _) => Ok(ArchiveCallResult::Success(value)), + (false, _, err) => Ok(ArchiveCallResult::Error(err.unwrap_or(String::new()))), + (true, None, _) => { + let m = "archive_v1_call: 'success: true' response should have `value: 0x1234` alongside it"; + Err(Error::Deserialization(serde_json::Error::custom(m))) + }, + } + } + + /// Return the finalized block height of the chain. 
+ pub async fn archive_v1_finalized_height(&self) -> Result { + self.client.request("archive_v1_finalizedHeight", rpc_params![]).await + } + + /// Return the genesis hash. + pub async fn archive_v1_genesis_hash(&self) -> Result { + self.client.request("archive_v1_genesisHash", rpc_params![]).await + } + + /// Given a block height, return the hashes of the zero or more blocks at that height. + /// For blocks older than the latest finalized block, only one entry will be returned. For + /// blocks newer than the latest finalized block, it's possible to have 0, 1 or multiple blocks + /// at that height given that forks could occur. + pub async fn archive_v1_hash_by_height(&self, height: usize) -> Result, Error> { + self.client.request("archive_v1_hashByHeight", rpc_params![height]).await + } + + /// Fetch the header for a block with the given hash, or `None` if no block with that hash + /// exists. + pub async fn archive_v1_header(&self, block_hash: T::Hash) -> Result, Error> { + let maybe_encoded_header: Option = + self.client.request("archive_v1_header", rpc_params![block_hash]).await?; + + let Some(encoded_header) = maybe_encoded_header else { + return Ok(None); + }; + + let header = + ::decode(&mut &*encoded_header.0).map_err(Error::Decode)?; + Ok(Some(header)) + } + + /// Query the node storage and return a subscription which streams corresponding storage events + /// back. 
+ pub async fn archive_v1_storage( + &self, + block_hash: T::Hash, + items: impl IntoIterator>, + child_key: Option<&[u8]>, + ) -> Result, Error> { + let items: Vec> = items + .into_iter() + .map(|item| StorageQuery { key: to_hex(item.key), query_type: item.query_type }) + .collect(); + + let sub = self + .client + .subscribe( + "archive_v1_storage", + rpc_params![block_hash, items, child_key.map(to_hex)], + "archive_v1_stopStorage", + ) + .await?; + + Ok(ArchiveStorageSubscription { sub, done: false }) + } + + // Dev note: we continue to support the latest "unstable" archive methods because + // they will be around for a while before the stable ones make it into a release. + // The below are just a copy-paste of the v1 methods, above, but calling the + // "unstable" RPCs instead. Eventually we'll remove them. + + /// Fetch the block body (ie the extrinsics in the block) given its hash. + /// + /// Returns an array of the hexadecimal-encoded scale-encoded extrinsics found in the block, + /// or `None` if the block wasn't found. + pub async fn archive_unstable_body( + &self, + block_hash: T::Hash, + ) -> Result>, Error> { + self.client.request("archive_unstable_body", rpc_params![block_hash]).await + } + + /// Call the `archive_unstable_call` method and return the response. + pub async fn archive_unstable_call( + &self, + block_hash: T::Hash, + function: &str, + call_parameters: &[u8], + ) -> Result { + use serde::de::Error as _; + + // We deserialize to this intermediate shape, since + // we can't have a boolean tag to denote variants. 
+ #[derive(Deserialize)] + struct Response { + success: bool, + value: Option, + error: Option, + // This was accidentally used instead of value in Bizinikiwi, + // so to support those impls we try it here if needed: + result: Option, + } + + let res: Response = self + .client + .request( + "archive_unstable_call", + rpc_params![block_hash, function, to_hex(call_parameters)], + ) + .await?; + + let value = res.value.or(res.result); + match (res.success, value, res.error) { + (true, Some(value), _) => Ok(ArchiveCallResult::Success(value)), + (false, _, err) => Ok(ArchiveCallResult::Error(err.unwrap_or(String::new()))), + (true, None, _) => { + let m = "archive_unstable_call: 'success: true' response should have `value: 0x1234` alongside it"; + Err(Error::Deserialization(serde_json::Error::custom(m))) + }, + } + } + + /// Return the finalized block height of the chain. + pub async fn archive_unstable_finalized_height(&self) -> Result { + self.client.request("archive_unstable_finalizedHeight", rpc_params![]).await + } + + /// Return the genesis hash. + pub async fn archive_unstable_genesis_hash(&self) -> Result { + self.client.request("archive_unstable_genesisHash", rpc_params![]).await + } + + /// Given a block height, return the hashes of the zero or more blocks at that height. + /// For blocks older than the latest finalized block, only one entry will be returned. For + /// blocks newer than the latest finalized block, it's possible to have 0, 1 or multiple blocks + /// at that height given that forks could occur. + pub async fn archive_unstable_hash_by_height( + &self, + height: usize, + ) -> Result, Error> { + self.client.request("archive_unstable_hashByHeight", rpc_params![height]).await + } + + /// Fetch the header for a block with the given hash, or `None` if no block with that hash + /// exists. 
+ pub async fn archive_unstable_header( + &self, + block_hash: T::Hash, + ) -> Result, Error> { + let maybe_encoded_header: Option = + self.client.request("archive_unstable_header", rpc_params![block_hash]).await?; + + let Some(encoded_header) = maybe_encoded_header else { + return Ok(None); + }; + + let header = + ::decode(&mut &*encoded_header.0).map_err(Error::Decode)?; + Ok(Some(header)) + } + + /// Query the node storage and return a subscription which streams corresponding storage events + /// back. + pub async fn archive_unstable_storage( + &self, + block_hash: T::Hash, + items: impl IntoIterator>, + child_key: Option<&[u8]>, + ) -> Result, Error> { + let items: Vec> = items + .into_iter() + .map(|item| StorageQuery { key: to_hex(item.key), query_type: item.query_type }) + .collect(); + + let sub = self + .client + .subscribe( + "archive_unstable_storage", + rpc_params![block_hash, items, child_key.map(to_hex)], + "archive_unstable_stopStorage", + ) + .await?; + + Ok(ArchiveStorageSubscription { sub, done: false }) + } +} + +/// This represents events generated by the `follow` method. +/// +/// The block events are generated in the following order: +/// 1. Initialized - generated only once to signal the latest finalized block +/// 2. NewBlock - a new block was added. +/// 3. BestBlockChanged - indicate that the best block is now the one from this event. The block was +/// announced priorly with the `NewBlock` event. +/// 4. Finalized - State the finalized and pruned blocks. 
+/// +/// The following events are related to operations: +/// - OperationBodyDone: The response of the `chainHead_body` +/// - OperationCallDone: The response of the `chainHead_call` +/// - OperationStorageItems: Items produced by the `chainHead_storage` +/// - OperationWaitingForContinue: Generated after OperationStorageItems and requires the user to +/// call `chainHead_continue` +/// - OperationStorageDone: The `chainHead_storage` method has produced all the results +/// - OperationInaccessible: The server was unable to provide the result, retries might succeed in +/// the future +/// - OperationError: The server encountered an error, retries will not succeed +/// +/// The stop event indicates that the JSON-RPC server was unable to provide a consistent list of +/// the blocks at the head of the chain. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum FollowEvent { + /// The latest finalized block. + /// + /// This event is generated only once. + Initialized(Initialized), + /// A new non-finalized block was added. + NewBlock(NewBlock), + /// The best block of the chain. + BestBlockChanged(BestBlockChanged), + /// A list of finalized and pruned blocks. + Finalized(Finalized), + /// The response of the `chainHead_body` method. + OperationBodyDone(OperationBodyDone), + /// The response of the `chainHead_call` method. + OperationCallDone(OperationCallDone), + /// Yield one or more items found in the storage. + OperationStorageItems(OperationStorageItems), + /// Ask the user to call `chainHead_continue` to produce more events + /// regarding the operation id. + OperationWaitingForContinue(OperationId), + /// The responses of the `chainHead_storage` method have been produced. + OperationStorageDone(OperationId), + /// The RPC server was unable to provide the response of the following operation id. + /// + /// Repeating the same operation in the future might succeed. 
+ OperationInaccessible(OperationId), + /// The RPC server encountered an error while processing an operation id. + /// + /// Repeating the same operation in the future will not succeed. + OperationError(OperationError), + /// The subscription is dropped and no further events + /// will be generated. + Stop, +} + +/// Contain information about the latest finalized block. +/// +/// # Note +/// +/// This is the first event generated by the `follow` subscription +/// and is submitted only once. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Initialized { + /// The hashes of the last finalized blocks. + pub finalized_block_hashes: Vec, + /// The runtime version of the finalized block. + /// + /// # Note + /// + /// This is present only if the `with_runtime` flag is set for + /// the `follow` subscription. + pub finalized_block_runtime: Option, +} + +impl<'de, Hash: Deserialize<'de>> Deserialize<'de> for Initialized { + fn deserialize>(deserializer: D) -> Result { + // Custom struct that can deserialize both `finalizedBlockHash` and `finalizedBlockHashes`. + #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] + #[serde(rename_all = "camelCase")] + struct InitializedIR { + finalized_block_hashes: Option>, + finalized_block_hash: Option, + finalized_block_runtime: Option, + } + + let ir = InitializedIR::deserialize(deserializer)?; + let finalized_block_hashes = ir + .finalized_block_hashes + .or_else(|| ir.finalized_block_hash.map(|hash| vec![hash])) + .ok_or_else(|| serde::de::Error::custom("Missing finalized block hashes"))?; + + Ok(Initialized { + finalized_block_hashes, + finalized_block_runtime: ir.finalized_block_runtime, + }) + } +} + +/// The runtime event generated if the `follow` subscription +/// has set the `with_runtime` flag. 
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "type")] +pub enum RuntimeEvent { + /// The runtime version of this block. + Valid(RuntimeVersionEvent), + /// The runtime could not be obtained due to an error. + Invalid(ErrorEvent), +} + +/// The runtime specification of the current block. +/// +/// This event is generated for: +/// - the first announced block by the follow subscription +/// - blocks that suffered a change in runtime compared with their parents +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RuntimeVersionEvent { + /// Details about this runtime. + pub spec: RuntimeSpec, +} + +/// This contains the runtime version information necessary to make transactions, and is obtained +/// from the "initialized" event of `chainHead_follow` if the `withRuntime` flag is set. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RuntimeSpec { + /// Opaque string indicating the name of the chain. + pub spec_name: String, + + /// Opaque string indicating the name of the implementation of the chain. + pub impl_name: String, + + /// Opaque integer. The JSON-RPC client can assume that the Runtime API call to + /// `Metadata_metadata` will always produce the same output as long as the specVersion is the + /// same. + pub spec_version: u32, + + /// Opaque integer. Whenever the runtime code changes in a backwards-compatible way, the + /// implVersion is modified while the specVersion is left untouched. + pub impl_version: u32, + + /// Opaque integer. Necessary when building the bytes of a transaction. Transactions that have + /// been generated with a different `transaction_version` are incompatible. + pub transaction_version: u32, + + /// Object containing a list of "entry point APIs" supported by the runtime. 
Each key is an + /// opaque string indicating the API, and each value is an integer version number. Before + /// making a runtime call (using chainHead_call), you should make sure that this list contains + /// the entry point API corresponding to the call and with a known version number. + /// + /// **Note:** In Bizinikiwi, the keys in the apis field consists of the hexadecimal-encoded + /// 8-bytes blake2 hash of the name of the API. For example, the `TaggedTransactionQueue` API + /// is 0xd2bc9897eed08f15. + #[serde(with = "hashmap_as_tuple_list")] + pub apis: HashMap, +} + +/// The operation could not be processed due to an error. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ErrorEvent { + /// Reason of the error. + pub error: String, +} + +/// Indicate a new non-finalized block. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct NewBlock { + /// The hash of the new block. + pub block_hash: Hash, + /// The parent hash of the new block. + pub parent_block_hash: Hash, + /// The runtime version of the new block. + /// + /// # Note + /// + /// This is present only if the `with_runtime` flag is set for + /// the `follow` subscription. + pub new_runtime: Option, +} + +/// Indicate the block hash of the new best block. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct BestBlockChanged { + /// The block hash of the new best block. + pub best_block_hash: Hash, +} + +/// Indicate the finalized and pruned block hashes. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Finalized { + /// Block hashes that are finalized. + pub finalized_block_hashes: Vec, + /// Block hashes that are pruned (removed). + pub pruned_block_hashes: Vec, +} + +/// Indicate the operation id of the event. 
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationId { + /// The operation id of the event. + pub operation_id: String, +} + +/// The response of the `chainHead_body` method. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationBodyDone { + /// The operation id of the event. + pub operation_id: String, + /// Array of hexadecimal-encoded scale-encoded extrinsics found in the block. + pub value: Vec, +} + +/// The response of the `chainHead_call` method. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationCallDone { + /// The operation id of the event. + pub operation_id: String, + /// Hexadecimal-encoded output of the runtime function call. + pub output: Bytes, +} + +/// The response of the `chainHead_call` method. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationStorageItems { + /// The operation id of the event. + pub operation_id: String, + /// The resulting items. + pub items: VecDeque, +} + +/// Indicate a problem during the operation. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct OperationError { + /// The operation id of the event. + pub operation_id: String, + /// The reason of the error. + pub error: String, +} + +/// The storage result. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct StorageResult { + /// The hex-encoded key of the result. + pub key: Bytes, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, +} + +/// The type of the storage query. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum StorageResultType { + /// Fetch the value of the provided key. 
+ Value(Bytes), + /// Fetch the hash of the value of the provided key. + Hash(Bytes), + /// Fetch the closest descendant merkle value. + ClosestDescendantMerkleValue(Bytes), +} + +/// The method response of `chainHead_body`, `chainHead_call` and `chainHead_storage`. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "result")] +pub enum MethodResponse { + /// The method has started. + Started(MethodResponseStarted), + /// The RPC server cannot handle the request at the moment. + LimitReached, +} + +/// The `started` result of a method. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct MethodResponseStarted { + /// The operation id of the response. + pub operation_id: String, + /// The number of items from the back of the `chainHead_storage` that have been discarded. + pub discarded_items: Option, +} + +/// The storage item received as parameter. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct StorageQuery { + /// The provided key. + pub key: Key, + /// The type of the storage query. + #[serde(rename = "type")] + pub query_type: StorageQueryType, +} + +/// The type of the storage query. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum StorageQueryType { + /// Fetch the value of the provided key. + Value, + /// Fetch the hash of the value of the provided key. + Hash, + /// Fetch the closest descendant merkle value. + ClosestDescendantMerkleValue, + /// Fetch the values of all descendants of they provided key. + DescendantsValues, + /// Fetch the hashes of the values of all descendants of they provided key. + DescendantsHashes, +} + +/// A subscription which returns follow events, and ends when a Stop event occurs. 
+pub struct FollowSubscription { + sub: RpcSubscription>, + done: bool, +} + +impl FollowSubscription { + /// Fetch the next item in the stream. + pub async fn next(&mut self) -> Option<::Item> { + ::next(self).await + } + /// Fetch the subscription ID for the stream. + pub fn subscription_id(&self) -> Option<&str> { + self.sub.subscription_id() + } +} + +impl Stream for FollowSubscription { + type Item = > as Stream>::Item; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + if self.done { + return Poll::Ready(None); + } + + let res = self.sub.poll_next_unpin(cx); + + if let Poll::Ready(Some(Ok(FollowEvent::Stop))) = &res { + // No more events will occur after this one. + self.done = true; + } + + res + } +} + +/// A subscription which returns transaction status events, stopping +/// when no more events will be sent. +pub struct TransactionSubscription { + sub: RpcSubscription>, + done: bool, +} + +impl TransactionSubscription { + /// Fetch the next item in the stream. + pub async fn next(&mut self) -> Option<::Item> { + ::next(self).await + } +} + +impl Stream for TransactionSubscription { + type Item = > as Stream>::Item; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + if self.done { + return Poll::Ready(None); + } + + let res = self.sub.poll_next_unpin(cx); + + if let Poll::Ready(Some(Ok(res))) = &res { + if matches!( + res, + TransactionStatus::Dropped { .. } | + TransactionStatus::Error { .. } | + TransactionStatus::Invalid { .. } | + TransactionStatus::Finalized { .. } + ) { + // No more events will occur after these ones. + self.done = true + } + } + + res + } +} + +/// Transaction progress events +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum TransactionStatus { + /// Transaction is part of the future queue. 
+ Validated, + /// The transaction has been broadcast to other nodes. + /// + /// Note: This event is no longer expected to be returned as of + /// the chainHead_v1 spec, but we do so for compatibility with + /// older versions of Smoldot, which do return it. + Broadcasted, + /// Transaction has been included in block with given details. + /// Null is returned if the transaction is no longer in any block + /// of the best chain. + BestChainBlockIncluded { + /// Details of the block it's been seen in. + block: Option>, + }, + /// The transaction is in a block that's been finalized. + Finalized { + /// Details of the block it's been seen in. + block: TransactionBlockDetails, + }, + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + error: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + error: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + error: String, + }, +} + +/// Details of a block that a transaction is seen in. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +pub struct TransactionBlockDetails { + /// The block hash. + pub hash: Hash, + /// The index of the transaction in the block. + #[serde(with = "unsigned_number_as_string")] + pub index: u64, +} + +/// The response from calling `archive_call`. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ArchiveCallResult { + /// The bytes returned from successfully making a call + Success(Bytes), + /// An error returned if the call was not successful. + Error(String), +} + +impl ArchiveCallResult { + /// Return the bytes on success, or `None` if not an [`ArchiveCallResult::Success`]. + pub fn as_success(self) -> Option { + match self { + ArchiveCallResult::Success(bytes) => Some(bytes), + _ => None, + } + } + + /// Return the error message on call failure, or `None` if not an [`ArchiveCallResult::Error`]. 
+ pub fn as_error(self) -> Option { + match self { + ArchiveCallResult::Success(_) => None, + ArchiveCallResult::Error(e) => Some(e), + } + } +} + +/// A subscription which returns follow events, and ends when a Stop event occurs. +pub struct ArchiveStorageSubscription { + sub: RpcSubscription>, + done: bool, +} + +impl ArchiveStorageSubscription { + /// Fetch the next item in the stream. + pub async fn next(&mut self) -> Option<::Item> { + ::next(self).await + } + /// Fetch the subscription ID for the stream. + pub fn subscription_id(&self) -> Option<&str> { + self.sub.subscription_id() + } +} + +impl Stream for ArchiveStorageSubscription { + type Item = > as Stream>::Item; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + if self.done { + return Poll::Ready(None); + } + + let res = self.sub.poll_next_unpin(cx); + + if let Poll::Ready(Some(Ok(ArchiveStorageEvent::Done | ArchiveStorageEvent::Error(..)))) = + &res + { + // No more events will occur after "done" or "error" events. + self.done = true; + } + + res + } +} + +/// Responses returned from [`ArchiveStorageSubscription`]. +#[derive(Debug, Deserialize)] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { + /// A storage response for one of the requested items. + #[serde(rename = "storage")] + Item(ArchiveStorageEventItem), + /// A human-readable error indicating what went wrong. No more storage events + /// will be emitted after this. + #[serde(rename = "storageError")] + Error(ArchiveStorageEventError), + /// No more storage events will be emitted after this. + #[serde(rename = "storageDone")] + Done, +} + +impl ArchiveStorageEvent { + /// Return a storage item or `None` if not an [`ArchiveStorageEvent::Item`]. + pub fn as_item(self) -> Option> { + match self { + ArchiveStorageEvent::Item(item) => Some(item), + _ => None, + } + } + + /// Return a storage error or `None` if not an [`ArchiveStorageEvent::Error`]. 
+ pub fn as_error(self) -> Option { + match self { + ArchiveStorageEvent::Error(e) => Some(e), + _ => None, + } + } + + /// Is this an [`ArchiveStorageEvent::Done`]. + pub fn is_done(self) -> bool { + matches!(self, ArchiveStorageEvent::Done) + } +} + +/// Something went wrong during the [`ChainHeadRpcMethods::archive_unstable_storage()`] +/// subscription. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageEventError { + /// The human readable error message indicating what went wrong. + pub error: String, +} + +/// A storage item returned from the [`ChainHeadRpcMethods::archive_unstable_storage()`] +/// subscription. +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageEventItem { + /// String containing the hexadecimal-encoded key of the storage entry. + pub key: Bytes, + /// String containing the hexadecimal-encoded value of the storage entry. + /// Returned when the request type is [`StorageQueryType::Value`] or + /// [`StorageQueryType::DescendantsValues`]. + pub value: Option, + /// String containing the hexadecimal-encoded hash of the storage entry. + /// Returned when the request type is [`StorageQueryType::Hash`] or + /// [`StorageQueryType::DescendantsHashes`]. + pub hash: Option, + /// String containing the hexadecimal-encoded Merkle value of the closest descendant of key + /// (including branch nodes). Returned when the request type is + /// [`StorageQueryType::ClosestDescendantMerkleValue`]. + pub closest_descendant_merkle_value: Option, + /// String containing the hexadecimal-encoded key of the child trie of the "default" namespace + /// if the storage entry is part of a child trie. If the storage entry is part of the main + /// trie, this field is not present. + pub child_trie_key: Option, +} + +/// Hex-serialized shim for `Vec`. 
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord, Debug)] +pub struct Bytes(#[serde(with = "impl_serde::serialize")] pub Vec); +impl std::ops::Deref for Bytes { + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0[..] + } +} +impl From> for Bytes { + fn from(s: Vec) -> Self { + Bytes(s) + } +} + +fn to_hex(bytes: impl AsRef<[u8]>) -> String { + format!("0x{}", hex::encode(bytes.as_ref())) +} + +/// Attempt to deserialize either a string or integer into an integer. +/// See +pub(crate) mod unsigned_number_as_string { + use serde::de::{Deserializer, Visitor}; + use std::fmt; + + /// Deserialize a number from a string or number. + pub fn deserialize<'de, N: From, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_any(NumberVisitor(std::marker::PhantomData)) + } + + struct NumberVisitor(std::marker::PhantomData); + + impl> Visitor<'_> for NumberVisitor { + type Value = N; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an unsigned integer or a string containing one") + } + + fn visit_str(self, v: &str) -> Result { + let n: u64 = v.parse().map_err(serde::de::Error::custom)?; + Ok(n.into()) + } + + fn visit_u64(self, v: u64) -> Result { + Ok(v.into()) + } + } + + use serde::ser::Serializer; + + /// Serialize a number as string + pub fn serialize(item: &u64, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&item.to_string()) + } +} + +/// A temporary shim to decode "spec.apis" if it comes back as an array like: +/// +/// ```text +/// [["0xABC", 1], ["0xCDE", 2]] +/// ``` +/// +/// The expected format (which this also supports deserializing from) is: +/// +/// ```text +/// { "0xABC": 1, "0xCDE": 2 } +/// ``` +/// +/// We can delete this when the correct format is being returned. 
+/// +/// Adapted from +pub(crate) mod hashmap_as_tuple_list { + use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; + use std::{ + collections::HashMap, + fmt, + hash::{BuildHasher, Hash}, + marker::PhantomData, + }; + + /// Deserialize a [`HashMap`] from a list of tuples or object + pub fn deserialize<'de, K, V, BH, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + K: Eq + Hash + Deserialize<'de>, + V: Deserialize<'de>, + BH: BuildHasher + Default, + { + deserializer.deserialize_any(HashMapVisitor(PhantomData)) + } + + #[allow(clippy::type_complexity)] + struct HashMapVisitor(PhantomData HashMap>); + + impl<'de, K, V, BH> Visitor<'de> for HashMapVisitor + where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + BH: BuildHasher + Default, + { + type Value = HashMap; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a list of key-value pairs") + } + + // Work with maps too: + fn visit_map(self, mut m: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + let mut map = + HashMap::with_capacity_and_hasher(m.size_hint().unwrap_or(0), BH::default()); + while let Some((key, value)) = m.next_entry()? { + map.insert(key, value); + } + Ok(map) + } + + // The shim to also work with sequences of tuples. + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let mut map = + HashMap::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), BH::default()); + while let Some((key, value)) = seq.next_element()? 
{ + map.insert(key, value); + } + Ok(map) + } + } + + use serde::ser::{Serialize, SerializeSeq, Serializer}; + + /// Serialize hashmap as list of tuples + pub fn serialize( + item: &HashMap, + serializer: S, + ) -> Result + where + S: Serializer, + { + let mut seq = serializer.serialize_seq(None)?; + for i in item { + seq.serialize_element(&i)?; + } + seq.end() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn can_deserialize_apis_from_tuple_or_object() { + let old_response = serde_json::json!({ + "authoringVersion": 10, + "specName": "zagros", + "implName": "parity-zagros", + "specVersion": 9122, + "implVersion": 0, + "stateVersion": 1, + "transactionVersion": 7, + "apis": [ + ["0xdf6acb689907609b", 3], + ["0x37e397fc7c91f5e4", 1], + ["0x40fe3ad401f8959a", 5], + ["0xd2bc9897eed08f15", 3], + ["0xf78b278be53f454c", 2], + ["0xaf2c0297a23e6d3d", 1], + ["0x49eaaf1b548a0cb0", 1], + ["0x91d5df18b0d2cf58", 1], + ["0xed99c5acb25eedf5", 3], + ["0xcbca25e39f142387", 2], + ["0x687ad44ad37f03c2", 1], + ["0xab3c0572291feb8b", 1], + ["0xbc9d89904f5b923f", 1], + ["0x37c8bb1350a9a2a8", 1] + ] + }); + let old_spec: RuntimeSpec = serde_json::from_value(old_response).unwrap(); + + let new_response = serde_json::json!({ + "specName": "zagros", + "implName": "parity-zagros", + "specVersion": 9122, + "implVersion": 0, + "transactionVersion": 7, + "apis": { + "0xdf6acb689907609b": 3, + "0x37e397fc7c91f5e4": 1, + "0x40fe3ad401f8959a": 5, + "0xd2bc9897eed08f15": 3, + "0xf78b278be53f454c": 2, + "0xaf2c0297a23e6d3d": 1, + "0x49eaaf1b548a0cb0": 1, + "0x91d5df18b0d2cf58": 1, + "0xed99c5acb25eedf5": 3, + "0xcbca25e39f142387": 2, + "0x687ad44ad37f03c2": 1, + "0xab3c0572291feb8b": 1, + "0xbc9d89904f5b923f": 1, + "0x37c8bb1350a9a2a8": 1 + } + }); + let new_spec: RuntimeSpec = serde_json::from_value(new_response).unwrap(); + + assert_eq!(old_spec, new_spec); + } + + #[test] + fn can_deserialize_from_number_or_string() { + #[derive(Debug, Deserialize)] + struct Foo64 { + 
#[serde(with = "super::unsigned_number_as_string")] + num: u64, + } + #[derive(Debug, Deserialize)] + struct Foo32 { + #[serde(with = "super::unsigned_number_as_string")] + num: u128, + } + + let from_string = serde_json::json!({ + "num": "123" + }); + let from_num = serde_json::json!({ + "num": 123 + }); + let from_err = serde_json::json!({ + "num": "123a" + }); + + let f1: Foo64 = + serde_json::from_value(from_string.clone()).expect("can deser string into u64"); + let f2: Foo32 = serde_json::from_value(from_string).expect("can deser string into u32"); + let f3: Foo64 = serde_json::from_value(from_num.clone()).expect("can deser num into u64"); + let f4: Foo32 = serde_json::from_value(from_num).expect("can deser num into u32"); + + assert_eq!(f1.num, 123); + assert_eq!(f2.num, 123); + assert_eq!(f3.num, 123); + assert_eq!(f4.num, 123); + + // Invalid things should lead to an error: + let _ = serde_json::from_value::(from_err) + .expect_err("can't deser invalid num into u32"); + } + + #[test] + fn chain_head_initialized() { + // Latest format version. + let event = serde_json::json!({ + "finalizedBlockHashes": ["0x1", "0x2"], + }); + let decoded: Initialized = serde_json::from_value(event).unwrap(); + assert_eq!(decoded.finalized_block_hashes, vec!["0x1".to_string(), "0x2".to_string()]); + + // Old format. + let event = serde_json::json!({ + "finalizedBlockHash": "0x1", + }); + let decoded: Initialized = serde_json::from_value(event).unwrap(); + assert_eq!(decoded.finalized_block_hashes, vec!["0x1".to_string()]); + + // Wrong format. + let event = serde_json::json!({ + "finalizedBlockHash": ["0x1"], + }); + let _ = serde_json::from_value::>(event).unwrap_err(); + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/methods/legacy.rs b/vendor/pezkuwi-subxt/rpcs/src/methods/legacy.rs new file mode 100644 index 00000000..62f0d753 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/methods/legacy.rs @@ -0,0 +1,682 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An interface to call the raw legacy RPC methods. + +use crate::{ + Error, RpcConfig, + client::{RpcClient, RpcSubscription, rpc_params}, +}; +use codec::Decode; +use derive_where::derive_where; +use frame_metadata::RuntimeMetadataPrefixed; +use primitive_types::U256; +use serde::{Deserialize, Serialize}; + +/// An interface to call the legacy RPC methods. This interface is instantiated with +/// some `T: Config` trait which determines some of the types that the RPC methods will +/// take or hand back. +#[derive_where(Clone, Debug)] +pub struct LegacyRpcMethods { + client: RpcClient, + _marker: std::marker::PhantomData, +} + +impl LegacyRpcMethods { + /// Instantiate the legacy RPC method interface. + pub fn new(client: RpcClient) -> Self { + LegacyRpcMethods { client, _marker: std::marker::PhantomData } + } + + /// Fetch the raw bytes for a given storage key + pub async fn state_get_storage( + &self, + key: &[u8], + hash: Option, + ) -> Result, Error> { + let params = rpc_params![to_hex(key), hash]; + let data: Option = self.client.request("state_getStorage", params).await?; + Ok(data.map(|b| b.0)) + } + + /// Returns the keys with prefix with pagination support. + /// Up to `count` keys will be returned. + /// If `start_key` is passed, return next keys in storage in lexicographic order. + pub async fn state_get_keys_paged( + &self, + key: &[u8], + count: u32, + start_key: Option<&[u8]>, + at: Option, + ) -> Result, Error> { + let start_key = start_key.map(to_hex); + let params = rpc_params![to_hex(key), count, start_key, at]; + let data: Vec = self.client.request("state_getKeysPaged", params).await?; + Ok(data.into_iter().map(|b| b.0).collect()) + } + + /// Query historical storage entries in the range from the start block to the end block, + /// defaulting the end block to the current best block if it's not given. 
The first + /// [`StorageChangeSet`] returned has all of the values for each key, and subsequent ones + /// only contain values for any keys which have changed since the last. + pub async fn state_query_storage( + &self, + keys: impl IntoIterator, + from: T::Hash, + to: Option, + ) -> Result>, Error> { + let keys: Vec = keys.into_iter().map(to_hex).collect(); + let params = rpc_params![keys, from, to]; + self.client.request("state_queryStorage", params).await + } + + /// Query storage entries at some block, using the best block if none is given. + /// This essentially provides a way to ask for a batch of values given a batch of keys, + /// despite the name of the [`StorageChangeSet`] type. + pub async fn state_query_storage_at( + &self, + keys: impl IntoIterator, + at: Option, + ) -> Result>, Error> { + let keys: Vec = keys.into_iter().map(to_hex).collect(); + let params = rpc_params![keys, at]; + self.client.request("state_queryStorageAt", params).await + } + + /// Fetch the genesis hash + pub async fn genesis_hash(&self) -> Result { + let block_zero = 0u32; + let params = rpc_params![block_zero]; + let genesis_hash: Option = + self.client.request("chain_getBlockHash", params).await?; + genesis_hash.ok_or_else(|| Error::Client("Genesis hash not found".into())) + } + + /// Fetch the metadata via the legacy `state_getMetadata` RPC method. 
+ pub async fn state_get_metadata( + &self, + at: Option, + ) -> Result { + let bytes: Bytes = self.client.request("state_getMetadata", rpc_params![at]).await?; + Ok(StateGetMetadataResponse(bytes.0)) + } + + /// Fetch system health + pub async fn system_health(&self) -> Result { + self.client.request("system_health", rpc_params![]).await + } + + /// Fetch system chain + pub async fn system_chain(&self) -> Result { + self.client.request("system_chain", rpc_params![]).await + } + + /// Fetch system name + pub async fn system_name(&self) -> Result { + self.client.request("system_name", rpc_params![]).await + } + + /// Fetch system version + pub async fn system_version(&self) -> Result { + self.client.request("system_version", rpc_params![]).await + } + + /// Fetch system chain type + pub async fn system_chain_type(&self) -> Result { + self.client.request("system_chainType", rpc_params![]).await + } + + /// Fetch system properties + pub async fn system_properties(&self) -> Result { + self.client.request("system_properties", rpc_params![]).await + } + + /// Fetch next nonce for an Account + /// + /// Return account nonce adjusted for extrinsics currently in transaction pool + pub async fn system_account_next_index(&self, account_id: &T::AccountId) -> Result { + self.client.request("system_accountNextIndex", rpc_params![&account_id]).await + } + + /// Get a header + pub async fn chain_get_header( + &self, + hash: Option, + ) -> Result, Error> { + let params = rpc_params![hash]; + let header = self.client.request("chain_getHeader", params).await?; + Ok(header) + } + + /// Get a block hash, returns hash of latest _best_ block by default. 
+ pub async fn chain_get_block_hash( + &self, + block_number: Option, + ) -> Result, Error> { + let params = rpc_params![block_number]; + let block_hash = self.client.request("chain_getBlockHash", params).await?; + Ok(block_hash) + } + + /// Get a block hash of the latest finalized block + pub async fn chain_get_finalized_head(&self) -> Result { + let hash = self.client.request("chain_getFinalizedHead", rpc_params![]).await?; + Ok(hash) + } + + /// Get a Block + pub async fn chain_get_block( + &self, + hash: Option, + ) -> Result>, Error> { + let params = rpc_params![hash]; + let block = self.client.request("chain_getBlock", params).await?; + Ok(block) + } + + /// Reexecute the specified `block_hash` and gather statistics while doing so. + /// + /// This function requires the specified block and its parent to be available + /// at the queried node. If either the specified block or the parent is pruned, + /// this function will return `None`. + pub async fn dev_get_block_stats( + &self, + block_hash: T::Hash, + ) -> Result, Error> { + let params = rpc_params![block_hash]; + let stats = self.client.request("dev_getBlockStats", params).await?; + Ok(stats) + } + + /// Get proof of storage entries at a specific block's state. + pub async fn state_get_read_proof( + &self, + keys: impl IntoIterator, + hash: Option, + ) -> Result, Error> { + let keys: Vec = keys.into_iter().map(to_hex).collect(); + let params = rpc_params![keys, hash]; + let proof = self.client.request("state_getReadProof", params).await?; + Ok(proof) + } + + /// Fetch the runtime version + pub async fn state_get_runtime_version( + &self, + at: Option, + ) -> Result { + let params = rpc_params![at]; + let version = self.client.request("state_getRuntimeVersion", params).await?; + Ok(version) + } + + /// Subscribe to all new best block headers. 
+ pub async fn chain_subscribe_new_heads(&self) -> Result, Error> { + let subscription = self + .client + .subscribe( + // Despite the name, this returns a stream of all new blocks + // imported by the node that happen to be added to the current best chain + // (ie all best blocks). + "chain_subscribeNewHeads", + rpc_params![], + "chain_unsubscribeNewHeads", + ) + .await?; + + Ok(subscription) + } + + /// Subscribe to all new block headers. + pub async fn chain_subscribe_all_heads(&self) -> Result, Error> { + let subscription = self + .client + .subscribe( + // Despite the name, this returns a stream of all new blocks + // imported by the node that happen to be added to the current best chain + // (ie all best blocks). + "chain_subscribeAllHeads", + rpc_params![], + "chain_unsubscribeAllHeads", + ) + .await?; + + Ok(subscription) + } + + /// Subscribe to finalized block headers. + /// + /// Note: this may not produce _every_ block in the finalized chain; + /// sometimes multiple blocks are finalized at once, and in this case only the + /// latest one is returned. the higher level APIs that use this "fill in" the + /// gaps for us. + pub async fn chain_subscribe_finalized_heads( + &self, + ) -> Result, Error> { + let subscription = self + .client + .subscribe( + "chain_subscribeFinalizedHeads", + rpc_params![], + "chain_unsubscribeFinalizedHeads", + ) + .await?; + Ok(subscription) + } + + /// Subscribe to runtime version updates that produce changes in the metadata. + /// The first item emitted by the stream is the current runtime version. 
+ pub async fn state_subscribe_runtime_version( + &self, + ) -> Result, Error> { + let subscription = self + .client + .subscribe( + "state_subscribeRuntimeVersion", + rpc_params![], + "state_unsubscribeRuntimeVersion", + ) + .await?; + Ok(subscription) + } + + /// Create and submit an extrinsic and return corresponding Hash if successful + pub async fn author_submit_extrinsic(&self, extrinsic: &[u8]) -> Result { + let params = rpc_params![to_hex(extrinsic)]; + let xt_hash = self.client.request("author_submitExtrinsic", params).await?; + Ok(xt_hash) + } + + /// Create and submit an extrinsic and return a subscription to the events triggered. + pub async fn author_submit_and_watch_extrinsic( + &self, + extrinsic: &[u8], + ) -> Result>, Error> { + let params = rpc_params![to_hex(extrinsic)]; + let subscription = self + .client + .subscribe("author_submitAndWatchExtrinsic", params, "author_unwatchExtrinsic") + .await?; + Ok(subscription) + } + + /// Insert a key into the keystore. + pub async fn author_insert_key( + &self, + key_type: String, + suri: String, + public: Vec, + ) -> Result<(), Error> { + let params = rpc_params![key_type, suri, Bytes(public)]; + self.client.request("author_insertKey", params).await + } + + /// Generate new session keys and returns the corresponding public keys. + pub async fn author_rotate_keys(&self) -> Result, Error> { + let bytes: Bytes = self.client.request("author_rotateKeys", rpc_params![]).await?; + Ok(bytes.0) + } + + /// Checks if the keystore has private keys for the given session public keys. + /// + /// `session_keys` is the SCALE encoded session keys object from the runtime. + /// + /// Returns `true` if all private keys could be found. + pub async fn author_has_session_keys(&self, session_keys: Vec) -> Result { + let params = rpc_params![Bytes(session_keys)]; + self.client.request("author_hasSessionKeys", params).await + } + + /// Checks if the keystore has private keys for the given public key and key type. 
+ /// + /// Returns `true` if a private key could be found. + pub async fn author_has_key( + &self, + public_key: Vec, + key_type: String, + ) -> Result { + let params = rpc_params![Bytes(public_key), key_type]; + self.client.request("author_hasKey", params).await + } + + /// Execute a runtime API call via `state_call` RPC method. + pub async fn state_call( + &self, + function: &str, + call_parameters: Option<&[u8]>, + at: Option, + ) -> Result, Error> { + let call_parameters = call_parameters.unwrap_or_default(); + let bytes: Bytes = self + .client + .request("state_call", rpc_params![function, to_hex(call_parameters), at]) + .await?; + Ok(bytes.0) + } + + /// Submits the extrinsic to the dry_run RPC, to test if it would succeed. + /// + /// Returns a [`DryRunResult`], which is the result of performing the dry run. + pub async fn dry_run( + &self, + encoded_signed: &[u8], + at: Option, + ) -> Result { + let params = rpc_params![to_hex(encoded_signed), at]; + let result_bytes: Bytes = self.client.request("system_dryRun", params).await?; + Ok(DryRunResultBytes(result_bytes.0)) + } +} + +/// Response from the legacy `state_get_metadata` RPC call. +pub struct StateGetMetadataResponse(Vec); + +impl StateGetMetadataResponse { + /// Return the raw SCALE encoded metadata bytes + pub fn into_raw(self) -> Vec { + self.0 + } + /// Decode and return [`frame_metadata::RuntimeMetadataPrefixed`]. + pub fn to_frame_metadata( + &self, + ) -> Result { + RuntimeMetadataPrefixed::decode(&mut &*self.0) + } +} + +/// Storage key. +pub type StorageKey = Vec; + +/// Storage data. +pub type StorageData = Vec; + +/// Health struct returned by the RPC +#[derive(Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct SystemHealth { + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. 
+ pub should_have_peers: bool, +} + +/// System properties; an arbitrary JSON object. +pub type SystemProperties = serde_json::Map; + +/// A block number +pub type BlockNumber = NumberOrHex; + +/// The response from `chain_getBlock` +#[derive(Debug, Deserialize)] +#[serde(bound = "T: RpcConfig")] +pub struct BlockDetails { + /// The block itself. + pub block: Block, + /// Block justification. + pub justifications: Option>, +} + +/// Block details in the [`BlockDetails`]. +#[derive(Debug, Deserialize)] +pub struct Block { + /// The block header. + pub header: T::Header, + /// The accompanying extrinsics. + pub extrinsics: Vec, +} + +/// An abstraction over justification for a block's validity under a consensus algorithm. +pub type BlockJustification = (ConsensusEngineId, EncodedJustification); +/// Consensus engine unique ID. +pub type ConsensusEngineId = [u8; 4]; +/// The encoded justification specific to a consensus engine. +pub type EncodedJustification = Vec; + +/// This contains the runtime version information necessary to make transactions, as obtained from +/// the RPC call `state_getRuntimeVersion`, +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RuntimeVersion { + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. 
+ /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, + + /// Fields unnecessary to Subxt are written out to this map. + #[serde(flatten)] + pub other: std::collections::HashMap, +} + +/// Possible transaction status events. +/// +/// # Note +/// +/// This is copied from `sp-transaction-pool` to avoid a dependency on that crate. Therefore it +/// must be kept compatible with that type from the target bizinikiwi version. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Future, + /// Transaction is part of the ready queue. + Ready, + /// The transaction has been broadcast to the given peers. + Broadcast(Vec), + /// Transaction has been included in block with given hash. + InBlock(Hash), + /// The block this transaction was included in has been retracted. + Retracted(Hash), + /// Maximum number of finality watchers has been reached, + /// old watchers are being removed. + FinalityTimeout(Hash), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + Finalized(Hash), + /// Transaction has been replaced in the pool, by another transaction + /// that provides the same tags. (e.g. same (sender, nonce)). + Usurped(Hash), + /// Transaction has been dropped from the pool because of the limit. + Dropped, + /// Transaction is no longer valid in the current state. + Invalid, +} + +/// The decoded result returned from calling `system_dryRun` on some extrinsic. +#[derive(Debug, PartialEq, Eq)] +pub enum DryRunResult<'a> { + /// The transaction could be included in the block and executed. 
+ Success, + /// The transaction could be included in the block, but the call failed to dispatch. + /// If Subxt is available, the bytes here can be further decoded by calling: + /// + /// ```rust,ignore + /// pezkuwi_subxt::error::DispatchError::decode_from(bytes, metadata)?; + /// ``` + /// + /// Where metadata is an instance of `pezkuwi_subxt::Metadata` that is valid for the runtime + /// version which returned this error. + DispatchError(&'a [u8]), + /// The transaction could not be included in the block. + TransactionValidityError, +} + +/// The bytes representing an error dry running an extrinsic. call +/// [`DryRunResultBytes::into_dry_run_result`] to attempt to decode this into something more +/// meaningful. +pub struct DryRunResultBytes(pub Vec); + +impl DryRunResultBytes { + /// Attempt to decode the error bytes into a [`DryRunResult`]. + pub fn into_dry_run_result(&self) -> Result, DryRunDecodeError> { + // dryRun returns an ApplyExtrinsicResult, which is basically a + // `Result, TransactionValidityError>`. + let bytes = &*self.0; + + // We expect at least 2 bytes. In case we got a naff response back (or + // manually constructed this struct), just error to avoid a panic: + if bytes.len() < 2 { + return Err(DryRunDecodeError::WrongNumberOfBytes); + } + + if bytes[0] == 0 && bytes[1] == 0 { + // Ok(Ok(())); transaction is valid and executed ok + Ok(DryRunResult::Success) + } else if bytes[0] == 0 && bytes[1] == 1 { + // Ok(Err(dispatch_error)); transaction is valid but execution failed + Ok(DryRunResult::DispatchError(&bytes[2..])) + } else if bytes[0] == 1 { + // Err(transaction_error); some transaction validity error (we ignore the details at the + // moment) + Ok(DryRunResult::TransactionValidityError) + } else { + // unable to decode the bytes; they aren't what we expect. + Err(DryRunDecodeError::InvalidBytes) + } + } +} + +/// An error which can be emitted when calling [`DryRunResultBytes::into_dry_run_result`]. 
+pub enum DryRunDecodeError { + /// The dry run result was less than 2 bytes, which is invalid. + WrongNumberOfBytes, + /// The dry run bytes are not valid. + InvalidBytes, +} + +/// Storage change set +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug)] +#[serde(rename_all = "camelCase")] +pub struct StorageChangeSet { + /// Block hash + pub block: Hash, + /// A list of changes; tuples of storage key and optional storage data. + pub changes: Vec<(Bytes, Option)>, +} + +/// Statistics of a block returned by the `dev_getBlockStats` RPC. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BlockStats { + /// The length in bytes of the storage proof produced by executing the block. + pub witness_len: u64, + /// The length in bytes of the storage proof after compaction. + pub witness_compact_len: u64, + /// Length of the block in bytes. + /// + /// This information can also be acquired by downloading the whole block. This merely + /// saves some complexity on the client side. + pub block_len: u64, + /// Number of extrinsics in the block. + /// + /// This information can also be acquired by downloading the whole block. This merely + /// saves some complexity on the client side. + pub num_extrinsics: u64, +} + +/// ReadProof struct returned by the RPC +/// +/// # Note +/// +/// This is copied from `sc-rpc-api` to avoid a dependency on that crate. Therefore it +/// must be kept compatible with that type from the target bizinikiwi version. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReadProof { + /// Block hash used to generate the proof + pub at: Hash, + /// A proof used to prove that storage entries are included in the storage trie + pub proof: Vec, +} + +/// A number type that can be serialized both as a number or a string that encodes a number in a +/// string. +/// +/// We allow two representations of the block number as input. 
Either we deserialize to the type +/// that is specified in the block type or we attempt to parse given hex value. +/// +/// The primary motivation for having this type is to avoid overflows when using big integers in +/// JavaScript (which we consider as an important RPC API consumer). +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(untagged)] +pub enum NumberOrHex { + /// The number represented directly. + Number(u64), + /// Hex representation of the number. + Hex(U256), +} + +impl NumberOrHex { + /// Converts this number into an U256. + pub fn into_u256(self) -> U256 { + match self { + NumberOrHex::Number(n) => n.into(), + NumberOrHex::Hex(h) => h, + } + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() + } +} + +macro_rules! into_number_or_hex { + ($($t: ty)+) => { + $( + impl From<$t> for NumberOrHex { + fn from(x: $t) -> Self { + NumberOrHex::Number(x.into()) + } + } + )+ + } +} +into_number_or_hex!(u8 u16 u32 u64); + +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + +impl From for NumberOrHex { + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } +} + +/// A quick helper to encode some bytes to hex. +fn to_hex(bytes: impl AsRef<[u8]>) -> String { + format!("0x{}", hex::encode(bytes.as_ref())) +} + +/// Hex-serialized shim for `Vec`. +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord, Debug)] +pub struct Bytes(#[serde(with = "impl_serde::serialize")] pub Vec); +impl std::ops::Deref for Bytes { + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0[..] 
+ } +} +impl From> for Bytes { + fn from(s: Vec) -> Self { + Bytes(s) + } +} diff --git a/vendor/pezkuwi-subxt/rpcs/src/methods/mod.rs b/vendor/pezkuwi-subxt/rpcs/src/methods/mod.rs new file mode 100644 index 00000000..40b72880 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/methods/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! RPC methods are defined in this module. At the moment we have: +//! +//! - [`ChainHeadRpcMethods`] (and the types in [`chain_head`]): these methods +//! implement the RPC spec at +//! +//! We also have (although their use is not advised): +//! +//! - [`LegacyRpcMethods`] (and the types in [`legacy`]): a collection of legacy RPCs. These are not +//! well specified and may change in implementations without warning, but for those methods we +//! expose, we make a best effort to work against latest Bizinikiwi versions. + +pub mod chain_head; +pub mod legacy; + +pub use chain_head::ChainHeadRpcMethods; +pub use legacy::LegacyRpcMethods; diff --git a/vendor/pezkuwi-subxt/rpcs/src/utils.rs b/vendor/pezkuwi-subxt/rpcs/src/utils.rs new file mode 100644 index 00000000..bc460227 --- /dev/null +++ b/vendor/pezkuwi-subxt/rpcs/src/utils.rs @@ -0,0 +1,30 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A couple of utility methods that we make use of. + +use crate::Error; +use url::Url; + +/// A URL is considered secure if it uses a secure scheme ("https" or "wss") or is referring to +/// localhost. +/// +/// Returns an error if the string could not be parsed into a URL. 
+pub fn url_is_secure(url: &str) -> Result { + let url = Url::parse(url).map_err(|e| Error::Client(Box::new(e)))?; + + let secure_scheme = url.scheme() == "https" || url.scheme() == "wss"; + let is_localhost = url.host().is_some_and(|e| match e { + url::Host::Domain(e) => e == "localhost", + url::Host::Ipv4(e) => e.is_loopback(), + url::Host::Ipv6(e) => e.is_loopback(), + }); + + Ok(secure_scheme || is_localhost) +} + +/// Validates, that the given Url is secure ("https" or "wss" scheme) or is referring to localhost. +pub fn validate_url_is_secure(url: &str) -> Result<(), Error> { + if !url_is_secure(url)? { Err(Error::InsecureUrl(url.into())) } else { Ok(()) } +} diff --git a/vendor/pezkuwi-subxt/signer/Cargo.toml b/vendor/pezkuwi-subxt/signer/Cargo.toml new file mode 100644 index 00000000..4a136bd2 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/Cargo.toml @@ -0,0 +1,113 @@ +[package] +name = "pezkuwi-subxt-signer" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Sign extrinsics to be submitted by Subxt" +keywords = ["extrinsic", "parity", "signer", "subxt"] + +[features] +default = ["ecdsa", "sr25519", "std", "subxt"] +std = [ + "base64?/std", + "bip39/std", + "crypto_secretbox?/std", + "hmac/std", + "pbkdf2/std", + "regex/std", + "schnorrkel?/std", + "scrypt?/std", + "secp256k1?/std", + "serde?/std", + "serde_json?/std", + "sha2/std", +] + +# Pick the signer implementation(s) you need by enabling the +# corresponding features. 
Note: I had more difficulties getting +# ecdsa compiling to WASM on my mac; following this comment helped: +# https://github.com/rust-bitcoin/rust-bitcoin/issues/930#issuecomment-1215538699 +sr25519 = ["schnorrkel"] +ecdsa = ["secp256k1"] +unstable-eth = ["bip32", "ecdsa", "keccak-hash", "secp256k1"] + +# Enable support for loading key pairs from pezkuwi-js json. +pezkuwi-js-compat = [ + "base64", + "crypto_secretbox", + "scrypt", + "serde", + "serde_json", + "sr25519", + "std", + "subxt", +] + +# Make the keypair algorithms here compatible with Subxt's Signer trait, +# so that they can be used to sign transactions for compatible chains. +subxt = ["dep:pezkuwi-subxt-core"] + +# The getrandom package is used via schnorrkel. We need to enable the JS +# feature on it if compiling for the web. +web = ["getrandom/js"] + +[dependencies] +bip32 = { workspace = true, features = ["alloc", "secp256k1"], optional = true } +bip39 = { workspace = true } +cfg-if = { workspace = true } +codec = { package = "parity-scale-codec", workspace = true, features = [ + "derive", +] } +hex = { workspace = true } +hmac = { workspace = true } +keccak-hash = { workspace = true, optional = true } +pbkdf2 = { workspace = true } +pezkuwi-subxt-core = { workspace = true, optional = true, default-features = false } +pezsp-crypto-hashing = { workspace = true } +regex = { workspace = true, features = ["unicode"] } +schnorrkel = { workspace = true, optional = true, features = ["getrandom"] } +secp256k1 = { workspace = true, optional = true, features = [ + "alloc", + "recovery", +] } +secrecy = { workspace = true } +sha2 = { workspace = true } +thiserror = { workspace = true, default-features = false } +zeroize = { workspace = true } + +# These are used if the pezkuwi-js-compat feature is enabled +base64 = { workspace = true, optional = true, features = ["alloc"] } +crypto_secretbox = { workspace = true, optional = true, features = ["alloc", "salsa20"] } +scrypt = { workspace = true, default-features 
= false, optional = true } +serde = { workspace = true, optional = true } +serde_json = { workspace = true, optional = true } + +# We only pull this in to enable the JS flag for schnorrkel to use. +getrandom = { workspace = true, optional = true } + +[dev-dependencies] +hex-literal = { workspace = true } +pezsp-core = { workspace = true } +pezsp-keyring = { workspace = true } +proptest = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["getrandom"] + +[package.metadata.docs.rs] +default-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/signer/README.md b/vendor/pezkuwi-subxt/signer/README.md new file mode 100644 index 00000000..1bd77b47 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/README.md @@ -0,0 +1,5 @@ +# Subxt-signer + +This library exposes a small, WASM compatible signer implementation which can be used in conjunction with Subxt to sign transactions. + +This library can be used without Subxt by disabling the `subxt` feature flag, which is enabled by default. diff --git a/vendor/pezkuwi-subxt/signer/src/crypto/derive_junction.rs b/vendor/pezkuwi-subxt/signer/src/crypto/derive_junction.rs new file mode 100644 index 00000000..bfdbe4a9 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/crypto/derive_junction.rs @@ -0,0 +1,99 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use codec::{Decode, Encode}; + +// This code is taken from sp_core::crypto::DeriveJunction. The logic should be identical, +// though the API is tweaked a touch. + +/// The length of the junction identifier. Note that this is also referred to as the +/// `CHAIN_CODE_LENGTH` in the context of Schnorrkel. +pub const JUNCTION_ID_LEN: usize = 32; + +/// A since derivation junction description. 
It is the single parameter used when creating +/// a new secret key from an existing secret key and, in the case of `SoftRaw` and `SoftIndex` +/// a new public key from an existing public key. +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Encode, Decode)] +pub enum DeriveJunction { + /// Soft (vanilla) derivation. Public keys have a correspondent derivation. + Soft([u8; JUNCTION_ID_LEN]), + /// Hard ("hardened") derivation. Public keys do not have a correspondent derivation. + Hard([u8; JUNCTION_ID_LEN]), +} + +impl DeriveJunction { + /// Consume self to return a soft derive junction with the same chain code. + pub fn soften(self) -> Self { + DeriveJunction::Soft(self.into_inner()) + } + + /// Consume self to return a hard derive junction with the same chain code. + pub fn harden(self) -> Self { + DeriveJunction::Hard(self.into_inner()) + } + + /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. + /// + /// If you need a hard junction, use `hard()`. + pub fn soft(index: T) -> Self { + let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); + index.using_encoded(|data| { + if data.len() > JUNCTION_ID_LEN { + cc.copy_from_slice(&pezsp_crypto_hashing::blake2_256(data)); + } else { + cc[0..data.len()].copy_from_slice(data); + } + }); + DeriveJunction::Soft(cc) + } + + /// Create a new hard (hardened) DeriveJunction from a given, encodable, value. + /// + /// If you need a soft junction, use `soft()`. + pub fn hard(index: T) -> Self { + Self::soft(index).harden() + } + + /// Consume self to return the chain code. + pub fn into_inner(self) -> [u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, + } + } + + /// Get a reference to the inner junction id. + pub fn inner(&self) -> &[u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, + } + } + + /// Return `true` if the junction is soft. 
+ pub fn is_soft(&self) -> bool { + matches!(*self, DeriveJunction::Soft(_)) + } + + /// Return `true` if the junction is hard. + pub fn is_hard(&self) -> bool { + matches!(*self, DeriveJunction::Hard(_)) + } +} + +impl> From for DeriveJunction { + fn from(j: T) -> DeriveJunction { + let j = j.as_ref(); + let (code, hard) = + if let Some(stripped) = j.strip_prefix('/') { (stripped, true) } else { (j, false) }; + + let res = if let Ok(n) = str::parse::(code) { + // number + DeriveJunction::soft(n) + } else { + // something else + DeriveJunction::soft(code) + }; + + if hard { res.harden() } else { res } + } +} diff --git a/vendor/pezkuwi-subxt/signer/src/crypto/mod.rs b/vendor/pezkuwi-subxt/signer/src/crypto/mod.rs new file mode 100644 index 00000000..0aebafbc --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/crypto/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +// The crypto module contains code adapted from sp_core::crypto. + +mod derive_junction; +mod secret_uri; + +// No need for the cfg other than to avoid an unused_imports lint warning. +#[cfg(any(feature = "sr25519", feature = "ecdsa"))] +mod seed_from_entropy; + +pub use derive_junction::DeriveJunction; +pub use secret_uri::{DEV_PHRASE, SecretUri, SecretUriError}; + +#[cfg(any(feature = "sr25519", feature = "ecdsa"))] +pub use seed_from_entropy::seed_from_entropy; diff --git a/vendor/pezkuwi-subxt/signer/src/crypto/secret_uri.rs b/vendor/pezkuwi-subxt/signer/src/crypto/secret_uri.rs new file mode 100644 index 00000000..3c1f1e2f --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/crypto/secret_uri.rs @@ -0,0 +1,155 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use super::DeriveJunction; +use alloc::{string::ToString, vec::Vec}; +use regex::Regex; +use secrecy::SecretString; + +use thiserror::Error as DeriveError; + +// This code is taken from sp_core::crypto::DeriveJunction. The logic should be identical, +// though the code is tweaked a touch! + +/// A secret uri (`SURI`) that can be used to generate a key pair. +/// +/// The `SURI` can be parsed from a string. The string takes this form: +/// +/// ```text +/// phrase/path0/path1///password +/// 111111 22222 22222 33333333 +/// ``` +/// +/// Where: +/// - 1 denotes a phrase or hex string. If this is not provided, the [`DEV_PHRASE`] is used instead. +/// - 2's denote optional "derivation junctions" which are used to derive keys. Each of these is +/// separated by "/". A derivation junction beginning with "/" (ie "//" in the original string) is +/// a "hard" path. +/// - 3 denotes an optional password which is used in conjunction with the phrase provided in 1 to +/// generate an initial key. If hex is provided for 1, it's ignored. +/// +/// Notes: +/// - If 1 is a `0x` prefixed 64-digit hex string, then we'll interpret it as hex, and treat the hex +/// bytes as a seed/MiniSecretKey directly, ignoring any password. +/// - Else if the phrase part is a valid BIP-39 phrase, we'll use the phrase (and password, if +/// provided) to generate a seed/MiniSecretKey. +/// - Uris like "//Alice" correspond to keys derived from a DEV_PHRASE, since no phrase part is +/// given. +/// +/// There is no correspondence mapping between `SURI` strings and the keys they represent. +/// Two different non-identical strings can actually lead to the same secret being derived. +/// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. +/// Similarly an empty password (ending the `SURI` with `///`) is perfectly valid and will +/// generally be equivalent to no password at all. 
+/// +/// # Examples +/// +/// Parse [`DEV_PHRASE`] secret URI with junction: +/// +/// ``` +/// # use pezkuwi_subxt_signer::{SecretUri, DeriveJunction, DEV_PHRASE, ExposeSecret}; +/// # use std::str::FromStr; +/// let suri = SecretUri::from_str("//Alice").expect("Parse SURI"); +/// +/// assert_eq!(vec![DeriveJunction::from("Alice").harden()], suri.junctions); +/// assert_eq!(DEV_PHRASE, suri.phrase.expose_secret()); +/// assert!(suri.password.is_none()); +/// ``` +/// +/// Parse [`DEV_PHRASE`] secret URI with junction and password: +/// +/// ``` +/// # use pezkuwi_subxt_signer::{SecretUri, DeriveJunction, DEV_PHRASE, ExposeSecret}; +/// # use std::str::FromStr; +/// let suri = SecretUri::from_str("//Alice///SECRET_PASSWORD").expect("Parse SURI"); +/// +/// assert_eq!(vec![DeriveJunction::from("Alice").harden()], suri.junctions); +/// assert_eq!(DEV_PHRASE, suri.phrase.expose_secret()); +/// assert_eq!("SECRET_PASSWORD", suri.password.unwrap().expose_secret()); +/// ``` +/// +/// Parse [`DEV_PHRASE`] secret URI with hex phrase and junction: +/// +/// ``` +/// # use pezkuwi_subxt_signer::{SecretUri, DeriveJunction, DEV_PHRASE, ExposeSecret}; +/// # use std::str::FromStr; +/// let suri = SecretUri::from_str("0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a//Alice").expect("Parse SURI"); +/// +/// assert_eq!(vec![DeriveJunction::from("Alice").harden()], suri.junctions); +/// assert_eq!("0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a", suri.phrase.expose_secret()); +/// assert!(suri.password.is_none()); +/// ``` +pub struct SecretUri { + /// The phrase to derive the private key. + /// + /// This can either be a 64-bit hex string or a BIP-39 key phrase. + pub phrase: SecretString, + /// Optional password as given as part of the uri. + pub password: Option, + /// The junctions as part of the uri. 
+ pub junctions: Vec, +} + +impl core::str::FromStr for SecretUri { + type Err = SecretUriError; + + fn from_str(s: &str) -> Result { + let cap = secret_phrase_regex().captures(s).ok_or(SecretUriError::InvalidFormat)?; + + let junctions = junction_regex() + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])) + .collect::>(); + + let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); + let password = cap.name("password"); + + Ok(Self { + phrase: SecretString::from(phrase.to_string()), + password: password.map(|v| SecretString::from(v.as_str().to_string())), + junctions, + }) + } +} + +/// This is returned if `FromStr` cannot parse a string into a `SecretUri`. +#[derive(Debug, Copy, Clone, PartialEq, DeriveError)] +pub enum SecretUriError { + /// Parsing the secret URI from a string failed; wrong format. + #[error("Invalid secret phrase format")] + InvalidFormat, +} + +once_static_cloned! { + /// Interpret a phrase like: + /// + /// ```text + /// foo bar wibble /path0/path1///password + /// 11111111111111 222222222222 33333333 + /// ``` + /// Where 1 is the phrase, 2 the path and 3 the password. + /// Taken from `sp_core::crypto::SECRET_PHRASE_REGEX`. + fn secret_phrase_regex() -> regex::Regex { + Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$").unwrap() + } + + /// Interpret a part of a path into a "junction": + /// + /// ```text + /// //foo/bar/wibble + /// 1111 222 333333 + /// ``` + /// Where the numbers denote matching junctions. + /// + /// The leading "/" deliminates each part, and then a "/" beginning + /// a path piece denotes that it's a "hard" path. Taken from + /// `sp_core::crypto::JUNCTION_REGEX`. + fn junction_regex() -> regex::Regex { + Regex::new(r"/(/?[^/]+)").unwrap() + } +} + +/// The root phrase for our publicly known keys. 
+pub const DEV_PHRASE: &str = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; diff --git a/vendor/pezkuwi-subxt/signer/src/crypto/seed_from_entropy.rs b/vendor/pezkuwi-subxt/signer/src/crypto/seed_from_entropy.rs new file mode 100644 index 00000000..c62cb8db --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/crypto/seed_from_entropy.rs @@ -0,0 +1,31 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use alloc::string::String; +use hmac::Hmac; +use pbkdf2::pbkdf2; +use sha2::Sha512; +use zeroize::Zeroize; + +/// This is taken from `bizinikiwi-bip39` so that we can keep dependencies in line, and +/// is the same logic that sp-core uses to go from mnemonic entropy to seed. Returns +/// `None` if invalid length. +#[allow(dead_code)] +pub fn seed_from_entropy(entropy: &[u8], password: &str) -> Option<[u8; 64]> { + if entropy.len() < 16 || entropy.len() > 32 || entropy.len() % 4 != 0 { + return None; + } + + let mut salt = String::with_capacity(8 + password.len()); + salt.push_str("mnemonic"); + salt.push_str(password); + + let mut seed = [0u8; 64]; + + pbkdf2::>(entropy, salt.as_bytes(), 2048, &mut seed).ok()?; + + salt.zeroize(); + + Some(seed) +} diff --git a/vendor/pezkuwi-subxt/signer/src/ecdsa.rs b/vendor/pezkuwi-subxt/signer/src/ecdsa.rs new file mode 100644 index 00000000..ac306f23 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/ecdsa.rs @@ -0,0 +1,443 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An ecdsa keypair implementation. 
+use codec::Encode; + +use crate::crypto::{DeriveJunction, SecretUri, seed_from_entropy}; +use core::str::FromStr; +use hex::FromHex; +use secp256k1::{Message, Secp256k1, SecretKey, ecdsa::RecoverableSignature}; +use secrecy::ExposeSecret; + +use thiserror::Error as DeriveError; + +const SECRET_KEY_LENGTH: usize = 32; + +/// Seed bytes used to generate a key pair. +pub type SecretKeyBytes = [u8; SECRET_KEY_LENGTH]; + +/// A signature generated by [`Keypair::sign()`]. These bytes are equivalent +/// to a Bizinikiwi `MultiSignature::Ecdsa(bytes)`. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Signature(pub [u8; 65]); + +impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// The (compressed) public key for an [`Keypair`] key pair. +#[derive(Debug, Clone)] +pub struct PublicKey(pub [u8; 33]); + +impl AsRef<[u8]> for PublicKey { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// An ecdsa keypair implementation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Keypair(pub secp256k1::Keypair); + +impl Keypair { + /// Create an ecdsa keypair from a [`SecretUri`]. See the [`SecretUri`] docs for more. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ SecretUri, ecdsa::Keypair }; + /// use std::str::FromStr; + /// + /// let uri = SecretUri::from_str("//Alice").unwrap(); + /// let keypair = Keypair::from_uri(&uri).unwrap(); + /// + /// keypair.sign(b"Hello world!"); + /// ``` + pub fn from_uri(uri: &SecretUri) -> Result { + let SecretUri { junctions, phrase, password } = uri; + + // If the phrase is hex, convert bytes directly into a seed, ignoring password. + // Else, parse the phrase string taking the password into account. This is + // the same approach taken in sp_core::crypto::Pair::from_string_with_seed. + let key = if let Some(hex_str) = phrase.expose_secret().strip_prefix("0x") { + let seed = SecretKeyBytes::from_hex(hex_str)?; + Self::from_secret_key(seed)? 
+ } else { + let phrase = bip39::Mnemonic::from_str(phrase.expose_secret())?; + let pass_str = password.as_ref().map(|p| p.expose_secret()); + Self::from_phrase(&phrase, pass_str)? + }; + + // Now, use any "junctions" to derive a new key from this root key. + key.derive(junctions.iter().copied()) + } + + /// Create an ecdsa keypair from a BIP-39 mnemonic phrase and optional password. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ bip39::Mnemonic, ecdsa::Keypair }; + /// + /// let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; + /// let mnemonic = Mnemonic::parse(phrase).unwrap(); + /// let keypair = Keypair::from_phrase(&mnemonic, None).unwrap(); + /// + /// keypair.sign(b"Hello world!"); + /// ``` + pub fn from_phrase(mnemonic: &bip39::Mnemonic, password: Option<&str>) -> Result { + let (arr, len) = mnemonic.to_entropy_array(); + let big_seed = + seed_from_entropy(&arr[0..len], password.unwrap_or("")).ok_or(Error::InvalidSeed)?; + + let secret_key_bytes: SecretKeyBytes = + big_seed[..SECRET_KEY_LENGTH].try_into().expect("should be valid Seed"); + + Self::from_secret_key(secret_key_bytes) + } + + /// Turn a 32 byte seed into a keypair. + /// + /// # Warning + /// + /// This will only be secure if the seed is secure! + pub fn from_secret_key(secret_key: SecretKeyBytes) -> Result { + let secret = SecretKey::from_slice(&secret_key).map_err(|_| Error::InvalidSeed)?; + Ok(Self(secp256k1::Keypair::from_secret_key(&Secp256k1::signing_only(), &secret))) + } + + /// Derive a child key from this one given a series of junctions. 
+ /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ bip39::Mnemonic, ecdsa::Keypair, DeriveJunction }; + /// + /// let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; + /// let mnemonic = Mnemonic::parse(phrase).unwrap(); + /// let keypair = Keypair::from_phrase(&mnemonic, None).unwrap(); + /// + /// // Equivalent to the URI path '//Alice//stash': + /// let new_keypair = keypair.derive([ + /// DeriveJunction::hard("Alice"), + /// DeriveJunction::hard("stash") + /// ]); + /// ``` + pub fn derive>( + &self, + junctions: Js, + ) -> Result { + let mut acc = self.0.secret_key().clone().secret_bytes(); + for junction in junctions { + match junction { + DeriveJunction::Soft(_) => return Err(Error::SoftJunction), + DeriveJunction::Hard(junction_bytes) => + acc = ("Secp256k1HDKD", acc, junction_bytes) + .using_encoded(pezsp_crypto_hashing::blake2_256), + } + } + Self::from_secret_key(acc) + } + + /// Obtain the [`PublicKey`] part of this key pair, which can be used in calls to [`verify()`]. + /// or otherwise converted into an address. In case of ECDSA, the public key bytes are not + /// equivalent to a Bizinikiwi `AccountId32`. They have to be hashed to obtain `AccountId32`. + pub fn public_key(&self) -> PublicKey { + PublicKey(self.0.public_key().serialize()) + } + + /// Obtain the [`SecretKey`] part of this key pair. This should be kept secret. + pub fn secret_key(&self) -> SecretKeyBytes { + *self.0.secret_key().as_ref() + } + + /// Sign some message. These bytes can be used directly in a Bizinikiwi + /// `MultiSignature::Ecdsa(..)`. + pub fn sign(&self, message: &[u8]) -> Signature { + self.sign_prehashed(&pezsp_crypto_hashing::blake2_256(message)) + } + + /// Signs a pre-hashed message. 
+ pub fn sign_prehashed(&self, message_hash: &[u8; 32]) -> Signature { + let wrapped = Message::from_digest_slice(message_hash).expect("Message is 32 bytes; qed"); + Signature(internal::sign(&self.0.secret_key(), &wrapped)) + } +} + +/// Verify that some signature for a message was created by the owner of the [`PublicKey`]. +/// +/// ```rust,standalone_crate +/// use pezkuwi_subxt_signer::{ bip39::Mnemonic, ecdsa }; +/// +/// let keypair = ecdsa::dev::alice(); +/// let message = b"Hello!"; +/// +/// let signature = keypair.sign(message); +/// let public_key = keypair.public_key(); +/// assert!(ecdsa::verify(&signature, message, &public_key)); +/// ``` +pub fn verify>(sig: &Signature, message: M, pubkey: &PublicKey) -> bool { + let message_hash = pezsp_crypto_hashing::blake2_256(message.as_ref()); + let wrapped = Message::from_digest_slice(&message_hash).expect("Message is 32 bytes; qed"); + + internal::verify(&sig.0, &wrapped, pubkey) +} + +pub(crate) mod internal { + use super::*; + + pub fn sign(secret_key: &secp256k1::SecretKey, message: &Message) -> [u8; 65] { + let recsig: RecoverableSignature = + Secp256k1::signing_only().sign_ecdsa_recoverable(message, secret_key); + let (recid, sig): (_, [u8; 64]) = recsig.serialize_compact(); + let mut signature_bytes: [u8; 65] = [0; 65]; + signature_bytes[..64].copy_from_slice(&sig); + signature_bytes[64] = (i32::from(recid) & 0xFF) as u8; + signature_bytes + } + + pub fn verify(sig: &[u8; 65], message: &Message, pubkey: &PublicKey) -> bool { + let Ok(signature) = secp256k1::ecdsa::Signature::from_compact(&sig[..64]) else { + return false; + }; + let Ok(public) = secp256k1::PublicKey::from_slice(&pubkey.0) else { + return false; + }; + + Secp256k1::verification_only() + .verify_ecdsa(message, &signature, &public) + .is_ok() + } +} + +/// An error handed back if creating a keypair fails. +#[derive(Debug, PartialEq, DeriveError)] +pub enum Error { + /// Invalid seed. 
+    #[error("Invalid seed (was it the wrong length?)")]
+    InvalidSeed,
+    /// Invalid seed.
+    #[error("Invalid seed for ECDSA, contained soft junction")]
+    SoftJunction,
+    /// Invalid phrase.
+    #[error("Cannot parse phrase: {0}")]
+    Phrase(bip39::Error),
+    /// Invalid hex.
+    #[error("Cannot parse hex string: {0}")]
+    Hex(hex::FromHexError),
+}
+
+// NOTE(review): the generic arguments on the `From` impls and trait impls below were
+// stripped by the text extraction that produced this patch (every `<Letter…>` span was
+// eaten). They are restored here; confirm against the vendored sources.
+impl From<hex::FromHexError> for Error {
+    fn from(err: hex::FromHexError) -> Self {
+        Error::Hex(err)
+    }
+}
+
+impl From<bip39::Error> for Error {
+    fn from(err: bip39::Error) -> Self {
+        Error::Phrase(err)
+    }
+}
+
+/// Dev accounts, helpful for testing but not to be used in production,
+/// since the secret keys are known.
+pub mod dev {
+    use super::*;
+
+    once_static_cloned! {
+        /// Equivalent to `{DEV_PHRASE}//Alice`.
+        pub fn alice() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Alice").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Bob`.
+        pub fn bob() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Bob").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Charlie`.
+        pub fn charlie() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Charlie").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Dave`.
+        pub fn dave() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Dave").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Eve`.
+        pub fn eve() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Eve").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Ferdie`.
+        pub fn ferdie() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Ferdie").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//One`.
+        pub fn one() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//One").unwrap()).unwrap()
+        }
+        /// Equivalent to `{DEV_PHRASE}//Two`.
+        pub fn two() -> Keypair {
+            Keypair::from_uri(&SecretUri::from_str("//Two").unwrap()).unwrap()
+        }
+    }
+}
+
+// Make `Keypair` usable to sign transactions in Subxt. This is optional so that
+// `subxt-signer` can be used entirely independently of Subxt.
+#[cfg(feature = "subxt")]
+mod subxt_compat {
+    use super::*;
+
+    use pezkuwi_subxt_core::{
+        config::Config,
+        tx::signer::Signer as SignerT,
+        utils::{AccountId32, MultiAddress, MultiSignature},
+    };
+
+    impl From<Signature> for MultiSignature {
+        fn from(value: Signature) -> Self {
+            MultiSignature::Ecdsa(value.0)
+        }
+    }
+
+    impl From<PublicKey> for AccountId32 {
+        fn from(value: PublicKey) -> Self {
+            value.to_account_id()
+        }
+    }
+
+    impl<T> From<PublicKey> for MultiAddress<AccountId32, T> {
+        fn from(value: PublicKey) -> Self {
+            value.to_address()
+        }
+    }
+
+    impl PublicKey {
+        /// A shortcut to obtain an [`AccountId32`] from a [`PublicKey`].
+        /// We often want this type, and using this method avoids any
+        /// ambiguous type resolution issues.
+        pub fn to_account_id(self) -> AccountId32 {
+            AccountId32(pezsp_crypto_hashing::blake2_256(&self.0))
+        }
+        /// A shortcut to obtain a [`MultiAddress`] from a [`PublicKey`].
+        /// We often want this type, and using this method avoids any
+        /// ambiguous type resolution issues.
+        pub fn to_address<T>(self) -> MultiAddress<AccountId32, T> {
+            MultiAddress::Id(self.to_account_id())
+        }
+    }
+
+    impl<T: Config> SignerT<T> for Keypair
+    where
+        T::AccountId: From<PublicKey>,
+        T::Address: From<PublicKey>,
+        T::Signature: From<Signature>,
+    {
+        fn account_id(&self) -> T::AccountId {
+            self.public_key().into()
+        }
+
+        fn sign(&self, signer_payload: &[u8]) -> T::Signature {
+            self.sign(signer_payload).into()
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::str::FromStr;
+
+    use super::*;
+
+    use sp_core::{self, crypto::Pair as _, ecdsa::Pair as SpPair};
+
+    #[test]
+    fn check_from_phrase_matches() {
+        for _ in 0..20 {
+            let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(None);
+            let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected");
+            let pair = Keypair::from_phrase(&phrase, None).expect("should be valid");
+
+            assert_eq!(sp_pair.public().0, pair.public_key().0);
+        }
+    }
+
+    #[test]
+    fn check_from_phrase_with_password_matches() {
+        for _ in 0..20 {
+            let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some("Testing"));
+            let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected");
+            let pair = Keypair::from_phrase(&phrase, Some("Testing")).expect("should be valid");
+
+            assert_eq!(sp_pair.public().0, pair.public_key().0);
+        }
+    }
+
+    #[test]
+    fn check_from_secret_uri_matches() {
+        // Some derive junctions to check that the logic there aligns:
+        let uri_paths = ["//bar", "//0001", "//1", "//0001", "//foo//bar//wibble"];
+
+        for i in 0..2 {
+            for path in &uri_paths {
+                // Build an sp_core::Pair that includes a phrase, path and password:
+                let password = format!("Testing{i}");
+                let (_sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some(&password));
+                let uri = format!("{phrase}{path}///{password}");
+                let sp_pair = SpPair::from_string(&uri, None).expect("should be valid");
+
+                // Now build a local Keypair using the equivalent API:
+                let uri = SecretUri::from_str(&uri).expect("should be valid secret URI");
+                let pair =
Keypair::from_uri(&uri).expect("should be valid"); + + // They should match: + assert_eq!(sp_pair.public().0, pair.public_key().0); + } + } + } + + #[test] + fn check_derive_errs_with_soft_junction() { + let uri_paths = ["/bar", "/1", "//foo//bar/wibble"]; + for path in &uri_paths { + let (_sp_pair, phrase, _seed) = SpPair::generate_with_phrase(None); + let uri = format!("{phrase}{path}"); + let uri = SecretUri::from_str(&uri).expect("should be valid secret URI"); + let result = Keypair::from_uri(&uri); + assert_eq!(result.err(), Some(Error::SoftJunction)); + } + } + + #[test] + fn check_signing_and_verifying_matches() { + use sp_core::ecdsa::Signature as SpSignature; + + for _ in 0..20 { + let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some("Testing")); + let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected"); + let pair = Keypair::from_phrase(&phrase, Some("Testing")).expect("should be valid"); + + let message = b"Hello world"; + let sp_sig = sp_pair.sign(message).0; + let sig: [u8; 65] = pair.sign(message).0; + + assert!(SpPair::verify(&SpSignature::from(sig), message, &sp_pair.public(),)); + assert!(verify(&Signature(sp_sig), message, &pair.public_key())); + } + } + + #[test] + fn check_hex_uris() { + // Hex URIs seem to ignore the password on sp_core and here. Check that this is consistent. + let uri_str = + "0x1122334455667788112233445566778811223344556677881122334455667788///SomePassword"; + + let uri = SecretUri::from_str(uri_str).expect("should be valid"); + let pair = Keypair::from_uri(&uri).expect("should be valid"); + let sp_pair = SpPair::from_string(uri_str, None).expect("should be valid"); + + assert_eq!(pair.public_key().0, sp_pair.public().0); + } +} diff --git a/vendor/pezkuwi-subxt/signer/src/eth.rs b/vendor/pezkuwi-subxt/signer/src/eth.rs new file mode 100644 index 00000000..c5047750 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/eth.rs @@ -0,0 +1,741 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An ethereum keypair implementation. + +use crate::ecdsa; +use alloc::format; +use core::str::FromStr; +use keccak_hash::keccak; +use secp256k1::Message; + +use thiserror::Error as DeriveError; + +const SECRET_KEY_LENGTH: usize = 32; + +/// Bytes representing a private key. +pub type SecretKeyBytes = [u8; SECRET_KEY_LENGTH]; + +/// The public key for an [`Keypair`] key pair. This is the uncompressed variant of +/// [`ecdsa::PublicKey`]. +pub struct PublicKey(pub [u8; 65]); + +impl AsRef<[u8]> for PublicKey { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// An ethereum keypair implementation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Keypair(ecdsa::Keypair); + +impl From for Keypair { + fn from(kp: ecdsa::Keypair) -> Self { + Self(kp) + } +} + +impl Keypair { + /// Create a keypair from a BIP-39 mnemonic phrase, optional password, account index, and + /// derivation type. + /// + /// **Note:** if the `std` feature is not enabled, we won't attempt to normalize the provided + /// password to NFKD first, and so this is your responsibility. This is not a concern if only + /// ASCII characters are used in the password. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ bip39::Mnemonic, eth::{ Keypair, DerivationPath } }; + /// + /// let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; + /// let mnemonic = Mnemonic::parse(phrase).unwrap(); + /// let keypair = Keypair::from_phrase(&mnemonic, None, DerivationPath::eth(0,0)).unwrap(); + /// + /// keypair.sign(b"Hello world!"); + /// ``` + pub fn from_phrase( + mnemonic: &bip39::Mnemonic, + password: Option<&str>, + derivation_path: DerivationPath, + ) -> Result { + // `to_seed` isn't available unless std is enabled in bip39. 
+ #[cfg(feature = "std")] + let seed = mnemonic.to_seed(password.unwrap_or("")); + #[cfg(not(feature = "std"))] + let seed = mnemonic.to_seed_normalized(password.unwrap_or("")); + + // TODO: Currently, we use bip32 to derive private keys which under the hood uses + // the Rust k256 crate. We _also_ use the secp256k1 crate (which is very similar). + // It'd be great if we could 100% use just one of the two crypto libs. bip32 has + // a feature flag to use secp256k1, but it's unfortunately a different version (older) + // than ours. + let private = bip32::XPrv::derive_from_path(seed, &derivation_path.inner) + .map_err(|_| Error::DeriveFromPath)?; + + Keypair::from_secret_key(private.to_bytes()) + } + + /// Turn a 16, 32 or 64 byte seed into a keypair. + /// + /// # Warning + /// + /// This will only be secure if the seed is secure! + pub fn from_seed(seed: &[u8]) -> Result { + let private = bip32::XPrv::new(seed).map_err(|_| Error::InvalidSeed)?; + Keypair::from_secret_key(private.to_bytes()) + } + + /// Turn a 32 byte secret key into a keypair. + /// + /// # Warning + /// + /// This will only be secure if the secret key is secure! + pub fn from_secret_key(secret_key: SecretKeyBytes) -> Result { + ecdsa::Keypair::from_secret_key(secret_key) + .map(Self) + .map_err(|_| Error::InvalidSeed) + } + + /// Obtain the [`ecdsa::SecretKeyBytes`] of this keypair. + pub fn secret_key(&self) -> SecretKeyBytes { + self.0.secret_key() + } + + /// Obtain the [`eth::PublicKey`] of this keypair. + pub fn public_key(&self) -> PublicKey { + let uncompressed = self.0.0.public_key().serialize_uncompressed(); + PublicKey(uncompressed) + } + + /// Signs an arbitrary message payload. + pub fn sign(&self, signer_payload: &[u8]) -> Signature { + self.sign_prehashed(&keccak(signer_payload).0) + } + + /// Signs a pre-hashed message. + pub fn sign_prehashed(&self, message_hash: &[u8; 32]) -> Signature { + Signature(self.0.sign_prehashed(message_hash).0) + } +} +/// A derivation path. 
This can be parsed from a valid derivation path string like +/// `"m/44'/60'/0'/0/0"`, or we can construct one using the helpers [`DerivationPath::empty()`] +/// and [`DerivationPath::eth()`]. +#[derive(Clone, Debug)] +pub struct DerivationPath { + inner: bip32::DerivationPath, +} + +impl DerivationPath { + /// An empty derivation path (in other words, just use the master-key as is). + pub fn empty() -> Self { + let inner = bip32::DerivationPath::from_str("m").unwrap(); + DerivationPath { inner } + } + + /// A BIP44 Ethereum compatible derivation using the path "m/44'/60'/account'/0/address_index". + /// + /// # Panics + /// + /// Panics if the `account` or `address_index` provided are >= 2^31. + pub fn eth(account: u32, address_index: u32) -> Self { + assert!(account < bip32::ChildNumber::HARDENED_FLAG, "account must be less than 2^31"); + assert!( + address_index < bip32::ChildNumber::HARDENED_FLAG, + "address_index must be less than 2^31" + ); + + let derivation_string = format!("m/44'/60'/{account}'/0/{address_index}"); + let inner = bip32::DerivationPath::from_str(&derivation_string).unwrap(); + DerivationPath { inner } + } +} + +impl FromStr for DerivationPath { + type Err = Error; + fn from_str(s: &str) -> Result { + let inner = bip32::DerivationPath::from_str(s).map_err(|_| Error::DeriveFromPath)?; + Ok(DerivationPath { inner }) + } +} + +/// A signature generated by [`Keypair::sign()`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct Signature(pub [u8; 65]); + +impl AsRef<[u8; 65]> for Signature { + fn as_ref(&self) -> &[u8; 65] { + &self.0 + } +} + +/// Verify that some signature for a message was created by the owner of the [`PublicKey`]. 
+/// +/// ```rust,standalone_crate +/// use pezkuwi_subxt_signer::{ bip39::Mnemonic, eth }; +/// +/// let keypair = eth::dev::alith(); +/// let message = b"Hello!"; +/// +/// let signature = keypair.sign(message); +/// let public_key = keypair.public_key(); +/// assert!(eth::verify(&signature, message, &public_key)); +/// ``` +pub fn verify>(sig: &Signature, message: M, pubkey: &PublicKey) -> bool { + let message_hash = keccak(message.as_ref()); + let wrapped = + Message::from_digest_slice(message_hash.as_bytes()).expect("Message is 32 bytes; qed"); + let Ok(signature) = secp256k1::ecdsa::Signature::from_compact(&sig.as_ref()[..64]) else { + return false; + }; + let Ok(pk) = secp256k1::PublicKey::from_slice(&pubkey.0) else { + return false; + }; + + secp256k1::Secp256k1::verification_only() + .verify_ecdsa(&wrapped, &signature, &pk) + .is_ok() +} + +/// An error handed back if creating a keypair fails. +#[derive(Debug, PartialEq, DeriveError)] +pub enum Error { + /// Invalid seed. + #[error("Invalid seed (was it the wrong length?)")] + InvalidSeed, + /// Invalid derivation path. + #[error("Could not derive from path; some values in the path may have been >= 2^31?")] + DeriveFromPath, +} + +/// Dev accounts, helpful for testing but not to be used in production, +/// since the secret keys are known. +pub mod dev { + use core::str::FromStr; + + use crate::DEV_PHRASE; + + use super::*; + + once_static_cloned! 
{ + pub fn alith() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 0)).unwrap() + } + pub fn baltathar() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 1)).unwrap() + } + pub fn charleth() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 2)).unwrap() + } + pub fn dorothy() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 3)).unwrap() + } + pub fn ethan() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 4)).unwrap() + } + pub fn faith() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 5)).unwrap() + } + pub fn gareth() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 6)).unwrap() + } + pub fn heather() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 7)).unwrap() + } + pub fn ithelia() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 8)).unwrap() + } + pub fn jethro() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 9)).unwrap() + } + pub fn keith() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 10)).unwrap() + } + pub fn luther() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 11)).unwrap() + } + pub fn martha() -> Keypair { + Keypair::from_phrase( + &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 12)).unwrap() + } + pub fn 
nathan() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 13)).unwrap()
+        }
+        pub fn othello() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 14)).unwrap()
+        }
+        pub fn perth() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 15)).unwrap()
+        }
+        pub fn ruth() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 16)).unwrap()
+        }
+        pub fn seth() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 17)).unwrap()
+        }
+        pub fn thomas() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 18)).unwrap()
+        }
+        pub fn uthman() -> Keypair {
+            Keypair::from_phrase(
+                &bip39::Mnemonic::from_str(DEV_PHRASE).unwrap(), None, DerivationPath::eth(0, 19)).unwrap()
+        }
+
+    }
+}
+
+// NOTE(review): the generic arguments in this module were stripped by the extraction
+// that produced this patch; restored from upstream subxt's eth.rs. Confirm the
+// `MultiAddress` second type parameter against the vendored sources.
+#[cfg(feature = "subxt")]
+mod subxt_compat {
+    use super::*;
+    use pezkuwi_subxt_core::{
+        config::Config,
+        tx::signer::Signer as SignerT,
+        utils::{AccountId20, MultiAddress},
+    };
+
+    impl<T: Config> SignerT<T> for Keypair
+    where
+        T::AccountId: From<PublicKey>,
+        T::Address: From<PublicKey>,
+        T::Signature: From<Signature>,
+    {
+        fn account_id(&self) -> T::AccountId {
+            self.public_key().into()
+        }
+
+        fn sign(&self, signer_payload: &[u8]) -> T::Signature {
+            self.sign(signer_payload).into()
+        }
+    }
+
+    impl PublicKey {
+        /// Obtains the public address of the account by taking the last 20 bytes
+        /// of the Keccak-256 hash of the public key.
+        pub fn to_account_id(&self) -> AccountId20 {
+            let hash = keccak(&self.0[1..]).0;
+            let hash20 = hash[12..].try_into().expect("should be 20 bytes");
+            AccountId20(hash20)
+        }
+        /// A shortcut to obtain a [`MultiAddress`] from a [`PublicKey`].
+        /// We often want this type, and using this method avoids any
+        /// ambiguous type resolution issues.
+        pub fn to_address<T>(self) -> MultiAddress<AccountId20, T> {
+            MultiAddress::Address20(self.to_account_id().0)
+        }
+    }
+
+    impl From<PublicKey> for AccountId20 {
+        fn from(value: PublicKey) -> Self {
+            value.to_account_id()
+        }
+    }
+
+    impl<T> From<PublicKey> for MultiAddress<AccountId20, T> {
+        fn from(value: PublicKey) -> Self {
+            let address: AccountId20 = value.into();
+            MultiAddress::Address20(address.0)
+        }
+    }
+}
+
+#[cfg(test)]
+#[cfg(feature = "subxt")]
+mod test {
+    use bip39::Mnemonic;
+    use pezkuwi_subxt_core::utils::AccountId20;
+    use proptest::prelude::*;
+    use secp256k1::Secp256k1;
+
+    use pezkuwi_subxt_core::{config::*, tx::signer::Signer as SignerT};
+
+    use super::*;
+
+    enum StubEthRuntimeConfig {}
+
+    impl Config for StubEthRuntimeConfig {
+        type AccountId = AccountId20;
+        type Address = AccountId20;
+        type Signature = Signature;
+        type Hasher = bizinikiwi::BlakeTwo256;
+        // NOTE(review): generic args below restored by analogy with upstream subxt's
+        // `SubstrateHeader<u32, Self::Hasher>` / `SubstrateExtrinsicParams<Self>`;
+        // confirm against the vendored `bizinikiwi` config module.
+        type Header = bizinikiwi::BizinikiwiHeader<u32, Self::Hasher>;
+        type ExtrinsicParams = BizinikiwiExtrinsicParams<Self>;
+        type AssetId = u32;
+    }
+
+    type SubxtSigner = dyn SignerT<StubEthRuntimeConfig>;
+
+    prop_compose! {
+        fn keypair()(seed in any::<[u8; 32]>()) -> Keypair {
+            let secret = secp256k1::SecretKey::from_slice(&seed).expect("valid secret key");
+            let inner = secp256k1::Keypair::from_secret_key(
+                &Secp256k1::new(),
+                &secret,
+            );
+
+            Keypair(ecdsa::Keypair(inner))
+        }
+    }
+
+    proptest! {
+        #[test]
+        fn check_from_phrase(
+            entropy in any::<[u8; 32]>(),
+            password in any::<Option<String>>(),
+            address in 1..(i32::MAX as u32),
+            account_idx in 1..(i32::MAX as u32),
+        ) {
+            let mnemonic = bip39::Mnemonic::from_entropy(&entropy).expect("valid mnemonic");
+            let derivation_path = format!("m/44'/60'/{address}'/0/{account_idx}").parse().expect("valid derivation path");
+            let private = bip32::XPrv::derive_from_path(
+                mnemonic.to_seed(password.clone().unwrap_or("".to_string())),
+                &derivation_path,
+            ).expect("valid private");
+
+            // Creating our own keypairs should be equivalent to using bip32 crate to do it:
+            assert_eq!(
+                Keypair::from_phrase(&mnemonic, password.as_deref(), DerivationPath::eth(address, account_idx)).expect("valid keypair"),
+                Keypair(ecdsa::Keypair::from_secret_key(private.to_bytes()).expect("valid ecdsa keypair"))
+            );
+        }
+
+        #[test]
+        fn check_from_phrase_bad_index(
+            address in (i32::MAX as u32)..=u32::MAX,
+            account_idx in (i32::MAX as u32)..=u32::MAX,
+        ) {
+            let derivation_path_err = format!("m/44'/60'/{address}'/0/{account_idx}").parse::<DerivationPath>().expect_err("bad path expected");
+
+            // Creating invalid derivation paths (ie values too large) will result in an error.
+ assert_eq!( + derivation_path_err, + Error::DeriveFromPath + ); + } + + #[test] + fn check_pezkuwi_subxt_signer_implementation_matches(keypair in keypair(), msg in ".*") { + let msg_as_bytes = msg.as_bytes(); + + assert_eq!(SubxtSigner::account_id(&keypair), keypair.public_key().to_account_id()); + assert_eq!(SubxtSigner::sign(&keypair, msg_as_bytes), keypair.sign(msg_as_bytes)); + } + + #[test] + fn check_account_id(keypair in keypair()) { + // https://github.com/ethereumbook/ethereumbook/blob/develop/04keys-addresses.asciidoc#ethereum-addresses + let account_id = { + let uncompressed = keypair.0.0.public_key().serialize_uncompressed(); + let hash = keccak(&uncompressed[1..]).0; + let hash20 = hash[12..].try_into().expect("should be 20 bytes"); + AccountId20(hash20) + }; + let account_id_derived_from_pk: AccountId20 = keypair.public_key().to_account_id(); + assert_eq!(account_id_derived_from_pk, account_id); + assert_eq!(keypair.public_key().to_account_id(), account_id); + + } + + #[test] + fn check_signing_and_verifying_matches(keypair in keypair(), msg in ".*") { + let sig = SubxtSigner::sign(&keypair, msg.as_bytes()); + + assert!(verify( + &sig, + msg, + &keypair.public_key()) + ); + } + } + + /// Test that the dev accounts match those listed in the moonbeam README. 
+ /// https://github.com/moonbeam-foundation/moonbeam/blob/96cf8898874509d529b03c4da0e07b2787bacb18/README.md + #[test] + fn check_dev_accounts_match() { + let cases = [ + ( + dev::alith(), + "0xf24FF3a9CF04c71Dbc94D0b566f7A27B94566cac", + "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133", + ), + ( + dev::baltathar(), + "0x3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0", + "0x8075991ce870b93a8870eca0c0f91913d12f47948ca0fd25b49c6fa7cdbeee8b", + ), + ( + dev::charleth(), + "0x798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc", + "0x0b6e18cafb6ed99687ec547bd28139cafdd2bffe70e6b688025de6b445aa5c5b", + ), + ( + dev::dorothy(), + "0x773539d4Ac0e786233D90A233654ccEE26a613D9", + "0x39539ab1876910bbf3a223d84a29e28f1cb4e2e456503e7e91ed39b2e7223d68", + ), + ( + dev::ethan(), + "0xFf64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB", + "0x7dce9bc8babb68fec1409be38c8e1a52650206a7ed90ff956ae8a6d15eeaaef4", + ), + ( + dev::faith(), + "0xC0F0f4ab324C46e55D02D0033343B4Be8A55532d", + "0xb9d2ea9a615f3165812e8d44de0d24da9bbd164b65c4f0573e1ce2c8dbd9c8df", + ), + ( + dev::gareth(), + "0x7BF369283338E12C90514468aa3868A551AB2929", + "0x96b8a38e12e1a31dee1eab2fffdf9d9990045f5b37e44d8cc27766ef294acf18", + ), + ( + dev::heather(), + "0x931f3600a299fd9B24cEfB3BfF79388D19804BeA", + "0x0d6dcaaef49272a5411896be8ad16c01c35d6f8c18873387b71fbc734759b0ab", + ), + ( + dev::ithelia(), + "0xC41C5F1123ECCd5ce233578B2e7ebd5693869d73", + "0x4c42532034540267bf568198ccec4cb822a025da542861fcb146a5fab6433ff8", + ), + ( + dev::jethro(), + "0x2898FE7a42Be376C8BC7AF536A940F7Fd5aDd423", + "0x94c49300a58d576011096bcb006aa06f5a91b34b4383891e8029c21dc39fbb8b", + ), + ( + dev::keith(), + "0x583E6CFb24Ae212A36Db2766597fF8e6AC796751", + "0xff0071dbd1edf21f40baf55718b2c3b032027c202b57afbe10720aa751a9f40c", + ), + ( + dev::luther(), + "0xbb827670B9dCb162Daa8DbF3dFF63a71c844d17d", + "0x194f2d0cfcfecac3a224af9f534a5fe9f49ff4f28d939539e8bd244ce6fa79e4", + ), + ( + dev::martha(), + "0xD9E8D42eDD3Bc20871fA6662E069E71483fC167A", + 
"0x268896eff609f44f711db60441104f099dccfa5678171a71a61afb14047aefbb", + ), + ( + dev::nathan(), + "0x9702DF55600140d8E197AAdfffa622F2A80564fd", + "0x22bcd7b28c2d741f9b6d1afb84db16f39bdf6e6289b9adedf93ffa3763f62e31", + ), + ( + dev::othello(), + "0x9FC969aCe16Fe2757E04a8BD32136a9EC258db6D", + "0xd711903ccdbcb2e87ac43132a2ffd5f189057d535ddb2802d71fa77767a059ea", + ), + ( + dev::perth(), + "0xFe25AaD37c57C4b6Bc85d96a4349dac5046A06EE", + "0x26914ef14ae113743e48f24344146851036ff2ab663543947a366e36e781d79c", + ), + ( + dev::ruth(), + "0x11E8697Ef0f4BF2A4076ff46e42a0FdD8C4d6C41", + "0xd9ab86105fd3a2c3d7055ff0427564c3c30bb9175780b3bd1842b37f93227778", + ), + ( + dev::seth(), + "0x001eB6957Eae09433A380504b11f807611686669", + "0xd2e4efe30dd3a7aa9ea48efa838244515e74f210a6a3f2a1b4fd45631014502c", + ), + ( + dev::thomas(), + "0x0A2e55fd44d1cEe5fD482a2062A11C548C492E25", + "0xe0a97dde04b09d3c2d3e8959eae318a01ef45a3c8d2f56258bc847a84fb80fe0", + ), + ( + dev::uthman(), + "0x1B948eD0bbacC2ca68eEcb5A9FC9Ba2755669faF", + "0x1073cd4baa42f59545928c7e56bb5e14e31e4f5e911f9f2d99a1e092eab45f74", + ), + ]; + + for (case_idx, (keypair, exp_account_id, exp_priv_key)) in cases.into_iter().enumerate() { + let act_account_id = keypair.public_key().to_account_id().checksum(); + let act_priv_key = format!("0x{}", &keypair.0.0.display_secret()); + + assert_eq!(exp_account_id, act_account_id, "account ID mismatch in {case_idx}"); + assert_eq!(exp_priv_key, act_priv_key, "private key mismatch in {case_idx}"); + } + } + + // This is a part of the test set linked in BIP39 and copied from https://github.com/trezor/python-mnemonic/blob/f5a975ab10c035596d65d854d21164266ffed284/vectors.json. + // The passphrase is always TREZOR. We check that keys generated with the mnemonic (and no + // derivation path) line up with the seeds given. 
+ #[test] + fn check_basic_bip39_compliance() { + let mnemonics_and_seeds = [ + ( + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about", + "c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", + ), + ( + "legal winner thank year wave sausage worth useful legal winner thank yellow", + "2e8905819b8723fe2c1d161860e5ee1830318dbf49a83bd451cfb8440c28bd6fa457fe1296106559a3c80937a1c1069be3a3a5bd381ee6260e8d9739fce1f607", + ), + ( + "letter advice cage absurd amount doctor acoustic avoid letter advice cage above", + "d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8", + ), + ( + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong", + "ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069", + ), + ( + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent", + "035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa", + ), + ( + "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will", + "f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd", + ), + ( + "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always", + "107d7c02a5aa6f38c58083ff74f04c607c2d2c0ecc55501dadd72d025b751bc27fe913ffb796f841c49b1d33b610cf0e91d3aa239027f5e99fe4ce9e5088cd65", + ), + ( + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when", + 
"0cd6e5d827bb62eb8fc1e262254223817fd068a74b5b449cc2f667c3f1f985a76379b43348d952e2265b4cd129090758b3e3c2c49103b5051aac2eaeb890a528", + ), + ( + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art", + "bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8", + ), + ( + "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title", + "bc09fca1804f7e69da93c2f2028eb238c227f2e9dda30cd63699232578480a4021b146ad717fbb7e451ce9eb835f43620bf5c514db0f8add49f5d121449d3e87", + ), + ( + "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless", + "c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f", + ), + ( + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote", + "dd48c104698c30cfe2b6142103248622fb7bb0ff692eebb00089b32d22484e1613912f0a5b694407be899ffd31ed3992c456cdf60f5d4564b8ba3f05a69890ad", + ), + ( + "ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic", + "274ddc525802f7c828d8ef7ddbcdc5304e87ac3535913611fbbfa986d0c9e5476c91689f9c8a54fd55bd38606aa6a8595ad213d4c9c9f9aca3fb217069a41028", + ), + ( + "gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog", + "628c3827a8823298ee685db84f55caa34b5cc195a778e52d45f59bcf75aba68e4d7590e101dc414bc1bbd5737666fbbef35d1f1903953b66624f910feef245ac", + ), + ( + "hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length", + 
"64c87cde7e12ecf6704ab95bb1408bef047c22db4cc7491c4271d170a1b213d20b385bc1588d9c7b38f1b39d415665b8a9030c9ec653d75e65f847d8fc1fc440", + ), + ( + "scheme spot photo card baby mountain device kick cradle pact join borrow", + "ea725895aaae8d4c1cf682c1bfd2d358d52ed9f0f0591131b559e2724bb234fca05aa9c02c57407e04ee9dc3b454aa63fbff483a8b11de949624b9f1831a9612", + ), + ( + "horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave", + "fd579828af3da1d32544ce4db5c73d53fc8acc4ddb1e3b251a31179cdb71e853c56d2fcb11aed39898ce6c34b10b5382772db8796e52837b54468aeb312cfc3d", + ), + ( + "panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside", + "72be8e052fc4919d2adf28d5306b5474b0069df35b02303de8c1729c9538dbb6fc2d731d5f832193cd9fb6aeecbc469594a70e3dd50811b5067f3b88b28c3e8d", + ), + ( + "cat swing flag economy stadium alone churn speed unique patch report train", + "deb5f45449e615feff5640f2e49f933ff51895de3b4381832b3139941c57b59205a42480c52175b6efcffaa58a2503887c1e8b363a707256bdd2b587b46541f5", + ), + ( + "light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access", + "4cbdff1ca2db800fd61cae72a57475fdc6bab03e441fd63f96dabd1f183ef5b782925f00105f318309a7e9c3ea6967c7801e46c8a58082674c860a37b93eda02", + ), + ( + "all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform", + "26e975ec644423f4a4c4f4215ef09b4bd7ef924e85d1d17c4cf3f136c2863cf6df0a475045652c57eb5fb41513ca2a2d67722b77e954b4b3fc11f7590449191d", + ), + ( + "vessel ladder alter error federal sibling chat ability sun glass valve picture", + "2aaa9242daafcee6aa9d7269f17d4efe271e1b9a529178d7dc139cd18747090bf9d60295d0ce74309a78852a9caadf0af48aae1c6253839624076224374bc63f", + ), + ( + "scissors invite lock maple supreme raw rapid void congress 
muscle digital elegant little brisk hair mango congress clump", + "7b4a10be9d98e6cba265566db7f136718e1398c71cb581e1b2f464cac1ceedf4f3e274dc270003c670ad8d02c4558b2f8e39edea2775c9e232c7cb798b069e88", + ), + ( + "void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold", + "01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998", + ), + ]; + + for (idx, (m, s)) in mnemonics_and_seeds.into_iter().enumerate() { + let m = Mnemonic::parse(m).expect("mnemonic should be valid"); + let pair1 = Keypair::from_phrase(&m, Some("TREZOR"), DerivationPath::empty()).unwrap(); + let s = hex::decode(s).expect("seed hex should be valid"); + let pair2 = Keypair::from_seed(&s).unwrap(); + + assert_eq!(pair1, pair2, "pair1 and pair2 at index {idx} don't match"); + } + } + + /// Test the same accounts from moonbeam so we know for sure that this implementation is working + /// https://github.com/moonbeam-foundation/moonbeam/blob/e70ee0d427dfee8987d5a5671a66416ee6ec38aa/primitives/account/src/lib.rs#L217 + mod moonbeam_sanity_tests { + use hex_literal::hex; + + use super::*; + + const KEY_1: [u8; 32] = + hex!("502f97299c472b88754accd412b7c9a6062ef3186fba0c0388365e1edec24875"); + const KEY_2: [u8; 32] = + hex!("0f02ba4d7f83e59eaa32eae9c3c4d99b68ce76decade21cdab7ecce8f4aef81a"); + const KEY_3: [u8; 32] = + hex!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); + + #[test] + fn test_account_derivation_1() { + let kp = Keypair::from_secret_key(KEY_1).expect("valid keypair"); + assert_eq!( + kp.public_key().to_account_id().checksum(), + "0x976f8456E4e2034179B284A23C0e0c8f6d3da50c" + ); + } + + #[test] + fn test_account_derivation_2() { + let kp = Keypair::from_secret_key(KEY_2).expect("valid keypair"); + assert_eq!( + kp.public_key().to_account_id().checksum(), + "0x420e9F260B40aF7E49440ceAd3069f8e82A5230f" + 
); + } + + #[test] + fn test_account_derivation_3() { + let kp = Keypair::from_secret_key(KEY_3).expect("valid keypair"); + assert_eq!( + kp.public_key().to_account_id().checksum(), + "0x9cce34F7aB185c7ABA1b7C8140d620B4BDA941d6" + ); + } + } +} diff --git a/vendor/pezkuwi-subxt/signer/src/lib.rs b/vendor/pezkuwi-subxt/signer/src/lib.rs new file mode 100644 index 00000000..8e09f1b3 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/lib.rs @@ -0,0 +1,55 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Subxt-signer +//! +//! The main output from this crate is the [`sr25519::Keypair`], which can +//! be constructed from a bip39 phrase, secret URI or raw seed, and used to +//! sign and verify arbitrary messages. This crate is aligned with how Bizinikiwi's +//! `sp_core` crate constructs and signs keypairs, but is lighter on dependencies +//! and can support compilation to WASM with the `web` feature. +//! +//! Enable the `subxt` feature to enable use of this [`sr25519::Keypair`] in signing +//! subxt transactions for chains supporting sr25519 signatures. + +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +#[macro_use] +mod utils; +mod crypto; + +// An sr25519 key pair implementation. +#[cfg(feature = "sr25519")] +#[cfg_attr(docsrs, doc(cfg(feature = "sr25519")))] +pub mod sr25519; + +// An ecdsa key pair implementation. +#[cfg(feature = "ecdsa")] +#[cfg_attr(docsrs, doc(cfg(feature = "ecdsa")))] +pub mod ecdsa; + +// An ethereum signer implementation. +#[cfg(feature = "unstable-eth")] +#[cfg_attr(docsrs, doc(cfg(feature = "unstable-eth")))] +pub mod eth; + +/// A pezkuwi-js account json loader. 
+#[cfg(feature = "pezkuwi-js-compat")] +#[cfg_attr(docsrs, doc(cfg(feature = "pezkuwi-js-compat")))] +pub mod pezkuwi_js_compat; + +// Re-export useful bits and pieces for generating a Pair from a phrase, +// namely the Mnemonic struct. +pub use bip39; + +// Used to hold strings in a more secure manner in memory for a little extra +// protection. +pub use secrecy::{ExposeSecret, SecretString}; + +// SecretUri's can be parsed from strings and used to generate key pairs. +// DeriveJunctions are the "path" part of these SecretUris. +pub use crypto::{DEV_PHRASE, DeriveJunction, SecretUri, SecretUriError}; diff --git a/vendor/pezkuwi-subxt/signer/src/pezkuwi_js_compat.rs b/vendor/pezkuwi-subxt/signer/src/pezkuwi_js_compat.rs new file mode 100644 index 00000000..70b1a37b --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/pezkuwi_js_compat.rs @@ -0,0 +1,192 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A Pezkuwi-JS account loader. + +use base64::Engine; +use crypto_secretbox::{ + Key, Nonce, XSalsa20Poly1305, + aead::{Aead, KeyInit}, +}; +use pezkuwi_subxt_core::utils::AccountId32; +use serde::Deserialize; + +use thiserror::Error as DeriveError; + +use crate::sr25519; + +/// Given a JSON keypair as exported from Pezkuwi-JS, this returns an [`sr25519::Keypair`] +pub fn decrypt_json(json: &str, password: &str) -> Result { + let pair_json: KeyringPairJson = serde_json::from_str(json)?; + Ok(pair_json.decrypt(password)?) +} + +/// Error +#[derive(Debug, DeriveError)] +pub enum Error { + /// Error decoding JSON. + #[error("Invalid JSON: {0}")] + Json(#[from] serde_json::Error), + /// The keypair has an unsupported encoding. + #[error("Unsupported encoding.")] + UnsupportedEncoding, + /// Base64 decoding error. 
+ #[error("Base64 decoding error: {0}")] + Base64(#[from] base64::DecodeError), + /// Wrong Scrypt parameters + #[error("Unsupported Scrypt parameters: N: {n}, p: {p}, r: {r}")] + UnsupportedScryptParameters { + /// N + n: u32, + /// p + p: u32, + /// r + r: u32, + }, + /// Decryption error. + #[error("Decryption error: {0}")] + Secretbox(#[from] crypto_secretbox::Error), + /// sr25519 keypair error. + #[error(transparent)] + Sr25519(#[from] sr25519::Error), + /// The decrypted keys are not valid. + #[error("The decrypted keys are not valid.")] + InvalidKeys, +} + +#[derive(Deserialize)] +struct EncryptionMetadata { + /// Descriptor for the content + content: Vec, + /// The encoding (in current/latest versions this is always an array) + r#type: Vec, + /// The version of encoding applied + version: String, +} + +/// https://github.com/pezkuwi-js/common/blob/37fa211fdb141d4f6eb32e8f377a4651ed2d9068/packages/keyring/src/types.ts#L67 +#[derive(Deserialize)] +struct KeyringPairJson { + /// The encoded string + encoded: String, + /// The encoding used + encoding: EncryptionMetadata, + /// The ss58 encoded address or the hex-encoded version (the latter is for ETH-compat chains) + address: AccountId32, +} + +// This can be removed once split_array is stabilized. +fn slice_to_u32(slice: &[u8]) -> u32 { + u32::from_le_bytes(slice.try_into().expect("Slice should be 4 bytes.")) +} + +impl KeyringPairJson { + /// Decrypt JSON keypair. + fn decrypt(self, password: &str) -> Result { + // Check encoding. + // https://github.com/pezkuwi-js/common/blob/37fa211fdb141d4f6eb32e8f377a4651ed2d9068/packages/keyring/src/keyring.ts#L166 + if self.encoding.version != "3" || + !self.encoding.content.contains(&"pkcs8".to_owned()) || + !self.encoding.content.contains(&"sr25519".to_owned()) || + !self.encoding.r#type.contains(&"scrypt".to_owned()) || + !self.encoding.r#type.contains(&"xsalsa20-poly1305".to_owned()) + { + return Err(Error::UnsupportedEncoding); + } + + // Decode from Base64. 
+ let decoded = base64::engine::general_purpose::STANDARD.decode(self.encoded)?; + let params: [u8; 68] = decoded[..68].try_into().map_err(|_| Error::UnsupportedEncoding)?; + + // Extract scrypt parameters. + // https://github.com/pezkuwi-js/common/blob/master/packages/util-crypto/src/scrypt/fromU8a.ts + let salt = ¶ms[0..32]; + let n = slice_to_u32(¶ms[32..36]); + let p = slice_to_u32(¶ms[36..40]); + let r = slice_to_u32(¶ms[40..44]); + + // FIXME At this moment we assume these to be fixed params, this is not a great idea + // since we lose flexibility and updates for greater security. However we need some + // protection against carefully-crafted params that can eat up CPU since these are user + // inputs. So we need to get very clever here, but atm we only allow the defaults + // and if no match, bail out. + if n != 32768 || p != 1 || r != 8 { + return Err(Error::UnsupportedScryptParameters { n, p, r }); + } + + // Hash password. + let scrypt_params = + scrypt::Params::new(15, 8, 1, 32).expect("Provided parameters should be valid."); + let mut key = Key::default(); + scrypt::scrypt(password.as_bytes(), salt, &scrypt_params, &mut key) + .expect("Key should be 32 bytes."); + + // Decrypt keys. + // https://github.com/pezkuwi-js/common/blob/master/packages/util-crypto/src/json/decryptData.ts + let cipher = XSalsa20Poly1305::new(&key); + let nonce = Nonce::from_slice(¶ms[44..68]); + let ciphertext = &decoded[68..]; + let plaintext = cipher.decrypt(nonce, ciphertext)?; + + // https://github.com/pezkuwi-js/common/blob/master/packages/keyring/src/pair/decode.ts + if plaintext.len() != 117 { + return Err(Error::InvalidKeys); + } + + let header = &plaintext[0..16]; + let secret_key = &plaintext[16..80]; + let div = &plaintext[80..85]; + let public_key = &plaintext[85..117]; + + if header != [48, 83, 2, 1, 1, 48, 5, 6, 3, 43, 101, 112, 4, 34, 4, 32] || + div != [161, 35, 3, 33, 0] + { + return Err(Error::InvalidKeys); + } + + // Generate keypair. 
+ let keypair = sr25519::Keypair::from_ed25519_bytes(secret_key)?; + + // Ensure keys are correct. + if keypair.public_key().0 != public_key || + keypair.public_key().to_account_id() != self.address + { + return Err(Error::InvalidKeys); + } + + Ok(keypair) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_get_keypair_sr25519() { + let json = r#" + { + "encoded": "DumgApKCTqoCty1OZW/8WS+sgo6RdpHhCwAkA2IoDBMAgAAAAQAAAAgAAAB6IG/q24EeVf0JqWqcBd5m2tKq5BlyY84IQ8oamLn9DZe9Ouhgunr7i36J1XxUnTI801axqL/ym1gil0U8440Qvj0lFVKwGuxq38zuifgoj0B3Yru0CI6QKEvQPU5xxj4MpyxdSxP+2PnTzYao0HDH0fulaGvlAYXfqtU89xrx2/z9z7IjSwS3oDFPXRQ9kAdDebtyCVreZ9Otw9v3", + "encoding": { + "content": [ + "pkcs8", + "sr25519" + ], + "type": [ + "scrypt", + "xsalsa20-poly1305" + ], + "version": "3" + }, + "address": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "meta": { + "genesisHash": "", + "name": "Alice", + "whenCreated": 1718265838755 + } + } + "#; + decrypt_json(json, "whoisalice").unwrap(); + } +} diff --git a/vendor/pezkuwi-subxt/signer/src/sr25519.rs b/vendor/pezkuwi-subxt/signer/src/sr25519.rs new file mode 100644 index 00000000..35472d48 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/sr25519.rs @@ -0,0 +1,444 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An sr25519 keypair implementation. +//! +//! **Note:** This implementation requires the `getrandom` dependency to obtain randomness, +//! and will not compile on targets that it does not support. See the supported `getrandom` +//! targets here: . 
+ +use core::str::FromStr; + +use crate::crypto::{DeriveJunction, SecretUri, seed_from_entropy}; + +use hex::FromHex; +use schnorrkel::{ + ExpansionMode, MiniSecretKey, + derive::{ChainCode, Derivation}, +}; +use secrecy::ExposeSecret; + +use thiserror::Error as DeriveError; + +const SECRET_KEY_LENGTH: usize = schnorrkel::keys::MINI_SECRET_KEY_LENGTH; +const SIGNING_CTX: &[u8] = b"bizinikiwi"; + +/// Seed bytes used to generate a key pair. +pub type SecretKeyBytes = [u8; SECRET_KEY_LENGTH]; + +/// A signature generated by [`Keypair::sign()`]. These bytes are equivalent +/// to a Bizinikiwi `MultiSignature::sr25519(bytes)`. +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Signature(pub [u8; 64]); + +impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// The public key for an [`Keypair`] key pair. This is equivalent to a +/// Bizinikiwi `AccountId32`. +pub struct PublicKey(pub [u8; 32]); + +impl AsRef<[u8]> for PublicKey { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// An sr25519 keypair implementation. While the API is slightly different, the logic for +/// this has been taken from `sp_core::sr25519` and we test against this to ensure conformity. +#[derive(Debug, Clone)] +pub struct Keypair(schnorrkel::Keypair); + +impl Keypair { + /// Create am sr25519 keypair from a [`SecretUri`]. See the [`SecretUri`] docs for more. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ SecretUri, sr25519::Keypair }; + /// use std::str::FromStr; + /// + /// let uri = SecretUri::from_str("//Alice").unwrap(); + /// let keypair = Keypair::from_uri(&uri).unwrap(); + /// + /// keypair.sign(b"Hello world!"); + /// ``` + pub fn from_uri(uri: &SecretUri) -> Result { + let SecretUri { junctions, phrase, password } = uri; + + // If the phrase is hex, convert bytes directly into a seed, ignoring password. + // Else, parse the phrase string taking the password into account. 
This is + // the same approach taken in sp_core::crypto::Pair::from_string_with_seed. + let key = if let Some(hex_str) = phrase.expose_secret().strip_prefix("0x") { + let seed = SecretKeyBytes::from_hex(hex_str)?; + Self::from_secret_key(seed)? + } else { + let phrase = bip39::Mnemonic::from_str(phrase.expose_secret())?; + let pass_str = password.as_ref().map(|p| p.expose_secret()); + Self::from_phrase(&phrase, pass_str)? + }; + + // Now, use any "junctions" to derive a new key from this root key. + Ok(key.derive(junctions.iter().copied())) + } + + /// Create am sr25519 keypair from a BIP-39 mnemonic phrase and optional password. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ bip39::Mnemonic, sr25519::Keypair }; + /// + /// let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; + /// let mnemonic = Mnemonic::parse(phrase).unwrap(); + /// let keypair = Keypair::from_phrase(&mnemonic, None).unwrap(); + /// + /// keypair.sign(b"Hello world!"); + /// ``` + pub fn from_phrase(mnemonic: &bip39::Mnemonic, password: Option<&str>) -> Result { + let (arr, len) = mnemonic.to_entropy_array(); + let big_seed = + seed_from_entropy(&arr[0..len], password.unwrap_or("")).ok_or(Error::InvalidSeed)?; + + let seed: SecretKeyBytes = + big_seed[..SECRET_KEY_LENGTH].try_into().expect("should be valid Seed"); + + Self::from_secret_key(seed) + } + + /// Turn a 32 byte secret key into a keypair. + /// + /// # Warning + /// + /// This will only be secure if the seed is secure! + pub fn from_secret_key(secret_key_bytes: SecretKeyBytes) -> Result { + let keypair = MiniSecretKey::from_bytes(&secret_key_bytes) + .map_err(|_| Error::InvalidSeed)? + .expand_to_keypair(ExpansionMode::Ed25519); + + Ok(Keypair(keypair)) + } + + /// Construct a keypair from a slice of bytes, corresponding to + /// an Ed25519 expanded secret key. 
+ #[cfg(feature = "pezkuwi-js-compat")] + pub(crate) fn from_ed25519_bytes(bytes: &[u8]) -> Result { + let secret_key = schnorrkel::SecretKey::from_ed25519_bytes(bytes)?; + + Ok(Keypair(schnorrkel::Keypair { public: secret_key.to_public(), secret: secret_key })) + } + + /// Derive a child key from this one given a series of junctions. + /// + /// # Example + /// + /// ```rust,standalone_crate + /// use pezkuwi_subxt_signer::{ bip39::Mnemonic, sr25519::Keypair, DeriveJunction }; + /// + /// let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; + /// let mnemonic = Mnemonic::parse(phrase).unwrap(); + /// let keypair = Keypair::from_phrase(&mnemonic, None).unwrap(); + /// + /// // Equivalent to the URI path '//Alice/stash': + /// let new_keypair = keypair.derive([ + /// DeriveJunction::hard("Alice"), + /// DeriveJunction::soft("stash") + /// ]); + /// ``` + pub fn derive>(&self, junctions: Js) -> Self { + let init = self.0.secret.clone(); + let result = junctions.into_iter().fold(init, |acc, j| match j { + DeriveJunction::Soft(cc) => acc.derived_key_simple(ChainCode(cc), []).0, + DeriveJunction::Hard(cc) => { + let seed = acc.hard_derive_mini_secret_key(Some(ChainCode(cc)), b"").0; + seed.expand(ExpansionMode::Ed25519) + }, + }); + Self(result.into()) + } + + /// Obtain the [`PublicKey`] part of this key pair, which can be used in calls to [`verify()`]. + /// or otherwise converted into an address. The public key bytes are equivalent to a Bizinikiwi + /// `AccountId32`. + pub fn public_key(&self) -> PublicKey { + PublicKey(self.0.public.to_bytes()) + } + + /// Sign some message. These bytes can be used directly in a Bizinikiwi + /// `MultiSignature::sr25519(..)`. 
+ pub fn sign(&self, message: &[u8]) -> Signature { + let context = schnorrkel::signing_context(SIGNING_CTX); + let signature = self.0.sign(context.bytes(message)); + Signature(signature.to_bytes()) + } +} + +/// Verify that some signature for a message was created by the owner of the [`PublicKey`]. +/// +/// ```rust,standalone_crate +/// use pezkuwi_subxt_signer::{ bip39::Mnemonic, sr25519 }; +/// +/// let keypair = sr25519::dev::alice(); +/// let message = b"Hello!"; +/// +/// let signature = keypair.sign(message); +/// let public_key = keypair.public_key(); +/// assert!(sr25519::verify(&signature, message, &public_key)); +/// ``` +pub fn verify>(sig: &Signature, message: M, pubkey: &PublicKey) -> bool { + let Ok(signature) = schnorrkel::Signature::from_bytes(&sig.0) else { + return false; + }; + let Ok(public) = schnorrkel::PublicKey::from_bytes(&pubkey.0) else { + return false; + }; + public.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() +} + +/// An error handed back if creating a keypair fails. +#[derive(Debug, DeriveError)] +pub enum Error { + /// Invalid seed. + #[error("Invalid seed (was it the wrong length?)")] + InvalidSeed, + /// Invalid phrase. + #[error("Cannot parse phrase: {0}")] + Phrase(bip39::Error), + /// Invalid hex. + #[error("Cannot parse hex string: {0}")] + Hex(hex::FromHexError), + /// Signature error. + #[error("Signature error: {0}")] + Signature(schnorrkel::SignatureError), +} + +impl From for Error { + fn from(value: schnorrkel::SignatureError) -> Self { + Error::Signature(value) + } +} + +impl From for Error { + fn from(err: hex::FromHexError) -> Self { + Error::Hex(err) + } +} + +impl From for Error { + fn from(err: bip39::Error) -> Self { + Error::Phrase(err) + } +} + +/// Dev accounts, helpful for testing but not to be used in production, +/// since the secret keys are known. +pub mod dev { + use super::*; + + once_static_cloned! { + /// Equivalent to `{DEV_PHRASE}//Alice`. 
+ pub fn alice() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Alice").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Bob`. + pub fn bob() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Bob").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Charlie`. + pub fn charlie() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Charlie").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Dave`. + pub fn dave() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Dave").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Eve`. + pub fn eve() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Eve").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Ferdie`. + pub fn ferdie() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Ferdie").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//One`. + pub fn one() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//One").unwrap()).unwrap() + } + /// Equivalent to `{DEV_PHRASE}//Two`. + pub fn two() -> Keypair { + Keypair::from_uri(&SecretUri::from_str("//Two").unwrap()).unwrap() + } + } +} + +// Make `Keypair` usable to sign transactions in Subxt. This is optional so that +// `subxt-signer` can be used entirely independently of Subxt. +#[cfg(feature = "subxt")] +#[cfg_attr(docsrs, doc(cfg(feature = "subxt")))] +mod subxt_compat { + use super::*; + + use pezkuwi_subxt_core::{ + Config, + tx::signer::Signer as SignerT, + utils::{AccountId32, MultiAddress, MultiSignature}, + }; + + impl From for MultiSignature { + fn from(value: Signature) -> Self { + MultiSignature::Sr25519(value.0) + } + } + impl From for AccountId32 { + fn from(value: PublicKey) -> Self { + value.to_account_id() + } + } + impl From for MultiAddress { + fn from(value: PublicKey) -> Self { + value.to_address() + } + } + + impl PublicKey { + /// A shortcut to obtain an [`AccountId32`] from a [`PublicKey`]. 
+ /// We often want this type, and using this method avoids any + /// ambiguous type resolution issues. + pub fn to_account_id(self) -> AccountId32 { + AccountId32(self.0) + } + /// A shortcut to obtain a [`MultiAddress`] from a [`PublicKey`]. + /// We often want this type, and using this method avoids any + /// ambiguous type resolution issues. + pub fn to_address(self) -> MultiAddress { + MultiAddress::Id(self.to_account_id()) + } + } + + impl SignerT for Keypair + where + T::AccountId: From, + T::Address: From, + T::Signature: From, + { + fn account_id(&self) -> T::AccountId { + self.public_key().into() + } + + fn sign(&self, signer_payload: &[u8]) -> T::Signature { + self.sign(signer_payload).into() + } + } +} + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use super::*; + + use sp_core::{self, crypto::Pair as _, sr25519::Pair as SpPair}; + + #[test] + fn check_from_phrase_matches() { + for _ in 0..20 { + let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(None); + let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected"); + let pair = Keypair::from_phrase(&phrase, None).expect("should be valid"); + + assert_eq!(sp_pair.public().0, pair.public_key().0); + } + } + + #[test] + fn check_from_phrase_with_password_matches() { + for _ in 0..20 { + let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some("Testing")); + let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected"); + let pair = Keypair::from_phrase(&phrase, Some("Testing")).expect("should be valid"); + + assert_eq!(sp_pair.public().0, pair.public_key().0); + } + } + + #[test] + fn check_from_secret_uri_matches() { + // Some derive junctions to check that the logic there aligns: + let uri_paths = [ + "/foo", + "//bar", + "/1", + "/0001", + "//1", + "//0001", + "//foo//bar/wibble", + "//foo//001/wibble", + ]; + + for i in 0..2 { + for path in &uri_paths { + // Build an sp_core::Pair that includes a phrase, path and password: + let password = 
format!("Testing{i}"); + let (_sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some(&password)); + let uri = format!("{phrase}{path}///{password}"); + let sp_pair = SpPair::from_string(&uri, None).expect("should be valid"); + + // Now build a local Keypair using the equivalent API: + let uri = SecretUri::from_str(&uri).expect("should be valid secret URI"); + let pair = Keypair::from_uri(&uri).expect("should be valid"); + + // They should match: + assert_eq!(sp_pair.public().0, pair.public_key().0); + } + } + } + + #[test] + fn check_dev_accounts_match() { + use sp_keyring::sr25519::Keyring::*; + + assert_eq!(dev::alice().public_key().0, Alice.public().0); + assert_eq!(dev::bob().public_key().0, Bob.public().0); + assert_eq!(dev::charlie().public_key().0, Charlie.public().0); + assert_eq!(dev::dave().public_key().0, Dave.public().0); + assert_eq!(dev::eve().public_key().0, Eve.public().0); + assert_eq!(dev::ferdie().public_key().0, Ferdie.public().0); + assert_eq!(dev::one().public_key().0, One.public().0); + assert_eq!(dev::two().public_key().0, Two.public().0); + } + + #[test] + fn check_signing_and_verifying_matches() { + use sp_core::sr25519::Signature as SpSignature; + + for _ in 0..20 { + let (sp_pair, phrase, _seed) = SpPair::generate_with_phrase(Some("Testing")); + let phrase = bip39::Mnemonic::parse(phrase).expect("valid phrase expected"); + let pair = Keypair::from_phrase(&phrase, Some("Testing")).expect("should be valid"); + + let message = b"Hello world"; + let sp_sig = sp_pair.sign(message).0; + let sig = pair.sign(message).0; + + assert!(SpPair::verify(&SpSignature::from(sig), message, &sp_pair.public())); + assert!(verify(&Signature(sp_sig), message, &pair.public_key())); + } + } + + #[test] + fn check_hex_uris() { + // Hex URIs seem to ignore the password on sp_core and here. Check that this is consistent. 
+ let uri_str = + "0x1122334455667788112233445566778811223344556677881122334455667788///SomePassword"; + + let uri = SecretUri::from_str(uri_str).expect("should be valid"); + let pair = Keypair::from_uri(&uri).expect("should be valid"); + let sp_pair = SpPair::from_string(uri_str, None).expect("should be valid"); + + assert_eq!(pair.public_key().0, sp_pair.public().0); + } +} diff --git a/vendor/pezkuwi-subxt/signer/src/utils.rs b/vendor/pezkuwi-subxt/signer/src/utils.rs new file mode 100644 index 00000000..0f912f5c --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/src/utils.rs @@ -0,0 +1,37 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +#![allow(unused_macros)] + +/// Use like: +/// +/// ```rust,ignore +/// once_static_cloned!{ +/// /// Some documentation. +/// fn foo() -> Vec { +/// vec![1,2,3,4] +/// } +/// } +/// ``` +/// +/// Clones the item out of static storage. Useful if it +/// takes a while to create the item but cloning it is fairly cheap. +macro_rules! once_static_cloned { + ($($(#[$attr:meta])* $vis:vis fn $name:ident() -> $ty:ty { $expr:expr } )+) => { + $( + $(#[$attr])* + #[allow(missing_docs)] + $vis fn $name() -> $ty { + cfg_if::cfg_if! 
{ + if #[cfg(feature = "std")] { + static VAR: std::sync::OnceLock<$ty> = std::sync::OnceLock::new(); + VAR.get_or_init(|| { $expr }).clone() + } else { + { $expr } + } + } + } + )+ + }; +} diff --git a/vendor/pezkuwi-subxt/signer/tests/no-std/.gitignore b/vendor/pezkuwi-subxt/signer/tests/no-std/.gitignore new file mode 100644 index 00000000..ea8c4bf7 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/no-std/.gitignore @@ -0,0 +1 @@ +/target diff --git a/vendor/pezkuwi-subxt/signer/tests/no-std/Cargo.toml b/vendor/pezkuwi-subxt/signer/tests/no-std/Cargo.toml new file mode 100644 index 00000000..9c284027 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/no-std/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "nostd-tests" +version = "0.1.0" +edition = "2021" +publish = false + +[dev-dependencies] + +# This crate is not a part of the workspace, to ensure that no features +# are enabled for it at the workspace level; which conflict with this test. +subxt-signer = { path = "../../", default-features = false, features = [ + "ecdsa", + "sr25519", + "unstable-eth", +] } + +# this shouldn't be needed, it's in workspace.exclude, but still +# I get the complaint unless I add it... +[workspace] diff --git a/vendor/pezkuwi-subxt/signer/tests/no-std/tests/no_std.rs b/vendor/pezkuwi-subxt/signer/tests/no-std/tests/no_std.rs new file mode 100644 index 00000000..743f1f6b --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/no-std/tests/no_std.rs @@ -0,0 +1,51 @@ +#![no_std] + +use pezkuwi_subxt_signer::{ecdsa, eth, sr25519}; + +// Run the tests by calling: +// +// ```text +// cargo test +// ``` +// +// These are independent of any other package to ensure that nothing +// else enabled the same feature flag that subxt-signer needs to work ok +// (subxt seems to, for instance). 
+ +#[test] +fn sr25519_signing_works() { + let alice = sr25519::dev::alice(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. + let signature = alice.sign(b"Hello there"); + assert!(sr25519::verify( + &signature, + b"Hello there", + &alice.public_key() + )); +} + +#[test] +fn ecdsa_signing_works() { + let alice = ecdsa::dev::alice(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. + let signature = alice.sign(b"Hello there"); + assert!(ecdsa::verify( + &signature, + b"Hello there", + &alice.public_key() + )); +} + +#[test] +fn eth_signing_works() { + let alice = eth::dev::alith(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. + let signature = alice.sign(b"Hello there"); + assert!(eth::verify(&signature, b"Hello there", &alice.public_key())); +} diff --git a/vendor/pezkuwi-subxt/signer/tests/wasm/.gitignore b/vendor/pezkuwi-subxt/signer/tests/wasm/.gitignore new file mode 100644 index 00000000..ea8c4bf7 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/wasm/.gitignore @@ -0,0 +1 @@ +/target diff --git a/vendor/pezkuwi-subxt/signer/tests/wasm/Cargo.toml b/vendor/pezkuwi-subxt/signer/tests/wasm/Cargo.toml new file mode 100644 index 00000000..cdb56bc7 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/wasm/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "wasm-tests" +version = "0.1.0" +edition = "2021" +publish = false + +[dev-dependencies] +console_error_panic_hook = "0.1.7" +tracing-wasm = "0.2.1" +wasm-bindgen-test = "0.3.24" + +# This crate is not a part of the workspace, because we want to +# enable the "web" feature here but don't want it enabled as part +# of workspace builds. Also disable the "subxt" feature here because +# we want to ensure it works in isolation of that. 
+subxt-signer = { path = "../../", default-features = false, features = [ + "ecdsa", + "sr25519", + "std", + "unstable-eth", + "web", +] } + +# this shouldn't be needed, it's in workspace.exclude, but still +# I get the complaint unless I add it... +[workspace] diff --git a/vendor/pezkuwi-subxt/signer/tests/wasm/tests/wasm.rs b/vendor/pezkuwi-subxt/signer/tests/wasm/tests/wasm.rs new file mode 100644 index 00000000..8e1fa884 --- /dev/null +++ b/vendor/pezkuwi-subxt/signer/tests/wasm/tests/wasm.rs @@ -0,0 +1,54 @@ +#![cfg(target_arch = "wasm32")] + +use pezkuwi_subxt_signer::{ecdsa, eth, sr25519}; +use wasm_bindgen_test::*; + +wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + +// Run the tests by calling: +// +// ```text +// wasm-pack test --firefox --headless +// ``` +// +// These are independent of any other package to ensure that nothing +// else enabled the same feature flag that subxt-signer needs to work ok +// (subxt seems to, for instance). + +#[wasm_bindgen_test] +async fn wasm_sr25519_signing_works() { + let alice = sr25519::dev::alice(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. + let signature = alice.sign(b"Hello there"); + assert!(sr25519::verify( + &signature, + b"Hello there", + &alice.public_key() + )); +} + +#[wasm_bindgen_test] +async fn wasm_ecdsa_signing_works() { + let alice = ecdsa::dev::alice(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. + let signature = alice.sign(b"Hello there"); + assert!(ecdsa::verify( + &signature, + b"Hello there", + &alice.public_key() + )); +} + +#[wasm_bindgen_test] +async fn wasm_eth_signing_works() { + let alice = eth::dev::alith(); + + // There's some non-determinism in the signing, so this ensures that + // the rand stuff is configured properly to run ok in wasm. 
+ let signature = alice.sign(b"Hello there"); + assert!(eth::verify(&signature, b"Hello there", &alice.public_key())); +} diff --git a/vendor/pezkuwi-subxt/subxt/Cargo.toml b/vendor/pezkuwi-subxt/subxt/Cargo.toml new file mode 100644 index 00000000..99fffa01 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/Cargo.toml @@ -0,0 +1,166 @@ +[package] +name = "pezkuwi-subxt" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "../README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Submit extrinsics (transactions) to a Pezkuwi/Bizinikiwi node via RPC" +keywords = ["bizinikiwi", "blockchain", "pezkuwi"] + +[lints] +workspace = true + +[features] +# For dev and documentation reasons we enable more features than are often desired. +# it's recommended to use `--no-default-features` and then select what you need. +default = ["jsonrpsee", "native"] + +# Enable this for native (ie non web/wasm builds). +# Exactly 1 of "web" and "native" is expected. +native = [ + "pezkuwi-subxt-lightclient?/native", + "pezkuwi-subxt-rpcs/native", + "pezsp-crypto-hashing/std", + "tokio-util", + "tokio?/sync", +] + +# Enable this for web/wasm builds. +# Exactly 1 of "web" and "native" is expected. +web = [ + "pezkuwi-subxt-lightclient?/web", + "pezkuwi-subxt-macro/web", + "pezkuwi-subxt-rpcs/web", + "tokio?/sync", +] + +# Feature flag to enable the default future executor. +# Technically it's a hack enable to both but simplifies the conditional compilation +# and subxt is selecting executor based on the used platform. +# +# For instance `wasm-bindgen-futures` panics if the platform isn't wasm32 and +# similar for tokio that requires a tokio runtime to be initialized. 
+runtime = ["tokio/rt", "wasm-bindgen-futures"] + +# Enable this to use the reconnecting rpc client +reconnecting-rpc-client = ["pezkuwi-subxt-rpcs/reconnecting-rpc-client"] + +# Enable this to use jsonrpsee, which enables the jsonrpsee RPC client, and +# a couple of util functions which rely on jsonrpsee. +jsonrpsee = [ + "dep:jsonrpsee", + "pezkuwi-subxt-rpcs/jsonrpsee", + "runtime", +] + +# Enable this to fetch and utilize the latest unstable metadata from a node. +# The unstable metadata is subject to breaking changes and the subxt might +# fail to decode the metadata properly. Use this to experiment with the +# latest features exposed by the metadata. +unstable-metadata = [] + +# Activate this to expose the Light Client functionality. +# Note that this feature is experimental and things may break or not work as expected. +unstable-light-client = [ + "pezkuwi-subxt-lightclient", + "pezkuwi-subxt-rpcs/unstable-light-client", +] + +# Activate this to expose the ability to generate metadata from Wasm runtime files. 
+runtime-wasm-path = ["pezkuwi-subxt-macro/runtime-wasm-path"] + +[dependencies] +async-trait = { workspace = true } +codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] } +derive-where = { workspace = true } +either = { workspace = true } +frame-metadata = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +pezsp-crypto-hashing = { workspace = true } +scale-bits = { workspace = true, features = ["default"] } +scale-decode = { workspace = true, features = ["default"] } +scale-encode = { workspace = true, features = ["default"] } +scale-info = { workspace = true, features = ["default"] } +scale-value = { workspace = true, features = ["default"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = ["default", "raw_value"] } +thiserror = { workspace = true } +tracing = { workspace = true } +web-time = { workspace = true } + +# Provides some deserialization, types like U256/H256 and hashing impls like twox/blake256: +primitive-types = { workspace = true, features = ["codec", "scale-info", "serde"] } + +# Included if the "jsonrpsee" feature is enabled. +jsonrpsee = { workspace = true, optional = true, features = ["jsonrpsee-types"] } + +# Other pezkuwi-subxt crates we depend on. 
+pezkuwi-subxt-core = { workspace = true, features = ["std"] } +pezkuwi-subxt-lightclient = { workspace = true, optional = true, default-features = false } +pezkuwi-subxt-macro = { workspace = true } +pezkuwi-subxt-metadata = { workspace = true, features = ["std"] } +pezkuwi-subxt-rpcs = { workspace = true, features = ["subxt"] } + +# For parsing urls to disallow insecure schemes +url = { workspace = true } + +# Included if "native" feature is enabled +tokio-util = { workspace = true, features = ["compat"], optional = true } + +# Included if the reconnecting rpc client feature is enabled +# Only the `tokio/sync` is used in the reconnecting rpc client +# and that compiles both for native and web. +tokio = { workspace = true, optional = true } +wasm-bindgen-futures = { workspace = true, optional = true } + +[dev-dependencies] +assert_matches = { workspace = true } +bitvec = { workspace = true } +codec = { workspace = true, features = ["bit-vec", "derive"] } +pezkuwi-subxt-rpcs = { workspace = true, features = ["mock-rpc-client", "subxt"] } +pezkuwi-subxt-signer = { workspace = true, features = ["unstable-eth"] } +pezsp-core = { workspace = true, features = ["std"] } +pezsp-keyring = { workspace = true, features = ["std"] } +pezsp-runtime = { workspace = true, features = ["std"] } +scale-info = { workspace = true, features = ["bit-vec"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread", "sync", "time"] } +# Tracing subscriber is useful for light-client examples to ensure that +# the `bootNodes` and chain spec are configured correctly. 
If all is fine, then +# the light-client will emit INFO logs with +# `GrandPa warp sync finished` and `Finalized block runtime ready.` +tracing-subscriber = { workspace = true } +# These deps are needed to test the reconnecting rpc client +http-body = { workspace = true } +hyper = { workspace = true } +jsonrpsee = { workspace = true, features = ["server"] } +tower = { workspace = true } + +[[example]] +name = "light_client_basic" +path = "examples/light_client_basic.rs" +required-features = ["jsonrpsee", "unstable-light-client"] + +[[example]] +name = "light_client_local_node" +path = "examples/light_client_local_node.rs" +required-features = ["jsonrpsee", "native", "unstable-light-client"] + +[[example]] +name = "setup_reconnecting_rpc_client" +path = "examples/setup_reconnecting_rpc_client.rs" +required-features = ["reconnecting-rpc-client"] + +[package.metadata.docs.rs] +features = ["default", "unstable-light-client"] +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +features = ["default", "unstable-light-client"] diff --git a/vendor/pezkuwi-subxt/subxt/examples/bizinikiwi_compat_signer.rs b/vendor/pezkuwi-subxt/subxt/examples/bizinikiwi_compat_signer.rs new file mode 100644 index 00000000..d42531e0 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/bizinikiwi_compat_signer.rs @@ -0,0 +1,115 @@ +//! This example demonstrates how to use to add a custom signer implementation to `subxt` +//! by using the signer implementation from pezkuwi-sdk. +//! +//! Similar functionality was provided by the `bizinikiwi-compat` feature in the original `subxt` +//! crate. which is now removed. 
+ +#![allow(missing_docs, unused)] + +use sp_core::{Pair as _, sr25519}; +use pezkuwi_subxt::{Config, OnlineClient, PezkuwiConfig, config::bizinikiwi::MultiAddress}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +/// A concrete PairSigner implementation which relies on `sr25519::Pair` for signing +/// and that PezkuwiConfig is the runtime configuration. +mod pair_signer { + use super::*; + use sp_runtime::{ + MultiSignature as SpMultiSignature, + traits::{IdentifyAccount, Verify}, + }; + use pezkuwi_subxt::{ + config::bizinikiwi::{AccountId32, MultiSignature}, + tx::Signer, + }; + + /// A [`Signer`] implementation for [`sp_core::sr25519::Pair`]. + #[derive(Clone)] + pub struct PairSigner { + account_id: ::AccountId, + signer: sr25519::Pair, + } + + impl PairSigner { + /// Creates a new [`Signer`] from an [`sp_core::sr25519::Pair`]. + pub fn new(signer: sr25519::Pair) -> Self { + let account_id = + ::Signer::from(signer.public()).into_account(); + Self { + // Convert `sp_core::AccountId32` to `pezkuwi_subxt::config::bizinikiwi::AccountId32`. + // + // This is necessary because we use `pezkuwi_subxt::config::bizinikiwi::AccountId32` and no + // From/Into impls are provided between `sp_core::AccountId32` because + // `pezkuwi-sdk` isn't a direct dependency in subxt. + // + // This can also be done by provided a wrapper type around + // `pezkuwi_subxt::config::bizinikiwi::AccountId32` to implement such conversions but + // that also most likely requires a custom `Config` with a separate `AccountId` type + // to work properly without additional hacks. + account_id: AccountId32(account_id.into()), + signer, + } + } + + /// Returns the [`sp_core::sr25519::Pair`] implementation used to construct this. + pub fn signer(&self) -> &sr25519::Pair { + &self.signer + } + + /// Return the account ID. 
+ pub fn account_id(&self) -> &AccountId32 { + &self.account_id + } + } + + impl Signer for PairSigner { + fn account_id(&self) -> ::AccountId { + self.account_id.clone() + } + + fn sign(&self, signer_payload: &[u8]) -> ::Signature { + let signature = self.signer.sign(signer_payload); + MultiSignature::Sr25519(signature.0) + } + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + let signer = { + let acc = sr25519::Pair::from_string("//Alice", None)?; + pair_signer::PairSigner::new(acc) + }; + + let dest = { + let acc = sr25519::Pair::from_string("//Bob", None)?; + MultiAddress::Address32(acc.public().0) + }; + + // Build a balance transfer extrinsic. + let balance_transfer_tx = pezkuwi::tx().balances().transfer_allow_death(dest, 100_000); + + // Submit the balance transfer extrinsic from Alice, and wait for it to be successful + // and in a finalized block. We get back the extrinsic events if all is well. + let events = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &signer) + .await? + .wait_for_finalized_success() + .await?; + + // Find a Transfer event and print it. + let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/block_decoding_dynamic.rs b/vendor/pezkuwi-subxt/subxt/examples/block_decoding_dynamic.rs new file mode 100644 index 00000000..e879d590 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/block_decoding_dynamic.rs @@ -0,0 +1,44 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client that subscribes to blocks of the Pezkuwi network. 
+ let api = OnlineClient::::from_url("wss://rpc.pezkuwi.io:443").await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + while let Some(block) = blocks_sub.next().await { + let block = block?; + let block_number = block.header().number; + let block_hash = block.hash(); + println!("Block #{block_number} ({block_hash})"); + + // Decode each signed extrinsic in the block dynamically + let extrinsics = block.extrinsics().await?; + for ext in extrinsics.iter() { + let Some(transaction_extensions) = ext.transaction_extensions() else { + continue; // we do not look at inherents in this example + }; + + // Decode the fields into our dynamic Value type to display: + let fields = ext.decode_as_fields::()?; + + println!(" {}/{}", ext.pallet_name(), ext.call_name()); + println!(" Transaction Extensions:"); + for signed_ext in transaction_extensions.iter() { + // We only want to take a look at these 3 signed extensions, because the others all + // just have unit fields. 
+ if ["CheckMortality", "CheckNonce", "ChargeTransactionPayment"] + .contains(&signed_ext.name()) + { + println!(" {}: {}", signed_ext.name(), signed_ext.value()?); + } + } + println!(" Fields:"); + println!(" {fields}\n"); + } + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/block_decoding_static.rs b/vendor/pezkuwi-subxt/subxt/examples/block_decoding_static.rs new file mode 100644 index 00000000..9cae4ba4 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/block_decoding_static.rs @@ -0,0 +1,59 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + utils::{AccountId32, MultiAddress}, +}; + +use codec::Decode; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +use pezkuwi::balances::calls::types::TransferKeepAlive; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client that subscribes to blocks of the Pezkuwi network. + let api = OnlineClient::::from_url("wss://rpc.pezkuwi.io:443").await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + + // For each block, print details about the `TransferKeepAlive` transactions we are interested + // in. 
+ while let Some(block) = blocks_sub.next().await { + let block = block?; + let block_number = block.header().number; + let block_hash = block.hash(); + println!("Block #{block_number} ({block_hash}):"); + + let extrinsics = block.extrinsics().await?; + for transfer in extrinsics.find::() { + let transfer = transfer?; + + let Some(extensions) = transfer.details.transaction_extensions() else { + panic!("TransferKeepAlive should be signed") + }; + + let addr_bytes = + transfer.details.address_bytes().expect("TransferKeepAlive should be signed"); + let sender = MultiAddress::::decode(&mut &addr_bytes[..]) + .expect("Decoding should work"); + let sender = display_address(&sender); + let receiver = display_address(&transfer.value.dest); + let value = transfer.value.value; + let tip = extensions.tip().expect("Should have tip"); + let nonce = extensions.nonce().expect("Should have nonce"); + + println!( + " Transfer of {value} DOT:\n {sender} (Tip: {tip}, Nonce: {nonce}) ---> {receiver}", + ); + } + } + + Ok(()) +} + +fn display_address(addr: &MultiAddress) -> String { + if let MultiAddress::Id(id32) = addr { format!("{id32}") } else { "MultiAddress::...".into() } +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/blocks_subscribing.rs b/vendor/pezkuwi-subxt/subxt/examples/blocks_subscribing.rs new file mode 100644 index 00000000..5bca0b0d --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/blocks_subscribing.rs @@ -0,0 +1,63 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + + // For each block, print a bunch of information about it: + while let Some(block) = blocks_sub.next().await { + let 
block = block?; + + let block_number = block.header().number; + let block_hash = block.hash(); + + println!("Block #{block_number}:"); + println!(" Hash: {block_hash}"); + println!(" Extrinsics:"); + + // Log each of the extrinsic with it's associated events: + let extrinsics = block.extrinsics().await?; + for ext in extrinsics.iter() { + let idx = ext.index(); + let events = ext.events().await?; + let bytes_hex = format!("0x{}", hex::encode(ext.bytes())); + + // See the API docs for more ways to decode extrinsics: + let decoded_ext = ext.as_root_extrinsic::(); + + println!(" Extrinsic #{idx}:"); + println!(" Bytes: {bytes_hex}"); + println!(" Decoded: {decoded_ext:?}"); + + println!(" Events:"); + for evt in events.iter() { + let evt = evt?; + let pallet_name = evt.pallet_name(); + let event_name = evt.variant_name(); + let event_values = evt.decode_as_fields::()?; + + println!(" {pallet_name}_{event_name}"); + println!(" {event_values}"); + } + + println!(" Transaction Extensions:"); + if let Some(transaction_extensions) = ext.transaction_extensions() { + for transaction_extension in transaction_extensions.iter() { + let name = transaction_extension.name(); + let value = transaction_extension.value()?.to_string(); + println!(" {name}: {value}"); + } + } + } + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/constants_dynamic.rs b/vendor/pezkuwi-subxt/subxt/examples/constants_dynamic.rs new file mode 100644 index 00000000..84275267 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/constants_dynamic.rs @@ -0,0 +1,25 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig, dynamic::Value}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // We can query a constant by providing a tuple of the pallet and constant name. 
The return type + // will be `Value` if we pass this query: + let constant_query = ("System", "BlockLength"); + let _value = api.constants().at(&constant_query)?; + + // Or we can use the library function to query a constant, which allows us to pass a generic + // type that Subxt will attempt to decode the constant into: + let constant_query = pezkuwi_subxt::dynamic::constant::("System", "BlockLength"); + let value = api.constants().at(&constant_query)?; + + // Or we can obtain the bytes for the constant, using either form of query. + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Constant bytes: {:?}", bytes); + println!("Constant value: {}", value); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/constants_static.rs b/vendor/pezkuwi-subxt/subxt/examples/constants_static.rs new file mode 100644 index 00000000..f16017a0 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/constants_static.rs @@ -0,0 +1,24 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // A query to obtain some constant: + let constant_query = pezkuwi::constants().system().block_length(); + + // Obtain the value: + let value = api.constants().at(&constant_query)?; + + // Or obtain the bytes: + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Encoded block length: {bytes:?}"); + println!("Block length: {value:?}"); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/events.rs b/vendor/pezkuwi-subxt/subxt/examples/events.rs new file mode 100644 index 00000000..297bd42e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/events.rs @@ -0,0 +1,48 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + 
+#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Get events for the latest block: + let events = api.events().at_latest().await?; + + // We can dynamically decode events: + println!("Dynamic event details:"); + for event in events.iter() { + let event = event?; + + let pallet = event.pallet_name(); + let variant = event.variant_name(); + let field_values = event.decode_as_fields::()?; + + println!("{pallet}::{variant}: {field_values}"); + } + + // Or we can attempt to statically decode them into the root Event type: + println!("Static event details:"); + for event in events.iter() { + let event = event?; + + if let Ok(ev) = event.as_root_event::() { + println!("{ev:?}"); + } else { + println!(""); + } + } + + // Or we can look for specific events which match our statically defined ones: + let transfer_event = events.find_first::()?; + if let Some(ev) = transfer_event { + println!(" - Balance transfer success: value: {:?}", ev.amount); + } else { + println!(" - No balance transfer event found in this block"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/light_client_basic.rs b/vendor/pezkuwi-subxt/subxt/examples/light_client_basic.rs new file mode 100644 index 00000000..6ca7d69e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/light_client_basic.rs @@ -0,0 +1,47 @@ +#![allow(missing_docs)] +use futures::StreamExt; +use pezkuwi_subxt::{PezkuwiConfig, client::OnlineClient, lightclient::LightClient}; + +// Generate an interface that we can use from the node's metadata. 
+#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +const POLKADOT_SPEC: &str = include_str!("../../artifacts/demo_chain_specs/pezkuwi.json"); +const ASSET_HUB_SPEC: &str = + include_str!("../../artifacts/demo_chain_specs/pezkuwi_asset_hub.json"); + +#[tokio::main] +async fn main() -> Result<(), Box> { + // The lightclient logs are informative: + tracing_subscriber::fmt::init(); + + // Instantiate a light client with the Pezkuwi relay chain, + // and connect it to Asset Hub, too. + let (lightclient, pezkuwi_rpc) = LightClient::relay_chain(POLKADOT_SPEC)?; + let asset_hub_rpc = lightclient.parachain(ASSET_HUB_SPEC)?; + + // Create Subxt clients from these Smoldot backed RPC clients. + let pezkuwi_api = OnlineClient::::from_rpc_client(pezkuwi_rpc).await?; + let asset_hub_api = OnlineClient::::from_rpc_client(asset_hub_rpc).await?; + + // Use them! + let pezkuwi_sub = pezkuwi_api + .blocks() + .subscribe_finalized() + .await? + .map(|block| ("Pezkuwi", block)); + let parachain_sub = asset_hub_api + .blocks() + .subscribe_finalized() + .await? + .map(|block| ("AssetHub", block)); + + let mut stream_combinator = futures::stream::select(pezkuwi_sub, parachain_sub); + + while let Some((chain, block)) = stream_combinator.next().await { + let block = block?; + println!(" Chain {:?} hash={:?}", chain, block.hash()); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/light_client_local_node.rs b/vendor/pezkuwi-subxt/subxt/examples/light_client_local_node.rs new file mode 100644 index 00000000..7b876fe0 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/light_client_local_node.rs @@ -0,0 +1,58 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{ + PezkuwiConfig, + client::OnlineClient, + lightclient::{ChainConfig, LightClient}, + utils::fetch_chainspec_from_rpc_node, +}; + +// Generate an interface that we can use from the node's metadata. 
+#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // The smoldot logs are informative: + tracing_subscriber::fmt::init(); + + // Use a utility function to obtain a chain spec from a locally running node: + let chain_spec = fetch_chainspec_from_rpc_node("ws://127.0.0.1:9944").await?; + + // Configure the bootnodes of this chain spec. In this case, because we start one + // single node, the bootnodes must be overwritten for the light client to connect + // to the local node. + // + // The `12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` is the P2P address + // from a local pezkuwi node starting with + // `--node-key 0000000000000000000000000000000000000000000000000000000000000001` + let chain_config = ChainConfig::chain_spec(chain_spec.get()).set_bootnodes([ + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp", + ])?; + + // Start the light client up, establishing a connection to the local node. + let (_light_client, chain_rpc) = LightClient::relay_chain(chain_config)?; + let api = OnlineClient::::from_rpc_client(chain_rpc).await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000); + + // Submit the balance transfer extrinsic from Alice, and wait for it to be successful + // and in a finalized block. We get back the extrinsic events if all is well. + let from = dev::alice(); + let events = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) + .await? + .wait_for_finalized_success() + .await?; + + // Find a Transfer event and print it. 
+ let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/rpc_legacy.rs b/vendor/pezkuwi-subxt/subxt/examples/rpc_legacy.rs new file mode 100644 index 00000000..38659082 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/rpc_legacy.rs @@ -0,0 +1,61 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + backend::{legacy::LegacyRpcMethods, rpc::RpcClient}, + config::DefaultExtrinsicParamsBuilder as Params, +}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // First, create a raw RPC client: + let rpc_client = RpcClient::from_url("ws://127.0.0.1:9944").await?; + + // Use this to construct our RPC methods: + let rpc = LegacyRpcMethods::::new(rpc_client.clone()); + + // We can use the same client to drive our full Subxt interface too: + let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; + + // Now, we can make some RPC calls using some legacy RPC methods. + println!( + "📛 System Name: {:?}\n🩺 Health: {:?}\n🖫 Properties: {:?}\n🔗 Chain: {:?}\n", + rpc.system_name().await?, + rpc.system_health().await?, + rpc.system_properties().await?, + rpc.system_chain().await? 
+ ); + + // We can also interleave RPC calls and using the full Subxt client, here to submit multiple + // transactions using the legacy `system_account_next_index` RPC call, which returns a nonce + // that is adjusted for any transactions already in the pool: + + let alice = dev::alice(); + let bob = dev::bob(); + + loop { + let current_nonce = rpc.system_account_next_index(&alice.public_key().into()).await?; + + let ext_params = Params::new().mortal(8).nonce(current_nonce).build(); + + let balance_transfer = pezkuwi::tx() + .balances() + .transfer_allow_death(bob.public_key().into(), 1_000_000); + + let ext_hash = api + .tx() + .create_partial_offline(&balance_transfer, ext_params)? + .sign(&alice) + .submit() + .await?; + + println!("Submitted ext {ext_hash} with nonce {current_nonce}"); + + // Sleep less than block time, but long enough to ensure + // not all transactions end up in the same block. + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_dynamic.rs b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_dynamic.rs new file mode 100644 index 00000000..e2ab296b --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_dynamic.rs @@ -0,0 +1,24 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, config::PezkuwiConfig, utils::AccountId32}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Create a "dynamic" runtime API payload that calls the + // `AccountNonceApi_account_nonce` function. We could use the + // `scale_value::Value` type as output, and a vec of those as inputs, + // but since we know the input + return types we can pass them directly. + // There is one input argument, so the inputs are a tuple of one element. 
+ let account: AccountId32 = dev::alice().public_key().into(); + let runtime_api_call = + pezkuwi_subxt::dynamic::runtime_api_call::<_, u64>("AccountNonceApi", "account_nonce", (account,)); + + // Submit the call to get back a result. + let nonce = api.runtime_api().at_latest().await?.call(runtime_api_call).await?; + + println!("Account nonce: {:#?}", nonce); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_raw.rs b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_raw.rs new file mode 100644 index 00000000..b4f6ec74 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_raw.rs @@ -0,0 +1,27 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + ext::{ + codec::{Compact, Decode}, + frame_metadata::RuntimeMetadataPrefixed, + }, +}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Use runtime APIs at the latest block: + let runtime_apis = api.runtime_api().at_latest().await?; + + // Ask for metadata and decode it: + let result_bytes = runtime_apis.call_raw("Metadata_metadata", None).await?; + let (_, meta): (Compact, RuntimeMetadataPrefixed) = Decode::decode(&mut &*result_bytes)?; + + println!("{meta:?}"); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_static.rs b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_static.rs new file mode 100644 index 00000000..97c21efd --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/runtime_apis_static.rs @@ -0,0 +1,23 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, config::PezkuwiConfig}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to 
use: + let api = OnlineClient::::new().await?; + + // Create a runtime API payload that calls into + // `AccountNonceApi_account_nonce` function. + let account = dev::alice().public_key().into(); + let runtime_api_call = pezkuwi::apis().account_nonce_api().account_nonce(account); + + // Submit the call and get back a result. + let nonce = api.runtime_api().at_latest().await?.call(runtime_api_call).await; + + println!("AccountNonceApi_account_nonce for Alice: {nonce:?}"); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_client_custom_rpc.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_client_custom_rpc.rs new file mode 100644 index 00000000..8438241e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_client_custom_rpc.rs @@ -0,0 +1,81 @@ +#![allow(missing_docs)] +use std::{ + fmt::Write, + pin::Pin, + sync::{Arc, Mutex}, +}; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + backend::rpc::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClient, RpcClientT}, +}; + +// A dummy RPC client that doesn't actually handle requests properly +// at all, but instead just logs what requests to it were made. +struct MyLoggingClient { + log: Arc>, +} + +// We have to implement this fairly low level trait to turn [`MyLoggingClient`] +// into an RPC client that we can make use of in Subxt. Here we just log the requests +// made but don't forward them to any real node, and instead just return nonsense. +impl RpcClientT for MyLoggingClient { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + writeln!( + self.log.lock().unwrap(), + "{method}({})", + params.as_ref().map(|p| p.get()).unwrap_or("[]") + ) + .unwrap(); + + // We've logged the request; just return garbage. Because a boxed future is returned, + // you're able to run whatever async code you'd need to actually talk to a node. 
+ let res = RawValue::from_string("[]".to_string()).unwrap(); + Box::pin(std::future::ready(Ok(res))) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + writeln!( + self.log.lock().unwrap(), + "{sub}({}) (unsub: {unsub})", + params.as_ref().map(|p| p.get()).unwrap_or("[]") + ) + .unwrap(); + + // We've logged the request; just return garbage. Because a boxed future is returned, + // and that will return a boxed Stream impl, you have a bunch of flexibility to build + // and return whatever type of Stream you see fit. + let res = RawValue::from_string("[]".to_string()).unwrap(); + let stream = futures::stream::once(async move { Ok(res) }); + let stream: Pin + Send>> = Box::pin(stream); + // This subscription does not provide an ID. + Box::pin(std::future::ready(Ok(RawRpcSubscription { stream, id: None }))) + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Instantiate our replacement RPC client. + let log = Arc::default(); + let rpc_client = { + let inner = MyLoggingClient { log: Arc::clone(&log) }; + RpcClient::new(inner) + }; + + // Pass this into our OnlineClient to instantiate it. This will lead to some + // RPC calls being made to fetch chain details/metadata, which will immediately + // fail.. 
+ let _ = OnlineClient::::from_rpc_client(rpc_client).await; + + // But, we can see that the calls were made via our custom RPC client: + println!("Log of calls made:\n\n{}", log.lock().unwrap().as_str()); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_client_offline.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_client_offline.rs new file mode 100644 index 00000000..1cff0981 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_client_offline.rs @@ -0,0 +1,32 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{ + OfflineClient, config::PezkuwiConfig, ext::codec::Decode, metadata::Metadata, utils::H256, +}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // We need to obtain the following details for an OfflineClient to be instantiated: + + // 1. Genesis hash (RPC call: chain_getBlockHash(0)): + let genesis_hash = { + let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"; + let bytes = hex::decode(h).unwrap(); + H256::from_slice(&bytes) + }; + + // 2. A runtime version (system_version constant on a Bizinikiwi node has these): + let runtime_version = + pezkuwi_subxt::client::RuntimeVersion { spec_version: 9370, transaction_version: 20 }; + + // 3. 
Metadata (I'll load it from the downloaded metadata, but you can use `subxt metadata > + // file.scale` to download it): + let metadata = { + let bytes = std::fs::read("./artifacts/pezkuwi_metadata_small.scale").unwrap(); + Metadata::decode(&mut &*bytes).unwrap() + }; + + // Create an offline client using the details obtained above: + let _api = OfflineClient::::new(genesis_hash, runtime_version, metadata); + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_config_assethub.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_config_assethub.rs new file mode 100644 index 00000000..7e2d9ef7 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_config_assethub.rs @@ -0,0 +1,53 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::config::{ + Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, PezkuwiConfig, BizinikiwConfig, +}; + +#[pezkuwi_subxt::subxt( + runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", + derive_for_type( + path = "staging_xcm::v3::multilocation::MultiLocation", + derive = "Clone, codec::Encode", + recursive + ) +)] +pub mod runtime {} +use runtime::runtime_types::{ + staging_xcm::v3::multilocation::MultiLocation, xcm::v3::junctions::Junctions, +}; + +// We don't need to construct this at runtime, so an empty enum is appropriate. 
+pub enum AssetHubConfig {} + +impl Config for AssetHubConfig { + type AccountId = ::AccountId; + type Address = ::Address; + type Signature = ::Signature; + type Hasher = ::Hasher; + type Header = ::Header; + type ExtrinsicParams = DefaultExtrinsicParams; + // Here we use the MultiLocation from the metadata as a part of the config: + // The `ChargeAssetTxPayment` signed extension that is part of the ExtrinsicParams above, now + // uses the type: + type AssetId = MultiLocation; +} + +#[tokio::main] +async fn main() { + // With the config defined, we can create an extrinsic with subxt: + let client = pezkuwi_subxt::OnlineClient::::new().await.unwrap(); + let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); + + // Build extrinsic params using an asset at this location as a tip: + let location: MultiLocation = MultiLocation { parents: 3, interior: Junctions::Here }; + let tx_config = DefaultExtrinsicParamsBuilder::::new() + .tip_of(1234, location) + .build(); + + // And provide the extrinsic params including the tip when submitting a transaction: + let _ = client + .tx() + .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) + .await; +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_config_custom.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_config_custom.rs new file mode 100644 index 00000000..dd856e76 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_config_custom.rs @@ -0,0 +1,95 @@ +#![allow(missing_docs)] +use codec::Encode; +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{ + client::ClientState, + config::{ + Config, ExtrinsicParams, ExtrinsicParamsEncoder, ExtrinsicParamsError, HashFor, + transaction_extensions::Params, + }, +}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale")] +pub mod runtime {} + +// We don't need to construct this at runtime, +// so an empty enum is appropriate: +pub enum CustomConfig {} + +impl Config for CustomConfig { + type 
AccountId = pezkuwi_subxt::utils::AccountId32; + type Address = pezkuwi_subxt::utils::MultiAddress; + type Signature = pezkuwi_subxt::utils::MultiSignature; + type Hasher = pezkuwi_subxt::config::bizinikiwi::BlakeTwo256; + type Header = pezkuwi_subxt::config::bizinikiwi::BizinikiwiHeader; + type ExtrinsicParams = CustomExtrinsicParams; + type AssetId = u32; +} + +// This represents some arbitrary (and nonsensical) custom parameters that +// will be attached to transaction extra and additional payloads: +pub struct CustomExtrinsicParams { + genesis_hash: HashFor, + tip: u128, + foo: bool, +} + +// We can provide a "pretty" interface to allow users to provide these: +#[derive(Default)] +pub struct CustomExtrinsicParamsBuilder { + tip: u128, + foo: bool, +} + +impl CustomExtrinsicParamsBuilder { + pub fn new() -> Self { + Default::default() + } + pub fn tip(mut self, value: u128) -> Self { + self.tip = value; + self + } + pub fn enable_foo(mut self) -> Self { + self.foo = true; + self + } +} + +impl Params for CustomExtrinsicParamsBuilder {} + +// Describe how to fetch and then encode the params: +impl ExtrinsicParams for CustomExtrinsicParams { + type Params = CustomExtrinsicParamsBuilder; + + // Gather together all of the params we will need to encode: + fn new(client: &ClientState, params: Self::Params) -> Result { + Ok(Self { genesis_hash: client.genesis_hash, tip: params.tip, foo: params.foo }) + } +} + +// Encode the relevant params when asked: +impl ExtrinsicParamsEncoder for CustomExtrinsicParams { + fn encode_value_to(&self, v: &mut Vec) { + (self.tip, self.foo).encode_to(v); + } + fn encode_implicit_to(&self, v: &mut Vec) { + self.genesis_hash.encode_to(v) + } +} + +#[tokio::main] +async fn main() { + // With the config defined, it can be handed to Subxt as follows: + let client = pezkuwi_subxt::OnlineClient::::new().await.unwrap(); + + let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); + + // Build your custom "Params": + let tx_config = 
CustomExtrinsicParamsBuilder::new().tip(1234).enable_foo(); + + // And provide them when submitting a transaction: + let _ = client + .tx() + .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) + .await; +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_config_transaction_extension.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_config_transaction_extension.rs new file mode 100644 index 00000000..3b986729 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_config_transaction_extension.rs @@ -0,0 +1,107 @@ +#![allow(missing_docs)] +use codec::Encode; +use pezkuwi_subxt_signer::sr25519::dev; +use scale_encode::EncodeAsType; +use scale_info::PortableRegistry; +use pezkuwi_subxt::{ + client::ClientState, + config::{ + Config, DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, + ExtrinsicParamsError, transaction_extensions, + }, +}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod runtime {} + +// We don't need to construct this at runtime, +// so an empty enum is appropriate: +#[derive(EncodeAsType)] +pub enum CustomConfig {} + +impl Config for CustomConfig { + type AccountId = pezkuwi_subxt::utils::AccountId32; + type Address = pezkuwi_subxt::utils::MultiAddress; + type Signature = pezkuwi_subxt::utils::MultiSignature; + type Hasher = pezkuwi_subxt::config::bizinikiwi::BlakeTwo256; + type Header = pezkuwi_subxt::config::bizinikiwi::BizinikiwiHeader; + type ExtrinsicParams = transaction_extensions::AnyOf< + Self, + ( + // Load in the existing signed extensions we're interested in + // (if the extension isn't actually needed it'll just be ignored): + transaction_extensions::VerifySignature, + transaction_extensions::CheckSpecVersion, + transaction_extensions::CheckTxVersion, + transaction_extensions::CheckNonce, + transaction_extensions::CheckGenesis, + transaction_extensions::CheckMortality, + transaction_extensions::ChargeAssetTxPayment, + 
transaction_extensions::ChargeTransactionPayment, + transaction_extensions::CheckMetadataHash, + // And add a new one of our own: + CustomTransactionExtension, + ), + >; + type AssetId = u32; +} + +// Our custom signed extension doesn't do much: +pub struct CustomTransactionExtension; + +// Give the extension a name; this allows `AnyOf` to look it +// up in the chain metadata in order to know when and if to use it. +impl transaction_extensions::TransactionExtension for CustomTransactionExtension { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CustomTransactionExtension" + } +} + +// Gather together any params we need for our signed extension, here none. +impl ExtrinsicParams for CustomTransactionExtension { + type Params = (); + + fn new(_client: &ClientState, _params: Self::Params) -> Result { + Ok(CustomTransactionExtension) + } +} + +// Encode whatever the extension needs to provide when asked: +impl ExtrinsicParamsEncoder for CustomTransactionExtension { + fn encode_value_to(&self, v: &mut Vec) { + "Hello".encode_to(v); + } + fn encode_implicit_to(&self, v: &mut Vec) { + true.encode_to(v) + } +} + +// When composing a tuple of signed extensions, the user parameters we need must +// be able to convert `Into` a tuple of corresponding `Params`. Here, we just +// "hijack" the default param builder, but add the `Params` (`()`) for our +// new signed extension at the end, to make the types line up. IN reality you may wish +// to construct an entirely new interface to provide the relevant `Params`. 
+pub fn custom( + params: DefaultExtrinsicParamsBuilder, +) -> <::ExtrinsicParams as ExtrinsicParams>::Params { + let (a, b, c, d, e, f, g, h, i) = params.build(); + (a, b, c, d, e, f, g, h, i, ()) +} + +#[tokio::main] +async fn main() { + // With the config defined, it can be handed to Subxt as follows: + let client = pezkuwi_subxt::OnlineClient::::new().await.unwrap(); + + let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); + + // Configure the tx params: + let tx_config = DefaultExtrinsicParamsBuilder::new().tip(1234); + + // And provide them when submitting a transaction: + let _ = client + .tx() + .sign_and_submit_then_watch(&tx_payload, &dev::alice(), custom(tx_config)) + .await; +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_reconnecting_rpc_client.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_reconnecting_rpc_client.rs new file mode 100644 index 00000000..7f88c3f2 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_reconnecting_rpc_client.rs @@ -0,0 +1,79 @@ +//! Example to utilize the `reconnecting rpc client` in subxt +//! which hidden behind behind `--feature reconnecting-rpc-client` +//! +//! To utilize full logs from the RPC client use: +//! `RUST_LOG="jsonrpsee=trace,subxt-reconnecting-rpc-client=trace"` + +#![allow(missing_docs)] + +use std::time::Duration; + +use futures::StreamExt; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient}, +}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + + // Create a new client with a reconnecting RPC client. + let rpc = RpcClient::builder() + // Reconnect with exponential backoff + // + // This API is "iterator-like" and we use `take` to limit the number of retries. 
+ .retry_policy( + ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10)).take(3), + ) + // There are other configurations as well that can be found at + // [`reconnecting_rpc_client::ClientBuilder`]. + .build("ws://localhost:9944".to_string()) + .await?; + + // If you want to use the chainhead backend with the reconnecting RPC client, you can do so like + // this: + // + // ``` + // use pezkuwi_subxt::backend::chain_head:ChainHeadBackend; + // use pezkuwi_subxt::OnlineClient; + // + // let backend = ChainHeadBackend::builder().build_with_background_task(RpcClient::new(rpc.clone())); + // let api: OnlineClient = OnlineClient::from_backend(Arc::new(backend)).await?; + // ``` + + let api: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await?; + + // Run for at most 100 blocks and print a bunch of information about it. + // + // The subscription is automatically re-started when the RPC client has reconnected. + // You can test that by stopping the pezkuwi node and restarting it. + let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); + + while let Some(block) = blocks_sub.next().await { + let block = match block { + Ok(b) => b, + Err(e) => { + // This can only happen on the legacy backend and the unstable backend + // will handle this internally. + if e.is_disconnected_will_reconnect() { + println!("The RPC connection was lost and we may have missed a few blocks"); + continue; + } + + return Err(e.into()); + }, + }; + + let block_number = block.number(); + let block_hash = block.hash(); + + println!("Block #{block_number} ({block_hash})"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/setup_rpc_chainhead_backend.rs b/vendor/pezkuwi-subxt/subxt/examples/setup_rpc_chainhead_backend.rs new file mode 100644 index 00000000..8bed2878 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/setup_rpc_chainhead_backend.rs @@ -0,0 +1,39 @@ +//! 
Example to utilize the ChainHeadBackend rpc backend to subscribe to finalized blocks. + +#![allow(missing_docs)] + +use futures::StreamExt; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + backend::{ + chain_head::{ChainHeadBackend, ChainHeadBackendBuilder}, + rpc::RpcClient, + }, +}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + + let rpc = RpcClient::from_url("ws://localhost:9944".to_string()).await?; + let backend: ChainHeadBackend = + ChainHeadBackendBuilder::default().build_with_background_driver(rpc.clone()); + let api = OnlineClient::from_backend(std::sync::Arc::new(backend)).await?; + + let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); + + while let Some(block) = blocks_sub.next().await { + let block = block?; + + let block_number = block.number(); + let block_hash = block.hash(); + + println!("Block #{block_number} ({block_hash})"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/storage_fetch.rs b/vendor/pezkuwi-subxt/subxt/examples/storage_fetch.rs new file mode 100644 index 00000000..d2858597 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/storage_fetch.rs @@ -0,0 +1,28 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + let account = dev::alice().public_key().into(); + + // Build a storage query to access account information. 
+ let storage_query = pezkuwi::storage().system().account(); + + // Use that query to access a storage entry, fetch a result and decode the value. + // The static address knows that fetching requires a tuple of one value, an + // AccountId32. + let client_at = api.storage().at_latest().await?; + let account_info = client_at.entry(storage_query)?.fetch((account,)).await?.decode()?; + + // The static address that we got from the subxt macro knows the expected input + // and return types, so it is decoded into a static type for us. + println!("Alice: {account_info:?}"); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/storage_fetch_dynamic.rs b/vendor/pezkuwi-subxt/subxt/examples/storage_fetch_dynamic.rs new file mode 100644 index 00000000..7db05423 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/storage_fetch_dynamic.rs @@ -0,0 +1,29 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + dynamic::{At, Value}, + utils::AccountId32, +}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a dynamic storage query to access account information. + // here, we assume that there is one value to provide at this entry + // to access a value; an AccountId32. In this example we don't know the + // return type and so we set it to `Value`, which anything can decode into. + let account: AccountId32 = dev::alice().public_key().into(); + let storage_query = pezkuwi_subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); + + // Use that query to access a storage entry, fetch a result and decode the value. 
+ let client_at = api.storage().at_latest().await?; + let account_info = client_at.entry(storage_query)?.fetch((account,)).await?.decode()?; + + // With out `Value` type we can dig in to find what we want using the `At` + // trait and `.at()` method that this provides on the Value. + println!("Alice has free balance: {}", account_info.at("data").at("free").unwrap()); + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/storage_iterating.rs b/vendor/pezkuwi-subxt/subxt/examples/storage_iterating.rs new file mode 100644 index 00000000..cb6a2e11 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/storage_iterating.rs @@ -0,0 +1,41 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig, ext::futures::StreamExt}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a storage query to access account information. Same as if we were + // fetching a single value from this entry. + let storage_query = pezkuwi::storage().system().account(); + + // Use that query to access a storage entry, iterate over it and decode values. + let client_at = api.storage().at_latest().await?; + + // We provide an empty tuple when iterating. If the storage entry had been an N map with + // multiple keys, then we could provide any prefix of those keys to iterate over. This is + // statically type checked, so only a valid number/type of keys in the tuple is accepted. 
+ let mut values = client_at.entry(storage_query)?.iter(()).await?; + + while let Some(kv) = values.next().await { + let kv = kv?; + + // The key decodes into the type that the static address knows about, in this case a + // tuple of one entry, because the only part of the key that we can decode is the + // AccountId32 for each user. + let (account_id32,) = kv.key()?.decode()?; + + // The value decodes into a statically generated type which holds account information. + let value = kv.value().decode()?; + + let value_data = value.data; + println!("{account_id32}:\n {value_data:?}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/storage_iterating_dynamic.rs b/vendor/pezkuwi-subxt/subxt/examples/storage_iterating_dynamic.rs new file mode 100644 index 00000000..a0e33e4e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/storage_iterating_dynamic.rs @@ -0,0 +1,42 @@ +#![allow(missing_docs)] +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, + dynamic::{At, Value}, + ext::futures::StreamExt, + utils::AccountId32, +}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a dynamic storage query to access account information. + // here, we assume that there is one value to provide at this entry + // to access a value; an AccountId32. In this example we don't know the + // return type and so we set it to `Value`, which anything can decode into. + let storage_query = pezkuwi_subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); + + // Use that query to access a storage entry, iterate over it and decode values. + let client_at = api.storage().at_latest().await?; + let mut values = client_at.entry(storage_query)?.iter(()).await?; + + while let Some(kv) = values.next().await { + let kv = kv?; + + // The key decodes into the first type we provided in the address. 
Since there's just + // one key, it is a tuple of one entry, an AccountId32. If we didn't know how many + // keys or their type, we could set the key to `Vec` instead. + let (account_id32,) = kv.key()?.decode()?; + + // The value decodes into the second type we provided in the address. In this example, + // we just decode it into our `Value` type and then look at the "data" field in this + // (which implicitly assumes we get a struct shaped thing back with such a field). + let value = kv.value().decode()?; + + let value_data = value.at("data").unwrap(); + println!("{account_id32}:\n {value_data}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_basic.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_basic.rs new file mode 100644 index 00000000..08e379b1 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_basic.rs @@ -0,0 +1,35 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000); + + // Submit the balance transfer extrinsic from Alice, and wait for it to be successful + // and in a finalized block. We get back the extrinsic events if all is well. + let from = dev::alice(); + let events = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) + .await? + .wait_for_finalized_success() + .await?; + + // Find a Transfer event and print it. 
+ let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_basic_frontier.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_basic_frontier.rs new file mode 100644 index 00000000..348d057d --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_basic_frontier.rs @@ -0,0 +1,54 @@ +//! Example to use subxt to talk to bizinikiwi-based nodes with ethereum accounts +//! which is not the default for subxt which is why we need to provide a custom config. +//! +//! This example requires to run a local frontier/moonbeam node to work. + +#![allow(missing_docs)] + +use pezkuwi_subxt_core::utils::AccountId20; +use pezkuwi_subxt_signer::eth::{Signature, dev}; +use pezkuwi_subxt::OnlineClient; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/frontier_metadata_small.scale")] +mod eth_runtime {} + +enum EthRuntimeConfig {} + +impl pezkuwi_subxt::Config for EthRuntimeConfig { + type AccountId = AccountId20; + type Address = AccountId20; + type Signature = Signature; + type Hasher = pezkuwi_subxt::config::bizinikiwi::BlakeTwo256; + type Header = + pezkuwi_subxt::config::bizinikiwi::BizinikiwiHeader; + type ExtrinsicParams = pezkuwi_subxt::config::BizinikiwiExtrinsicParams; + type AssetId = u32; +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let api = OnlineClient::::from_insecure_url("ws://127.0.0.1:9944").await?; + + let alith = dev::alith(); + let baltathar = dev::baltathar(); + let dest = baltathar.public_key().to_account_id(); + + println!("baltathar pub: {}", hex::encode(baltathar.public_key().0)); + println!("baltathar addr: {}", hex::encode(dest)); + + let balance_transfer_tx = eth_runtime::tx().balances().transfer_allow_death(dest, 10_001); + + let events = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &alith) + .await? 
+ .wait_for_finalized_success() + .await?; + + let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_boxed.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_boxed.rs new file mode 100644 index 00000000..d09bb73a --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_boxed.rs @@ -0,0 +1,43 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let api = OnlineClient::::new().await?; + + // Prepare some extrinsics. These are boxed so that they can live alongside each other. + let txs = [dynamic_remark(), balance_transfer(), remark()]; + + for tx in txs { + let from = dev::alice(); + api.tx() + .sign_and_submit_then_watch_default(&tx, &from) + .await? 
+ .wait_for_finalized_success() + .await?; + + println!("Submitted tx"); + } + + Ok(()) +} + +fn balance_transfer() -> Box { + let dest = dev::bob().public_key().into(); + Box::new(pezkuwi::tx().balances().transfer_allow_death(dest, 10_000)) +} + +fn remark() -> Box { + Box::new(pezkuwi::tx().system().remark(vec![1, 2, 3, 4, 5])) +} + +fn dynamic_remark() -> Box { + use pezkuwi_subxt::dynamic::{Value, tx}; + let tx_payload = tx("System", "remark", vec![Value::from_bytes("Hello")]); + + Box::new(tx_payload) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_partial.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_partial.rs new file mode 100644 index 00000000..60574744 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_partial.rs @@ -0,0 +1,50 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + +type BoxedError = Box; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), BoxedError> { + // Spawned tasks require things held across await points to impl Send, + // so we use one to demonstrate that this is possible with `PartialTransaction` + tokio::spawn(signing_example()).await??; + Ok(()) +} + +async fn signing_example() -> Result<(), BoxedError> { + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000); + + let alice = dev::alice(); + + // Create partial tx, ready to be signed. + let mut partial_tx = api + .tx() + .create_partial( + &balance_transfer_tx, + &alice.public_key().to_account_id(), + Default::default(), + ) + .await?; + + // Simulate taking some time to get a signature back, in part to + // show that the `PartialTransaction` can be held across await points. 
+ tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let signature = alice.sign(&partial_tx.signer_payload()); + + // Sign the transaction. + let tx = partial_tx + .sign_with_account_and_signature(&alice.public_key().to_account_id(), &signature.into()); + + // Submit it. + tx.submit_and_watch().await?.wait_for_finalized_success().await?; + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_status_stream.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_status_stream.rs new file mode 100644 index 00000000..9ef5cfd8 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_status_stream.rs @@ -0,0 +1,53 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{OnlineClient, PezkuwiConfig, tx::TxStatus}; + +// Generate an interface that we can use from the node's metadata. +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000); + + // Submit the balance transfer extrinsic from Alice, and then monitor the + // progress of it. + let from = dev::alice(); + let mut balance_transfer_progress = + api.tx().sign_and_submit_then_watch_default(&balance_transfer_tx, &from).await?; + + while let Some(status) = balance_transfer_progress.next().await { + match status? { + // It's finalized in a block! 
+ TxStatus::InFinalizedBlock(in_block) => { + println!( + "Transaction {:?} is finalized in block {:?}", + in_block.extrinsic_hash(), + in_block.block_hash() + ); + + // grab the events and fail if no ExtrinsicSuccess event seen: + let events = in_block.wait_for_success().await?; + // We can look for events (this uses the static interface; we can also iterate + // over them and dynamically decode them): + let transfer_event = events.find_first::()?; + + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } else { + println!("Failed to find Balances::Transfer Event"); + } + }, + // Just log any other status we encounter: + other => { + println!("Status: {other:?}"); + }, + } + } + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/examples/tx_with_params.rs b/vendor/pezkuwi-subxt/subxt/examples/tx_with_params.rs new file mode 100644 index 00000000..1d83f303 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/examples/tx_with_params.rs @@ -0,0 +1,29 @@ +#![allow(missing_docs)] +use pezkuwi_subxt_signer::sr25519::dev; +use pezkuwi_subxt::{ + OnlineClient, PezkuwiConfig, config::pezkuwi::PezkuwiExtrinsicParamsBuilder as Params, +}; + +#[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +pub mod pezkuwi {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Pezkuwi nodes. + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let tx = pezkuwi::tx().balances().transfer_allow_death(dest, 10_000); + + // Configure the transaction parameters; we give a small tip and set the + // transaction to live for 32 blocks from the `latest_block` above. 
+ let tx_params = Params::new().tip(1_000).mortal(32).build(); + + // submit the transaction: + let from = dev::alice(); + let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?; + println!("Balance transfer extrinsic submitted with hash : {hash}"); + + Ok(()) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream.rs b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream.rs new file mode 100644 index 00000000..59eb19ef --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream.rs @@ -0,0 +1,338 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + config::{Config, HashFor}, + error::BackendError, +}; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; +use pezkuwi_subxt_rpcs::methods::chain_head::{ChainHeadRpcMethods, FollowEvent}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +/// A `Stream` whose goal is to remain subscribed to `chainHead_follow`. It will re-subscribe if the +/// subscription is ended for any reason, and it will return the current `subscription_id` as an +/// event, along with the other follow events. +pub struct FollowStream { + // Using this and not just keeping a copy of the RPC methods + // around means that we can test this in isolation with dummy streams. + stream_getter: FollowEventStreamGetter, + stream: InnerStreamState, +} + +impl std::fmt::Debug for FollowStream { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("FollowStream") + .field("stream_getter", &"..") + .field("stream", &self.stream) + .finish() + } +} + +/// A getter function that returns an [`FollowEventStreamFut`]. +pub type FollowEventStreamGetter = Box FollowEventStreamFut + Send>; + +/// The future which will return a stream of follow events and the subscription ID for it. 
+pub type FollowEventStreamFut = Pin< + Box< + dyn Future, String), BackendError>> + + Send + + 'static, + >, +>; + +/// The stream of follow events. +pub type FollowEventStream = + Pin, BackendError>> + Send + 'static>>; + +/// Either a ready message with the current subscription ID, or +/// an event from the stream itself. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum FollowStreamMsg { + /// The stream is ready (and has a subscription ID) + Ready(String), + /// An event from the stream. + Event(FollowEvent), +} + +impl FollowStreamMsg { + /// Return an event, or none if the message is a "ready" one. + pub fn into_event(self) -> Option> { + match self { + FollowStreamMsg::Ready(_) => None, + FollowStreamMsg::Event(e) => Some(e), + } + } +} + +enum InnerStreamState { + /// We've just created the stream; we'll start Initializing it + New, + /// We're fetching the inner subscription. Move to Ready when we have one. + Initializing(FollowEventStreamFut), + /// Report back the subscription ID here, and then start ReceivingEvents. + Ready(Option<(FollowEventStream, String)>), + /// We are polling for, and receiving events from the stream. + ReceivingEvents(FollowEventStream), + /// We received a stop event. We'll send one on and restart the stream. + Stopped, + /// The stream is finished and will not restart (likely due to an error). + Finished, +} + +impl std::fmt::Debug for InnerStreamState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::New => write!(f, "New"), + Self::Initializing(_) => write!(f, "Initializing(..)"), + Self::Ready(_) => write!(f, "Ready(..)"), + Self::ReceivingEvents(_) => write!(f, "ReceivingEvents(..)"), + Self::Stopped => write!(f, "Stopped"), + Self::Finished => write!(f, "Finished"), + } + } +} + +impl FollowStream { + /// Create a new [`FollowStream`] given a function which returns the stream. 
+ pub fn new(stream_getter: FollowEventStreamGetter) -> Self { + Self { stream_getter, stream: InnerStreamState::New } + } + + /// Create a new [`FollowStream`] given the RPC methods. + pub fn from_methods(methods: ChainHeadRpcMethods) -> FollowStream> { + FollowStream { + stream_getter: Box::new(move || { + let methods = methods.clone(); + Box::pin(async move { + // Make the RPC call: + let stream = methods.chainhead_v1_follow(true).await?; + // Extract the subscription ID: + let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else { + return Err(BackendError::Other( + "Subscription ID expected for chainHead_follow response, but not given" + .to_owned(), + )); + }; + // Map stream errors into the higher level subxt one: + let stream = stream.map_err(|e| e.into()); + let stream: FollowEventStream> = Box::pin(stream); + // Return both: + Ok((stream, sub_id)) + }) + }), + stream: InnerStreamState::New, + } + } +} + +impl std::marker::Unpin for FollowStream {} + +impl Stream for FollowStream { + type Item = Result, BackendError>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + loop { + match &mut this.stream { + InnerStreamState::New => { + let fut = (this.stream_getter)(); + this.stream = InnerStreamState::Initializing(fut); + continue; + }, + InnerStreamState::Initializing(fut) => { + match fut.poll_unpin(cx) { + Poll::Pending => { + return Poll::Pending; + }, + Poll::Ready(Ok(sub_with_id)) => { + this.stream = InnerStreamState::Ready(Some(sub_with_id)); + continue; + }, + Poll::Ready(Err(e)) => { + // Re-start if a reconnecting backend was enabled. + if e.is_disconnected_will_reconnect() { + this.stream = InnerStreamState::Stopped; + continue; + } + + // Finish forever if there's an error, passing it on. 
+ this.stream = InnerStreamState::Finished; + return Poll::Ready(Some(Err(e))); + }, + } + }, + InnerStreamState::Ready(stream) => { + // We never set the Option to `None`; we just have an Option so + // that we can take ownership of the contents easily here. + let (sub, sub_id) = stream.take().expect("should always be Some"); + this.stream = InnerStreamState::ReceivingEvents(sub); + return Poll::Ready(Some(Ok(FollowStreamMsg::Ready(sub_id)))); + }, + InnerStreamState::ReceivingEvents(stream) => { + match stream.poll_next_unpin(cx) { + Poll::Pending => { + return Poll::Pending; + }, + Poll::Ready(None) => { + // No error happened but the stream ended; restart and + // pass on a Stop message anyway. + this.stream = InnerStreamState::Stopped; + continue; + }, + Poll::Ready(Some(Ok(ev))) => { + if let FollowEvent::Stop = ev { + // A stop event means the stream has ended, so start + // over after passing on the stop message. + this.stream = InnerStreamState::Stopped; + continue; + } + return Poll::Ready(Some(Ok(FollowStreamMsg::Event(ev)))); + }, + Poll::Ready(Some(Err(e))) => { + // Re-start if a reconnecting backend was enabled. + if e.is_disconnected_will_reconnect() { + this.stream = InnerStreamState::Stopped; + continue; + } + + // Finish forever if there's an error, passing it on. 
+ this.stream = InnerStreamState::Finished; + return Poll::Ready(Some(Err(e))); + }, + } + }, + InnerStreamState::Stopped => { + this.stream = InnerStreamState::New; + return Poll::Ready(Some(Ok(FollowStreamMsg::Event(FollowEvent::Stop)))); + }, + InnerStreamState::Finished => { + return Poll::Ready(None); + }, + } + } + } +} + +#[cfg(test)] +pub(super) mod test_utils { + use super::*; + use crate::config::bizinikiwi::H256; + use pezkuwi_subxt_rpcs::methods::chain_head::{ + BestBlockChanged, Finalized, Initialized, NewBlock, + }; + use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }; + + /// Given some events, returns a follow stream getter that we can use in + /// place of the usual RPC method. + pub fn test_stream_getter(events: F) -> FollowEventStreamGetter + where + Hash: Send + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + let start_idx = Arc::new(AtomicUsize::new(0)); + + Box::new(move || { + // Start the events from where we left off last time. + let start_idx = start_idx.clone(); + let this_idx = start_idx.load(Ordering::Relaxed); + let events: Vec<_> = events().into_iter().skip(this_idx).collect(); + + Box::pin(async move { + // Increment start_idx for each event we see, so that if we get + // the stream again, we get only the remaining events for it. 
+ let stream = futures::stream::iter(events).map(move |ev| { + start_idx.fetch_add(1, Ordering::Relaxed); + ev + }); + + let stream: FollowEventStream = Box::pin(stream); + Ok((stream, format!("sub_id_{this_idx}"))) + }) + }) + } + + /// An initialized event + pub fn ev_initialized(n: u64) -> FollowEvent { + FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![H256::from_low_u64_le(n)], + finalized_block_runtime: None, + }) + } + + /// A new block event + pub fn ev_new_block(parent_n: u64, n: u64) -> FollowEvent { + FollowEvent::NewBlock(NewBlock { + parent_block_hash: H256::from_low_u64_le(parent_n), + block_hash: H256::from_low_u64_le(n), + new_runtime: None, + }) + } + + /// A best block event + pub fn ev_best_block(n: u64) -> FollowEvent { + FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: H256::from_low_u64_le(n), + }) + } + + /// A finalized event + pub fn ev_finalized( + finalized_ns: impl IntoIterator, + pruned_ns: impl IntoIterator, + ) -> FollowEvent { + FollowEvent::Finalized(Finalized { + finalized_block_hashes: finalized_ns.into_iter().map(H256::from_low_u64_le).collect(), + pruned_block_hashes: pruned_ns.into_iter().map(H256::from_low_u64_le).collect(), + }) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use test_utils::{ev_initialized, ev_new_block, test_stream_getter}; + + #[tokio::test] + async fn follow_stream_provides_messages_until_error() { + // The events we'll get back on the stream. + let stream_getter = test_stream_getter(|| { + [ + Ok(ev_initialized(1)), + // Stop should lead to a drop and resubscribe: + Ok(FollowEvent::Stop), + Ok(FollowEvent::Stop), + Ok(ev_new_block(1, 2)), + // Nothing should be emitted after an error: + Err(BackendError::Other("ended".to_owned())), + Ok(ev_new_block(2, 3)), + ] + }); + + let s = FollowStream::new(stream_getter); + let out: Vec<_> = s.filter_map(async |e| e.ok()).collect().await; + + // The expected response, given the above. 
+ assert_eq!( + out, + vec![ + FollowStreamMsg::Ready("sub_id_0".to_owned()), + FollowStreamMsg::Event(ev_initialized(1)), + FollowStreamMsg::Event(FollowEvent::Stop), + FollowStreamMsg::Ready("sub_id_2".to_owned()), + FollowStreamMsg::Event(FollowEvent::Stop), + FollowStreamMsg::Ready("sub_id_3".to_owned()), + FollowStreamMsg::Event(ev_new_block(1, 2)), + ] + ); + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_driver.rs b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_driver.rs new file mode 100644 index 00000000..82a34bb5 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_driver.rs @@ -0,0 +1,718 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin}; +use crate::{ + config::Hash, + error::{BackendError, RpcError}, +}; +use futures::stream::{Stream, StreamExt}; +use pezkuwi_subxt_rpcs::methods::chain_head::{FollowEvent, Initialized, RuntimeEvent}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + ops::DerefMut, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, +}; + +/// A `Stream` which builds on `FollowStreamDriver`, and allows multiple subscribers to obtain +/// events from the single underlying subscription (each being provided an `Initialized` message and +/// all new blocks since then, as if they were each creating a unique `chainHead_follow` +/// subscription). This is the "top" layer of our follow stream subscriptions, and the one that's +/// interacted with elsewhere. +#[derive(Debug)] +pub struct FollowStreamDriver { + inner: FollowStreamUnpin, + shared: Shared, +} + +impl FollowStreamDriver { + /// Create a new [`FollowStreamDriver`]. This must be polled by some executor + /// in order for any progress to be made. Things can subscribe to events. 
+ pub fn new(follow_unpin: FollowStreamUnpin) -> Self { + Self { inner: follow_unpin, shared: Shared::default() } + } + + /// Return a handle from which we can create new subscriptions to follow events. + pub fn handle(&self) -> FollowStreamDriverHandle { + FollowStreamDriverHandle { shared: self.shared.clone() } + } +} + +impl Stream for FollowStreamDriver { + type Item = Result<(), BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.inner.poll_next_unpin(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => { + // Mark ourselves as done so that everything can end. + self.shared.done(); + Poll::Ready(None) + }, + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), + Poll::Ready(Some(Ok(item))) => { + // Push item to any subscribers. + self.shared.push_item(item); + Poll::Ready(Some(Ok(()))) + }, + } + } +} + +/// A handle that can be used to create subscribers, but that doesn't +/// itself subscribe to events. +#[derive(Debug, Clone)] +pub struct FollowStreamDriverHandle { + shared: Shared, +} + +impl FollowStreamDriverHandle { + /// Subscribe to follow events. + pub fn subscribe(&self) -> FollowStreamDriverSubscription { + self.shared.subscribe() + } +} + +/// A subscription to events from the [`FollowStreamDriver`]. All subscriptions +/// begin first with a `Ready` event containing the current subscription ID, and +/// then with an `Initialized` event containing the latest finalized block and latest +/// runtime information, and then any new/best block events and so on received since +/// the latest finalized block. 
+#[derive(Debug)] +pub struct FollowStreamDriverSubscription { + id: usize, + done: bool, + shared: Shared, + local_items: VecDeque>>, +} + +impl Stream for FollowStreamDriverSubscription { + type Item = FollowStreamMsg>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.done { + return Poll::Ready(None); + } + + loop { + if let Some(item) = self.local_items.pop_front() { + return Poll::Ready(Some(item)); + } + + let items = self.shared.take_items_and_save_waker(self.id, cx.waker()); + + // If no items left, mark locally as done (to avoid further locking) + // and return None to signal done-ness. + let Some(items) = items else { + self.done = true; + return Poll::Ready(None); + }; + + // No items? We've saved the waker so we'll be told when more come. + // Else, save the items locally and loop around to pop from them. + if items.is_empty() { + return Poll::Pending; + } else { + self.local_items = items; + } + } + } +} + +impl FollowStreamDriverSubscription { + /// Return the current subscription ID. If the subscription has stopped, then this will + /// wait until a new subscription has started with a new ID. + pub async fn subscription_id(self) -> Option { + let ready_event = self + .skip_while(|ev| std::future::ready(!matches!(ev, FollowStreamMsg::Ready(_)))) + .next() + .await?; + + match ready_event { + FollowStreamMsg::Ready(sub_id) => Some(sub_id), + _ => None, + } + } + + /// Subscribe to the follow events, ignoring any other messages. + pub fn events(self) -> impl Stream>> + Send + Sync { + self.filter_map(|ev| std::future::ready(ev.into_event())) + } +} + +impl Clone for FollowStreamDriverSubscription { + fn clone(&self) -> Self { + self.shared.subscribe() + } +} + +impl Drop for FollowStreamDriverSubscription { + fn drop(&mut self) { + self.shared.remove_sub(self.id); + } +} + +/// Locked shared state. 
The driver stream will access this state to push +/// events to any subscribers, and subscribers will access it to pull the +/// events destined for themselves. +#[derive(Debug, Clone)] +struct Shared(Arc>>); + +#[derive(Debug)] +struct SharedState { + done: bool, + next_id: usize, + subscribers: HashMap>, + /// Keep a buffer of all events that should be handed to a new subscription. + block_events_for_new_subscriptions: VecDeque>>, + // Keep track of the subscription ID we send out on new subs. + current_subscription_id: Option, + // Keep track of the init message we send out on new subs. + current_init_message: Option>>, + // Runtime events by block hash; we need to track these to know + // whether the runtime has changed when we see a finalized block notification. + seen_runtime_events: HashMap, +} + +impl Default for Shared { + fn default() -> Self { + Shared(Arc::new(Mutex::new(SharedState { + next_id: 1, + done: false, + subscribers: HashMap::new(), + current_init_message: None, + current_subscription_id: None, + seen_runtime_events: HashMap::new(), + block_events_for_new_subscriptions: VecDeque::new(), + }))) + } +} + +impl Shared { + /// Set the shared state to "done"; no more items will be handed to it. + pub fn done(&self) { + let mut shared = self.0.lock().unwrap(); + shared.done = true; + + // Wake up all subscribers so they get notified that the backend was closed + for details in shared.subscribers.values_mut() { + if let Some(waker) = details.waker.take() { + waker.wake(); + } + } + } + + /// Cleanup a subscription. + pub fn remove_sub(&self, sub_id: usize) { + let mut shared = self.0.lock().unwrap(); + shared.subscribers.remove(&sub_id); + } + + /// Take items for some subscription ID and save the waker. 
+ pub fn take_items_and_save_waker( + &self, + sub_id: usize, + waker: &Waker, + ) -> Option>>> { + let mut shared = self.0.lock().unwrap(); + + let is_done = shared.done; + let details = shared.subscribers.get_mut(&sub_id)?; + + // no more items to pull, and stream closed, so return None. + if details.items.is_empty() && is_done { + return None; + } + + // else, take whatever items, and save the waker if not done yet. + let items = std::mem::take(&mut details.items); + if !is_done { + details.waker = Some(waker.clone()); + } + Some(items) + } + + /// Push a new item out to subscribers. + pub fn push_item(&self, item: FollowStreamMsg>) { + let mut shared = self.0.lock().unwrap(); + let shared = shared.deref_mut(); + + // broadcast item to subscribers: + for details in shared.subscribers.values_mut() { + details.items.push_back(item.clone()); + if let Some(waker) = details.waker.take() { + waker.wake(); + } + } + + // Keep our buffer of ready/block events up-to-date: + match item { + FollowStreamMsg::Ready(sub_id) => { + // Set new subscription ID when it comes in. + shared.current_subscription_id = Some(sub_id); + }, + FollowStreamMsg::Event(FollowEvent::Initialized(ev)) => { + // New subscriptions will be given this init message: + shared.current_init_message = Some(ev.clone()); + // Clear block cache (since a new finalized block hash is seen): + shared.block_events_for_new_subscriptions.clear(); + }, + FollowStreamMsg::Event(FollowEvent::Finalized(finalized_ev)) => { + // Update the init message that we'll hand out to new subscriptions. If the init + // message is `None` for some reason, we just ignore this step. + if let Some(init_message) = &mut shared.current_init_message { + // Find the latest runtime update that's been finalized. 
+ let newest_runtime = finalized_ev + .finalized_block_hashes + .iter() + .rev() + .filter_map(|h| shared.seen_runtime_events.get(&h.hash()).cloned()) + .next(); + + shared.seen_runtime_events.clear(); + + init_message + .finalized_block_hashes + .clone_from(&finalized_ev.finalized_block_hashes); + + if let Some(runtime_ev) = newest_runtime { + init_message.finalized_block_runtime = Some(runtime_ev); + } + } + + // The last finalized block will be reported as Initialized by our driver, + // therefore there is no need to report NewBlock and BestBlock events for it. + // If the Finalized event reported multiple finalized hashes, we only care about + // the state at the head of the chain, therefore it is correct to remove those as + // well. Idem for the pruned hashes; they will never be reported again and we + // remove them from the window of events. + let to_remove: HashSet = finalized_ev + .finalized_block_hashes + .iter() + .chain(finalized_ev.pruned_block_hashes.iter()) + .map(|h| h.hash()) + .collect(); + + shared.block_events_for_new_subscriptions.retain(|ev| match ev { + FollowEvent::NewBlock(new_block_ev) => + !to_remove.contains(&new_block_ev.block_hash.hash()), + FollowEvent::BestBlockChanged(best_block_ev) => + !to_remove.contains(&best_block_ev.best_block_hash.hash()), + _ => true, + }); + }, + FollowStreamMsg::Event(FollowEvent::NewBlock(new_block_ev)) => { + // If a new runtime is seen, note it so that when a block is finalized, we + // can associate that with a runtime update having happened. 
+ if let Some(runtime_event) = &new_block_ev.new_runtime { + shared + .seen_runtime_events + .insert(new_block_ev.block_hash.hash(), runtime_event.clone()); + } + + shared + .block_events_for_new_subscriptions + .push_back(FollowEvent::NewBlock(new_block_ev)); + }, + FollowStreamMsg::Event(ev @ FollowEvent::BestBlockChanged(_)) => { + shared.block_events_for_new_subscriptions.push_back(ev); + }, + FollowStreamMsg::Event(FollowEvent::Stop) => { + // On a stop event, clear everything. Wait for resubscription and new + // ready/initialised events. + shared.block_events_for_new_subscriptions.clear(); + shared.current_subscription_id = None; + shared.current_init_message = None; + }, + _ => { + // We don't buffer any other events. + }, + } + } + + /// Create a new subscription. + pub fn subscribe(&self) -> FollowStreamDriverSubscription { + let mut shared = self.0.lock().unwrap(); + + let id = shared.next_id; + shared.next_id += 1; + + shared + .subscribers + .insert(id, SubscriberDetails { items: VecDeque::new(), waker: None }); + + // Any new subscription should start with a "Ready" message and then an "Initialized" + // message, and then any non-finalized block events since that. If these don't exist, + // it means the subscription is currently stopped, and we should expect new Ready/Init + // messages anyway once it restarts. 
+ let mut local_items = VecDeque::new(); + if let Some(sub_id) = &shared.current_subscription_id { + local_items.push_back(FollowStreamMsg::Ready(sub_id.clone())); + } + if let Some(init_msg) = &shared.current_init_message { + local_items + .push_back(FollowStreamMsg::Event(FollowEvent::Initialized(init_msg.clone()))); + } + for ev in &shared.block_events_for_new_subscriptions { + local_items.push_back(FollowStreamMsg::Event(ev.clone())); + } + + drop(shared); + + FollowStreamDriverSubscription { id, done: false, shared: self.clone(), local_items } + } +} + +/// Details for a given subscriber: any items it's not yet claimed, +/// and a way to wake it up when there are more items for it. +#[derive(Debug)] +struct SubscriberDetails { + items: VecDeque>>, + waker: Option, +} + +/// A stream that subscribes to finalized blocks +/// and indicates whether a block was missed if was restarted. +#[derive(Debug)] +pub struct FollowStreamFinalizedHeads { + stream: FollowStreamDriverSubscription, + sub_id: Option, + last_seen_block: Option>, + f: F, + is_done: bool, +} + +impl Unpin for FollowStreamFinalizedHeads {} + +impl FollowStreamFinalizedHeads +where + H: Hash, + F: Fn(FollowEvent>) -> Vec>, +{ + pub fn new(stream: FollowStreamDriverSubscription, f: F) -> Self { + Self { stream, sub_id: None, last_seen_block: None, f, is_done: false } + } +} + +impl Stream for FollowStreamFinalizedHeads +where + H: Hash, + F: Fn(FollowEvent>) -> Vec>, +{ + type Item = Result<(String, Vec>), BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.is_done { + return Poll::Ready(None); + } + + loop { + let Some(ev) = futures::ready!(self.stream.poll_next_unpin(cx)) else { + self.is_done = true; + return Poll::Ready(None); + }; + + let block_refs = match ev { + FollowStreamMsg::Ready(sub_id) => { + self.sub_id = Some(sub_id); + continue; + }, + FollowStreamMsg::Event(FollowEvent::Finalized(finalized)) => { + self.last_seen_block = 
finalized.finalized_block_hashes.last().cloned(); + + (self.f)(FollowEvent::Finalized(finalized)) + }, + FollowStreamMsg::Event(FollowEvent::Initialized(mut init)) => { + let prev = self.last_seen_block.take(); + self.last_seen_block = init.finalized_block_hashes.last().cloned(); + + if let Some(p) = prev { + let Some(pos) = + init.finalized_block_hashes.iter().position(|b| b.hash() == p.hash()) + else { + return Poll::Ready(Some(Err(RpcError::ClientError( + pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect( + "Missed at least one block when the connection was lost" + .to_owned(), + ), + ) + .into()))); + }; + + // If we got older blocks than `prev`, we need to remove them + // because they should already have been sent at this point. + init.finalized_block_hashes.drain(0..=pos); + } + + (self.f)(FollowEvent::Initialized(init)) + }, + FollowStreamMsg::Event(ev) => (self.f)(ev), + }; + + if block_refs.is_empty() { + continue; + } + + let sub_id = + self.sub_id.clone().expect("Ready is always emitted before any other event"); + + return Poll::Ready(Some(Ok((sub_id, block_refs)))); + } + } +} + +#[cfg(test)] +mod test_utils { + use super::{super::follow_stream_unpin::test_utils::test_unpin_stream_getter, *}; + + /// Return a `FollowStreamDriver` + pub fn test_follow_stream_driver_getter( + events: F, + max_life: usize, + ) -> FollowStreamDriver + where + H: Hash + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + let (stream, _) = test_unpin_stream_getter(events, max_life); + FollowStreamDriver::new(stream) + } +} + +#[cfg(test)] +mod test { + use futures::TryStreamExt; + use primitive_types::H256; + + use super::{ + super::{ + follow_stream::test_utils::{ + ev_best_block, ev_finalized, ev_initialized, ev_new_block, + }, + follow_stream_unpin::test_utils::{ + ev_best_block_ref, ev_finalized_ref, ev_initialized_ref, ev_new_block_ref, + }, + }, + test_utils::test_follow_stream_driver_getter, + *, + }; + + #[test] + fn 
follow_stream_driver_is_sendable() { + fn assert_send(_: T) {} + let stream_getter = test_follow_stream_driver_getter(|| [Ok(ev_initialized(1))], 10); + assert_send(stream_getter); + } + + #[tokio::test] + async fn subscribers_all_receive_events_and_finish_gracefully_on_error() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + let a = handle.subscribe(); + let b = handle.subscribe(); + let c = handle.subscribe(); + + // Drive to completion (the sort of real life usage I'd expect): + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let a_vec: Vec<_> = a.collect().await; + let b_vec: Vec<_> = b.collect().await; + let c_vec: Vec<_> = c.collect().await; + + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(0)), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_best_block_ref(1)), + FollowStreamMsg::Event(ev_finalized_ref([1])), + ]; + + assert_eq!(a_vec, expected); + assert_eq!(b_vec, expected); + assert_eq!(c_vec, expected); + } + + #[tokio::test] + async fn subscribers_receive_block_events_from_last_finalised() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + // Skip past ready, init, new, best events. 
+ let _r = driver.next().await.unwrap(); + let _i0 = driver.next().await.unwrap(); + let _n1 = driver.next().await.unwrap(); + let _b1 = driver.next().await.unwrap(); + + // THEN subscribe; subscription should still receive them: + let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(0)), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_best_block_ref(1)), + ]; + assert_eq!(evs, expected); + + // Skip past finalized 1, new 2, new 3 events + let _f1 = driver.next().await.unwrap(); + let _n2 = driver.next().await.unwrap(); + let _n3 = driver.next().await.unwrap(); + + // THEN subscribe again; new subs will see an updated initialized message + // with the latest finalized block hash. + let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribers_receive_new_blocks_before_subscribing() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + // Skip to the first finalized block F1. + let _r = driver.next().await.unwrap(); + let _i0 = driver.next().await.unwrap(); + let _n1 = driver.next().await.unwrap(); + let _b1 = driver.next().await.unwrap(); + let _n2 = driver.next().await.unwrap(); + let _n3 = driver.next().await.unwrap(); + let _f1 = driver.next().await.unwrap(); + + // THEN subscribe; and make sure new block 1 and 2 are received. 
+ let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribe_finalized_blocks_restart_works() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(FollowEvent::Stop), + Ok(ev_initialized(1)), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let f = |ev| match ev { + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + FollowEvent::Initialized(ev) => ev.finalized_block_hashes, + _ => vec![], + }; + + let stream = FollowStreamFinalizedHeads::new(handle.subscribe(), f); + let evs: Vec<_> = stream.try_collect().await.unwrap(); + + let expected = vec![ + ("sub_id_0".to_string(), vec![BlockRef::new(H256::from_low_u64_le(0))]), + ("sub_id_0".to_string(), vec![BlockRef::new(H256::from_low_u64_le(1))]), + ("sub_id_5".to_string(), vec![BlockRef::new(H256::from_low_u64_le(2))]), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribe_finalized_blocks_restart_with_missed_blocks() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(FollowEvent::Stop), + // Emulate that we missed some blocks. 
+ Ok(ev_initialized(13)), + Ok(ev_finalized([14], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let f = |ev| match ev { + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + FollowEvent::Initialized(ev) => ev.finalized_block_hashes, + _ => vec![], + }; + + let evs: Vec<_> = FollowStreamFinalizedHeads::new(handle.subscribe(), f).collect().await; + + assert_eq!( + evs[0].as_ref().unwrap(), + &("sub_id_0".to_string(), vec![BlockRef::new(H256::from_low_u64_le(0))]) + ); + assert!( + matches!(&evs[1], Err(BackendError::Rpc(RpcError::ClientError(pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost")) + ); + assert_eq!( + evs[2].as_ref().unwrap(), + &("sub_id_2".to_string(), vec![BlockRef::new(H256::from_low_u64_le(14))]) + ); + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_unpin.rs b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_unpin.rs new file mode 100644 index 00000000..cf95540d --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/follow_stream_unpin.rs @@ -0,0 +1,807 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{ChainHeadRpcMethods, follow_stream::FollowStream}; +use crate::{ + config::{Config, Hash, HashFor}, + error::BackendError, +}; +use futures::stream::{FuturesUnordered, Stream, StreamExt}; +use pezkuwi_subxt_rpcs::methods::chain_head::{ + BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, +}; + +use std::{ + collections::{HashMap, HashSet}, + future::Future, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, +}; + +/// The type of stream item. 
+pub use super::follow_stream::FollowStreamMsg; + +/// A `Stream` which builds on `FollowStream`, and handles pinning. It replaces any block hash seen +/// in the follow events with a `BlockRef` which, when all clones are dropped, will lead to an +/// "unpin" call for that block hash being queued. It will also automatically unpin any blocks that +/// exceed a given max age, to try and prevent the underlying stream from ending (and _all_ blocks +/// from being unpinned as a result). Put simply, it tries to keep every block pinned as long as +/// possible until the block is no longer used anywhere. +#[derive(Debug)] +pub struct FollowStreamUnpin { + // The underlying stream of events. + inner: FollowStream, + // A method to call to unpin a block, given a block hash and a subscription ID. + unpin_method: UnpinMethodHolder, + // Futures for sending unpin events that we'll poll to completion as + // part of polling the stream as a whole. + unpin_futs: FuturesUnordered, + // Each time a new finalized block is seen, we give it an age of `next_rel_block_age`, + // and then increment this ready for the next finalized block. So, the first finalized + // block will have an age of 0, the next 1, 2, 3 and so on. We can then use `max_block_life` + // to say "unpin all blocks with an age < (next_rel_block_age-1) - max_block_life". + next_rel_block_age: usize, + // The latest ID of the FollowStream subscription, which we can use + // to unpin blocks. + subscription_id: Option>, + // The longest period a block can be pinned for. + max_block_life: usize, + // The currently seen and pinned blocks. + pinned: HashMap>, + // Shared state about blocks we've flagged to unpin from elsewhere + unpin_flags: UnpinFlags, +} + +// Just a wrapper to make implementing debug on the whole thing easier. 
+struct UnpinMethodHolder(UnpinMethod); +impl std::fmt::Debug for UnpinMethodHolder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "UnpinMethodHolder(Box) -> UnpinFut>)") + } +} + +/// The type of the unpin method that we need to provide. +pub type UnpinMethod = Box) -> UnpinFut + Send>; + +/// The future returned from [`UnpinMethod`]. +pub type UnpinFut = Pin + Send + 'static>>; + +impl std::marker::Unpin for FollowStreamUnpin {} + +impl Stream for FollowStreamUnpin { + type Item = Result>, BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + + loop { + // Poll any queued unpin tasks. + let unpin_futs_are_pending = match this.unpin_futs.poll_next_unpin(cx) { + Poll::Ready(Some(())) => continue, + Poll::Ready(None) => false, + Poll::Pending => true, + }; + + // Poll the inner stream for the next event. + let Poll::Ready(ev) = this.inner.poll_next_unpin(cx) else { + return Poll::Pending; + }; + + let Some(ev) = ev else { + // if the stream is done, but `unpin_futs` are still pending, then + // return pending here so that they are still driven to completion. + // Else, return `Ready(None)` to signal nothing left to do. + return match unpin_futs_are_pending { + true => Poll::Pending, + false => Poll::Ready(None), + }; + }; + + // Error? just return it and do nothing further. + let ev = match ev { + Ok(ev) => ev, + Err(e) => { + return Poll::Ready(Some(Err(e))); + }, + }; + + // React to any actual FollowEvent we get back. + let ev = match ev { + FollowStreamMsg::Ready(subscription_id) => { + // update the subscription ID we'll use to unpin things. + this.subscription_id = Some(subscription_id.clone().into()); + + FollowStreamMsg::Ready(subscription_id) + }, + FollowStreamMsg::Event(FollowEvent::Initialized(details)) => { + let mut finalized_block_hashes = + Vec::with_capacity(details.finalized_block_hashes.len()); + + // Pin each of the finalized blocks. 
None of them will show up again (except as + // a parent block), and so they can all be unpinned immediately at any + // time. Increment the block age for each one, so that older finalized + // blocks are pruned first. + for finalized_block in &details.finalized_block_hashes { + let rel_block_age = this.next_rel_block_age; + let block_ref = + this.pin_unpinnable_block_at(rel_block_age, *finalized_block); + + finalized_block_hashes.push(block_ref); + this.next_rel_block_age += 1; + } + + FollowStreamMsg::Event(FollowEvent::Initialized(Initialized { + finalized_block_hashes, + finalized_block_runtime: details.finalized_block_runtime, + })) + }, + FollowStreamMsg::Event(FollowEvent::NewBlock(details)) => { + // One bigger than our parent, and if no parent seen (maybe it was + // unpinned already), then one bigger than the last finalized block num + // as a best guess. + let parent_rel_block_age = this + .pinned + .get(&details.parent_block_hash) + .map(|p| p.rel_block_age) + .unwrap_or(this.next_rel_block_age.saturating_sub(1)); + + let block_ref = this.pin_block_at(parent_rel_block_age + 1, details.block_hash); + let parent_block_ref = + this.pin_block_at(parent_rel_block_age, details.parent_block_hash); + + FollowStreamMsg::Event(FollowEvent::NewBlock(NewBlock { + block_hash: block_ref, + parent_block_hash: parent_block_ref, + new_runtime: details.new_runtime, + })) + }, + FollowStreamMsg::Event(FollowEvent::BestBlockChanged(details)) => { + // We expect this block to already exist, so it'll keep its existing block_num, + // but worst case it'll just get the current finalized block_num + 1. 
+ let rel_block_age = this.next_rel_block_age; + let block_ref = this.pin_block_at(rel_block_age, details.best_block_hash); + + FollowStreamMsg::Event(FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: block_ref, + })) + }, + FollowStreamMsg::Event(FollowEvent::Finalized(details)) => { + let finalized_block_refs: Vec<_> = details + .finalized_block_hashes + .into_iter() + .enumerate() + .map(|(idx, hash)| { + // These blocks _should_ exist already and so will have a known block + // num, but if they don't, we just increment the num from the + // last finalized block we saw, which should be accurate. + // + // `pin_unpinnable_block_at` indicates that the block will not show up + // in future events (They will show up as a parent block, but we + // don't care about that right now). + let rel_block_age = this.next_rel_block_age + idx; + this.pin_unpinnable_block_at(rel_block_age, hash) + }) + .collect(); + + // Our relative block height is increased by however many finalized + // blocks we've seen. + this.next_rel_block_age += finalized_block_refs.len(); + + let pruned_block_refs: Vec<_> = details + .pruned_block_hashes + .into_iter() + .map(|hash| { + // We should know about these, too, and if not we set their age to + // last_finalized + 1. + // + // `pin_unpinnable_block_at` indicates that the block will not show up + // in future events. + let rel_block_age = this.next_rel_block_age; + this.pin_unpinnable_block_at(rel_block_age, hash) + }) + .collect(); + + // At this point, we also check to see which blocks we should submit unpin + // events for. We will unpin: + // - Any block that's older than the max age. + // - Any block that has no references left (ie has been dropped) that _also_ has + // showed up in the pruned list in a finalized event (so it will never be in + // another event). 
+ this.unpin_blocks(cx.waker()); + + FollowStreamMsg::Event(FollowEvent::Finalized(Finalized { + finalized_block_hashes: finalized_block_refs, + pruned_block_hashes: pruned_block_refs, + })) + }, + FollowStreamMsg::Event(FollowEvent::Stop) => { + // clear out "old" things that are no longer applicable since + // the subscription has ended (a new one will be created under the hood, at + // which point we'll get given a new subscription ID. + this.subscription_id = None; + this.pinned.clear(); + this.unpin_futs.clear(); + this.unpin_flags.lock().unwrap().clear(); + this.next_rel_block_age = 0; + + FollowStreamMsg::Event(FollowEvent::Stop) + }, + // These events aren't interesting; we just forward them on: + FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) => + FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)), + FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) => + FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)), + FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) => + FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)), + FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) => + FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)), + FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) => + FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)), + FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) => + FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)), + FollowStreamMsg::Event(FollowEvent::OperationError(details)) => + FollowStreamMsg::Event(FollowEvent::OperationError(details)), + }; + + // Return our event. + return Poll::Ready(Some(Ok(ev))); + } + } +} + +impl FollowStreamUnpin { + /// Create a new [`FollowStreamUnpin`]. 
+ pub fn new( + follow_stream: FollowStream, + unpin_method: UnpinMethod, + max_block_life: usize, + ) -> Self { + Self { + inner: follow_stream, + unpin_method: UnpinMethodHolder(unpin_method), + max_block_life, + pinned: Default::default(), + subscription_id: None, + next_rel_block_age: 0, + unpin_flags: Default::default(), + unpin_futs: Default::default(), + } + } + + /// Create a new [`FollowStreamUnpin`] given the RPC methods. + pub fn from_methods( + follow_stream: FollowStream>, + methods: ChainHeadRpcMethods, + max_block_life: usize, + ) -> FollowStreamUnpin> { + let unpin_method = Box::new(move |hash: HashFor, sub_id: Arc| { + let methods = methods.clone(); + let fut: UnpinFut = Box::pin(async move { + // We ignore any errors trying to unpin at the moment. + let _ = methods.chainhead_v1_unpin(&sub_id, hash).await; + }); + fut + }); + + FollowStreamUnpin::new(follow_stream, unpin_method, max_block_life) + } + + /// Is the block hash currently pinned. + pub fn is_pinned(&self, hash: &H) -> bool { + self.pinned.contains_key(hash) + } + + /// Pin a block, or return the reference to an already-pinned block. If the block has been + /// registered to be unpinned, we'll clear those flags, so that it won't be unpinned. If the + /// unpin request has already been sent though, then the block will be unpinned. + fn pin_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { + self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, false) + } + + /// Pin a block, or return the reference to an already-pinned block. + /// + /// This is the same as [`Self::pin_block_at`], except that it also marks the block as being + /// unpinnable now, which should be done for any block that will no longer be seen in future + /// events. 
+ fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { + self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, true) + } + + fn pin_block_at_setting_unpinnable_flag( + &mut self, + rel_block_age: usize, + hash: H, + can_be_unpinned: bool, + ) -> BlockRef { + let entry = self + .pinned + .entry(hash) + // If there's already an entry, then clear any unpin_flags and update the + // can_be_unpinned status (this can become true but cannot become false again + // once true). + .and_modify(|entry| { + entry.can_be_unpinned = entry.can_be_unpinned || can_be_unpinned; + self.unpin_flags.lock().unwrap().remove(&hash); + }) + // If there's not an entry already, make one and return it. + .or_insert_with(|| PinnedDetails { + rel_block_age, + block_ref: BlockRef { + inner: Arc::new(BlockRefInner { hash, unpin_flags: self.unpin_flags.clone() }), + }, + can_be_unpinned, + }); + + entry.block_ref.clone() + } + + /// Unpin any blocks that are either too old, or have the unpin flag set and are old enough. + fn unpin_blocks(&mut self, waker: &Waker) { + let mut unpin_flags = self.unpin_flags.lock().unwrap(); + + // This gets the age of the last finalized block. + let rel_block_age = self.next_rel_block_age.saturating_sub(1); + + // If we asked to unpin and there was no subscription_id, then there's nothing we can do, + // and nothing will need unpinning now anyway. + let Some(sub_id) = &self.subscription_id else { + return; + }; + + let mut blocks_to_unpin = vec![]; + for (hash, details) in &self.pinned { + if rel_block_age.saturating_sub(details.rel_block_age) >= self.max_block_life || + (unpin_flags.contains(hash) && details.can_be_unpinned) + { + // The block is too old, or it's been flagged to be unpinned and won't be in a + // future backend event, so we can unpin it for real now. + blocks_to_unpin.push(*hash); + // Clear it from our unpin flags if present so that we don't try to unpin it again. 
+ unpin_flags.remove(hash); + } + } + + // Release our lock on unpin_flags ASAP. + drop(unpin_flags); + + // No need to call the waker etc if nothing to do: + if blocks_to_unpin.is_empty() { + return; + } + + for hash in blocks_to_unpin { + self.pinned.remove(&hash); + let fut = (self.unpin_method.0)(hash, sub_id.clone()); + self.unpin_futs.push(fut); + } + + // Any new futures pushed above need polling to start. We could + // just wait for the next stream event, but let's wake the task to + // have it polled sooner, just in case it's slow to receive things. + waker.wake_by_ref(); + } +} + +// The set of block hashes that can be unpinned when ready. +// BlockRefs write to this when they are dropped. +type UnpinFlags = Arc>>; + +#[derive(Debug)] +struct PinnedDetails { + /// Relatively speaking, how old is the block? When we start following + /// blocks, the first finalized block gets an age of 0, the second an age + /// of 1 and so on. + rel_block_age: usize, + /// A block ref we can hand out to keep blocks pinned. + /// Because we store one here until it's unpinned, the live count + /// will only drop to 1 when no external refs are left. + block_ref: BlockRef, + /// Has this block showed up in the list of pruned blocks, or has it + /// been finalized? In this case, it can now been pinned as it won't + /// show up again in future events (except as a "parent block" of some + /// new block, which we're currently ignoring). + can_be_unpinned: bool, +} + +/// All blocks reported will be wrapped in this. +#[derive(Debug, Clone)] +pub struct BlockRef { + inner: Arc>, +} + +#[derive(Debug)] +struct BlockRefInner { + hash: H, + unpin_flags: UnpinFlags, +} + +impl BlockRef { + /// For testing purposes only, create a BlockRef from a hash + /// that isn't pinned. + #[cfg(test)] + pub fn new(hash: H) -> Self { + BlockRef { inner: Arc::new(BlockRefInner { hash, unpin_flags: Default::default() }) } + } + + /// Return the hash for this block. 
+ pub fn hash(&self) -> H { + self.inner.hash + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &Self) -> bool { + self.inner.hash == other.inner.hash + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &H) -> bool { + &self.inner.hash == other + } +} + +impl Drop for BlockRef { + fn drop(&mut self) { + // PinnedDetails keeps one ref, so if this is the second ref, it's the + // only "external" one left and we should ask to unpin it now. if it's + // the only ref remaining, it means that it's already been unpinned, so + // nothing to do here anyway. + if Arc::strong_count(&self.inner) == 2 { + if let Ok(mut unpin_flags) = self.inner.unpin_flags.lock() { + unpin_flags.insert(self.inner.hash); + } + } + } +} + +#[cfg(test)] +pub(super) mod test_utils { + use super::{ + super::follow_stream::{FollowStream, test_utils::test_stream_getter}, + *, + }; + use crate::config::bizinikiwi::H256; + + pub type UnpinRx = std::sync::mpsc::Receiver<(H, Arc)>; + + /// Get a [`FollowStreamUnpin`] from an iterator over events. + pub fn test_unpin_stream_getter( + events: F, + max_life: usize, + ) -> (FollowStreamUnpin, UnpinRx) + where + H: Hash + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + // Unpin requests will come here so that we can look out for them. + let (unpin_tx, unpin_rx) = std::sync::mpsc::channel(); + + let follow_stream = FollowStream::new(test_stream_getter(events)); + let unpin_method: UnpinMethod = Box::new(move |hash, sub_id| { + unpin_tx.send((hash, sub_id)).unwrap(); + Box::pin(std::future::ready(())) + }); + + let follow_unpin = FollowStreamUnpin::new(follow_stream, unpin_method, max_life); + (follow_unpin, unpin_rx) + } + + /// Assert that the unpinned blocks sent from the `UnpinRx` channel match the items given. 
+ pub fn assert_from_unpin_rx( + unpin_rx: &UnpinRx, + items: impl IntoIterator, + ) { + let expected_hashes = HashSet::::from_iter(items); + for i in 0..expected_hashes.len() { + let Ok((hash, _)) = unpin_rx.try_recv() else { + panic!("Another unpin event is expected, but failed to pull item {i} from channel"); + }; + assert!( + expected_hashes.contains(&hash), + "Hash {hash:?} was unpinned, but is not expected to have been" + ); + } + } + + /// An initialized event containing a BlockRef (useful for comparisons) + pub fn ev_initialized_ref(n: u64) -> FollowEvent> { + FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![BlockRef::new(H256::from_low_u64_le(n))], + finalized_block_runtime: None, + }) + } + + /// A new block event containing a BlockRef (useful for comparisons) + pub fn ev_new_block_ref(parent: u64, n: u64) -> FollowEvent> { + FollowEvent::NewBlock(NewBlock { + parent_block_hash: BlockRef::new(H256::from_low_u64_le(parent)), + block_hash: BlockRef::new(H256::from_low_u64_le(n)), + new_runtime: None, + }) + } + + /// A best block event containing a BlockRef (useful for comparisons) + pub fn ev_best_block_ref(n: u64) -> FollowEvent> { + FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: BlockRef::new(H256::from_low_u64_le(n)), + }) + } + + /// A finalized event containing a BlockRef (useful for comparisons) + pub fn ev_finalized_ref(ns: impl IntoIterator) -> FollowEvent> { + FollowEvent::Finalized(Finalized { + finalized_block_hashes: ns + .into_iter() + .map(|h| BlockRef::new(H256::from_low_u64_le(h))) + .collect(), + pruned_block_hashes: vec![], + }) + } +} + +#[cfg(test)] +mod test { + use super::{ + super::follow_stream::test_utils::{ + ev_best_block, ev_finalized, ev_initialized, ev_new_block, + }, + test_utils::{assert_from_unpin_rx, ev_new_block_ref, test_unpin_stream_getter}, + *, + }; + use crate::config::bizinikiwi::H256; + + #[tokio::test] + async fn hands_back_blocks() { + let (follow_unpin, _) = 
test_unpin_stream_getter( + || { + [ + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let out: Vec<_> = follow_unpin.filter_map(async |e| e.ok()).collect().await; + + assert_eq!( + out, + vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ] + ); + } + + #[tokio::test] + async fn unpins_initialized_block() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 3, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + + // Drop the initialized block: + let i0 = follow_unpin.next().await.unwrap().unwrap(); + drop(i0); + + // Let a finalization event occur. + let _f1 = follow_unpin.next().await.unwrap().unwrap(); + + // Now, initialized block should be unpinned. 
+ assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); + } + + #[tokio::test] + async fn unpins_old_blocks() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Ok(ev_finalized([3], [])), + Ok(ev_finalized([4], [])), + Ok(ev_finalized([5], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 3, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f1 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f2 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f3 = follow_unpin.next().await.unwrap().unwrap(); + + // Max age is 3, so after block 3 finalized, block 0 becomes too old and is unpinned. + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + + let _f4 = follow_unpin.next().await.unwrap().unwrap(); + + // Block 1 is now too old and is unpinned. + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + + let _f5 = follow_unpin.next().await.unwrap().unwrap(); + + // Block 2 is now too old and is unpinned. 
+ assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(2)]); + } + + #[tokio::test] + async fn dropped_new_blocks_should_not_get_unpinned_until_finalization() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + + // New blocks dropped but still pinned: + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After block 1 finalized, both blocks are still pinned because: + // - block 1 was handed back in the finalized event, so will be unpinned next time. + // - block 2 wasn't mentioned in the finalized event, so should not have been unpinned yet. + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After block 2 finalized, block 1 can be unpinned finally, but block 2 needs to wait one + // more event. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + } + + #[tokio::test] + async fn dropped_new_blocks_should_not_get_unpinned_until_pruned() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(1, 3)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [3])), + Ok(ev_finalized([4], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + let n3 = follow_unpin.next().await.unwrap().unwrap(); + drop(n3); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After block 1 is finalized, everything is still pinned because the finalization event + // itself returns 1, and 2/3 aren't finalized or pruned yet. + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After the next finalization event, block 1 can finally be unpinned since it was Finalized + // last event _and_ is no longer handed back anywhere. 2 and 3 should still be pinned. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + + let f4 = follow_unpin.next().await.unwrap().unwrap(); + drop(f4); + + // After some other finalized event, we are now allowed to ditch the previously pruned and + // finalized blocks 2 and 3. + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(2), H256::from_low_u64_le(3)]); + } + + #[tokio::test] + async fn never_unpin_new_block_before_finalized() { + // Ensure that if we drop a new block; the pinning is still active until the block is + // finalized. + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + + // drop initialised block 0 and new block 1 and new block 2. + let i0 = follow_unpin.next().await.unwrap().unwrap(); + drop(i0); + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + let b1 = follow_unpin.next().await.unwrap().unwrap(); + drop(b1); + + // Nothing unpinned yet! + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After finalization, block 1 is now ready to be unpinned (it won't be seen again), + // but isn't actually unpinned yet (because it was just handed back in f1). Block 0 + // however has now been unpinned. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After f2, we can get rid of block 1 now, which was finalized last time. + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/mod.rs b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/mod.rs new file mode 100644 index 00000000..83bef67c --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/mod.rs @@ -0,0 +1,864 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module will expose a backend implementation based on the new APIs +//! described at . See +//! [`rpc_methods`] for the raw API calls. +//! +//! # Warning +//! +//! Everything in this module is **unstable**, meaning that it could change without +//! warning at any time. 
+ +mod follow_stream; +mod follow_stream_driver; +mod follow_stream_unpin; +mod storage_items; + +use self::follow_stream_driver::FollowStreamFinalizedHeads; +use crate::{ + backend::{ + Backend, BlockRef, BlockRefT, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, utils::retry, + }, + config::{Config, Hash, HashFor}, + error::{BackendError, RpcError}, +}; +use async_trait::async_trait; +use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle}; +use futures::{Stream, StreamExt, future::Either}; +use pezkuwi_subxt_rpcs::{ + RpcClient, + methods::chain_head::{ + FollowEvent, MethodResponse, RuntimeEvent, StorageQuery, StorageQueryType, + StorageResultType, + }, +}; +use std::{collections::HashMap, task::Poll}; +use storage_items::StorageItems; + +/// Re-export RPC types and methods from [`pezkuwi_subxt_rpcs::methods::chain_head`]. +pub mod rpc_methods { + pub use pezkuwi_subxt_rpcs::methods::legacy::*; +} + +// Expose the RPC methods. +pub use pezkuwi_subxt_rpcs::methods::chain_head::ChainHeadRpcMethods; + +/// Configure and build an [`ChainHeadBackend`]. +pub struct ChainHeadBackendBuilder { + max_block_life: usize, + transaction_timeout_secs: usize, + submit_transactions_ignoring_follow_events: bool, + _marker: std::marker::PhantomData, +} + +impl Default for ChainHeadBackendBuilder { + fn default() -> Self { + Self::new() + } +} + +impl ChainHeadBackendBuilder { + /// Create a new [`ChainHeadBackendBuilder`]. + pub fn new() -> Self { + Self { + max_block_life: usize::MAX, + transaction_timeout_secs: 240, + submit_transactions_ignoring_follow_events: false, + _marker: std::marker::PhantomData, + } + } + + /// The age of a block is defined here as the difference between the current finalized block + /// number and the block number of a given block. Once the difference equals or exceeds the + /// number given here, the block is unpinned. 
+ /// + /// By default, we will never automatically unpin blocks, but if the number of pinned blocks + /// that we keep hold of exceeds the number that the server can tolerate, then a `stop` event + /// is generated and we are forced to resubscribe, losing any pinned blocks. + pub fn max_block_life(mut self, max_block_life: usize) -> Self { + self.max_block_life = max_block_life; + self + } + + /// When a transaction is submitted, we wait for events indicating it's successfully made it + /// into a finalized block. If it takes too long for this to happen, we assume that something + /// went wrong and that we should give up waiting. + /// + /// Provide a value here to denote how long, in seconds, to wait before giving up. Defaults to + /// 240 seconds. + /// + /// If [`Self::submit_transactions_ignoring_follow_events()`] is called, this timeout is + /// ignored. + pub fn transaction_timeout(mut self, timeout_secs: usize) -> Self { + self.transaction_timeout_secs = timeout_secs; + self + } + + /// When a transaction is submitted, we normally synchronize the events that we get back with + /// events from our background `chainHead_follow` subscription, to ensure that any blocks + /// hashes that we see can be immediately queried (for example to get events or state at that + /// block), and are kept around unless they are no longer needed. + /// + /// The main downside of this synchronization is that there may be a delay in being handed back + /// a [`TransactionStatus::InFinalizedBlock`] event while we wait to see the same block hash + /// emitted from our background `chainHead_follow` subscription in order to ensure it's + /// available for querying. + /// + /// Calling this method turns off this synchronization, speeding up the response and removing + /// any reliance on the `chainHead_follow` subscription continuing to run without stopping + /// throughout submitting a transaction. 
+ /// + /// # Warning + /// + /// This can lead to errors when calling APIs like `wait_for_finalized_success`, which will try + /// to retrieve events at the finalized block, because there will be a race and the finalized + /// block may not be available for querying yet. + pub fn submit_transactions_ignoring_follow_events(mut self) -> Self { + self.submit_transactions_ignoring_follow_events = true; + self + } + + /// A low-level API to build the backend and driver which requires polling the driver for the + /// backend to make progress. + /// + /// This is useful if you want to manage the driver yourself, for example if you want to run it + /// in on a specific runtime. + /// + /// If you just want to run the driver in the background until completion in on the default + /// runtime, use [`ChainHeadBackendBuilder::build_with_background_driver`] instead. + pub fn build( + self, + client: impl Into, + ) -> (ChainHeadBackend, ChainHeadBackendDriver) { + // Construct the underlying follow_stream layers: + let rpc_methods = ChainHeadRpcMethods::new(client.into()); + let follow_stream = + follow_stream::FollowStream::>::from_methods(rpc_methods.clone()); + let follow_stream_unpin = + follow_stream_unpin::FollowStreamUnpin::>::from_methods( + follow_stream, + rpc_methods.clone(), + self.max_block_life, + ); + let follow_stream_driver = FollowStreamDriver::new(follow_stream_unpin); + + // Wrap these into the backend and driver that we'll expose. + let backend = ChainHeadBackend { + methods: rpc_methods, + follow_handle: follow_stream_driver.handle(), + transaction_timeout_secs: self.transaction_timeout_secs, + submit_transactions_ignoring_follow_events: self + .submit_transactions_ignoring_follow_events, + }; + let driver = ChainHeadBackendDriver { driver: follow_stream_driver }; + + (backend, driver) + } + + /// An API to build the backend and driver which will run in the background until completion + /// on the default runtime. 
+ /// + /// - On non-wasm targets, this will spawn the driver on `tokio`. + /// - On wasm targets, this will spawn the driver on `wasm-bindgen-futures`. + #[cfg(feature = "runtime")] + #[cfg_attr(docsrs, doc(cfg(feature = "runtime")))] + pub fn build_with_background_driver(self, client: impl Into) -> ChainHeadBackend { + fn spawn(future: F) { + #[cfg(not(target_family = "wasm"))] + tokio::spawn(async move { + future.await; + }); + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] + wasm_bindgen_futures::spawn_local(async move { + future.await; + }); + } + + let (backend, mut driver) = self.build(client); + spawn(async move { + // NOTE: we need to poll the driver until it's done i.e returns None + // to ensure that the backend is shutdown properly. + while let Some(res) = driver.next().await { + if let Err(err) = res { + tracing::debug!(target: "subxt", "chainHead backend error={err}"); + } + } + + tracing::debug!(target: "subxt", "chainHead backend was closed"); + }); + + backend + } +} + +/// Driver for the [`ChainHeadBackend`]. This must be polled in order for the +/// backend to make progress. +#[derive(Debug)] +pub struct ChainHeadBackendDriver { + driver: FollowStreamDriver>, +} + +impl Stream for ChainHeadBackendDriver { + type Item = > as Stream>::Item; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.driver.poll_next_unpin(cx) + } +} + +/// The chainHead backend. 
+#[derive(Debug, Clone)] +pub struct ChainHeadBackend { + // RPC methods we'll want to call: + methods: ChainHeadRpcMethods, + // A handle to the chainHead_follow subscription: + follow_handle: FollowStreamDriverHandle>, + // How long to wait until giving up on transactions: + transaction_timeout_secs: usize, + // Don't synchronise blocks with chainHead_follow when submitting txs: + submit_transactions_ignoring_follow_events: bool, +} + +impl ChainHeadBackend { + /// Configure and construct an [`ChainHeadBackend`] and the associated + /// [`ChainHeadBackendDriver`]. + pub fn builder() -> ChainHeadBackendBuilder { + ChainHeadBackendBuilder::new() + } + + /// Stream block headers based on the provided filter fn + async fn stream_headers( + &self, + f: F, + ) -> Result>)>, BackendError> + where + F: Fn( + FollowEvent>>, + ) -> Vec>> + + Send + + Sync + + 'static, + { + let methods = self.methods.clone(); + + let headers = + FollowStreamFinalizedHeads::new(self.follow_handle.subscribe(), f).flat_map(move |r| { + let methods = methods.clone(); + + let (sub_id, block_refs) = match r { + Ok(ev) => ev, + Err(e) => return Either::Left(futures::stream::once(async { Err(e) })), + }; + + Either::Right(futures::stream::iter(block_refs).filter_map(move |block_ref| { + let methods = methods.clone(); + let sub_id = sub_id.clone(); + + async move { + let res = methods + .chainhead_v1_header(&sub_id, block_ref.hash()) + .await + .transpose()?; + + let header = match res { + Ok(header) => header, + Err(e) => return Some(Err(e.into())), + }; + + Some(Ok((header, block_ref.into()))) + } + })) + }); + + Ok(StreamOf(Box::pin(headers))) + } +} + +impl BlockRefT for follow_stream_unpin::BlockRef {} +impl From> for BlockRef { + fn from(b: follow_stream_unpin::BlockRef) -> Self { + BlockRef::new(b.hash(), b) + } +} + +impl super::sealed::Sealed for ChainHeadBackend {} + +#[async_trait] +impl Backend for ChainHeadBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, 
+ ) -> Result, BackendError> { + retry(|| async { + let queries = keys + .iter() + .map(|key| StorageQuery { key: &**key, query_type: StorageQueryType::Value }); + + let storage_items = + StorageItems::from_methods(queries, at, &self.follow_handle, self.methods.clone()) + .await?; + + let stream = storage_items.filter_map(async |val| { + let val = match val { + Ok(val) => val, + Err(e) => return Some(Err(e)), + }; + + let StorageResultType::Value(result) = val.result else { + return None; + }; + Some(Ok(StorageResponse { key: val.key.0, value: result.0 })) + }); + + Ok(StreamOf(Box::pin(stream))) + }) + .await + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + retry(|| async { + // Ask for hashes, and then just ignore them and return the keys that come back. + let query = + StorageQuery { key: &*key, query_type: StorageQueryType::DescendantsHashes }; + + let storage_items = StorageItems::from_methods( + std::iter::once(query), + at, + &self.follow_handle, + self.methods.clone(), + ) + .await?; + + let storage_result_stream = storage_items.map(|val| val.map(|v| v.key.0)); + Ok(StreamOf(Box::pin(storage_result_stream))) + }) + .await + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let query = + StorageQuery { key: &*key, query_type: StorageQueryType::DescendantsValues }; + + let storage_items = StorageItems::from_methods( + std::iter::once(query), + at, + &self.follow_handle, + self.methods.clone(), + ) + .await?; + + let storage_result_stream = storage_items.filter_map(async |val| { + let val = match val { + Ok(val) => val, + Err(e) => return Some(Err(e)), + }; + + let StorageResultType::Value(result) = val.result else { + return None; + }; + Some(Ok(StorageResponse { key: val.key.0, value: result.0 })) + }); + + Ok(StreamOf(Box::pin(storage_result_stream))) + }) + .await + } + + async fn genesis_hash(&self) -> 
Result, BackendError> { + retry(|| async { + let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?; + Ok(genesis_hash) + }) + .await + } + + async fn block_header(&self, at: HashFor) -> Result, BackendError> { + retry(|| async { + let sub_id = get_subscription_id(&self.follow_handle).await?; + let header = self.methods.chainhead_v1_header(&sub_id, at).await?; + Ok(header) + }) + .await + } + + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { + retry(|| async { + let sub_id = get_subscription_id(&self.follow_handle).await?; + + // Subscribe to the body response and get our operationId back. + let follow_events = self.follow_handle.subscribe().events(); + let status = self.methods.chainhead_v1_body(&sub_id, at).await?; + let operation_id = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id, + }; + + // Wait for the response to come back with the correct operationId. + let mut exts_stream = follow_events.filter_map(|ev| { + let FollowEvent::OperationBodyDone(body) = ev else { + return std::future::ready(None); + }; + if body.operation_id != operation_id { + return std::future::ready(None); + } + let exts: Vec<_> = body.value.into_iter().map(|ext| ext.0).collect(); + std::future::ready(Some(exts)) + }); + + Ok(exts_stream.next().await) + }) + .await + } + + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { + let next_ref: Option>> = self + .follow_handle + .subscribe() + .events() + .filter_map(|ev| { + let out = match ev { + FollowEvent::Initialized(init) => + init.finalized_block_hashes.last().map(|b| b.clone().into()), + _ => None, + }; + std::future::ready(out) + }) + .next() + .await; + + next_ref.ok_or_else(|| RpcError::SubscriptionDropped.into()) + } + + async fn current_runtime_version(&self) -> Result { + // Just start a stream of version infos, and return the first value we get from it. 
+ let runtime_version = self.stream_runtime_version().await?.next().await; + match runtime_version { + None => Err(BackendError::Rpc(RpcError::SubscriptionDropped)), + Some(Err(e)) => Err(e), + Some(Ok(version)) => Ok(version), + } + } + + async fn stream_runtime_version( + &self, + ) -> Result, BackendError> { + // Keep track of runtime details announced in new blocks, and then when blocks + // are finalized, find the latest of these that has runtime details, and clear the rest. + let mut runtimes = HashMap::new(); + let runtime_stream = self + .follow_handle + .subscribe() + .events() + .filter_map(move |ev| { + let output = match ev { + FollowEvent::Initialized(ev) => { + for finalized_block in ev.finalized_block_hashes { + runtimes.remove(&finalized_block.hash()); + } + ev.finalized_block_runtime + } + FollowEvent::NewBlock(ev) => { + if let Some(runtime) = ev.new_runtime { + runtimes.insert(ev.block_hash.hash(), runtime); + } + None + } + FollowEvent::Finalized(ev) => { + let next_runtime = { + let mut it = ev + .finalized_block_hashes + .iter() + .rev() + .filter_map(|h| runtimes.get(&h.hash()).cloned()) + .peekable(); + + let next = it.next(); + + if it.peek().is_some() { + tracing::warn!( + target: "subxt", + "Several runtime upgrades in the finalized blocks but only the latest runtime upgrade is returned" + ); + } + + next + }; + + // Remove finalized and pruned blocks as valid runtime upgrades. 
+ for block in ev + .finalized_block_hashes + .iter() + .chain(ev.pruned_block_hashes.iter()) + { + runtimes.remove(&block.hash()); + } + + next_runtime + } + _ => None, + }; + + let runtime_event = match output { + None => return std::future::ready(None), + Some(ev) => ev, + }; + + let runtime_details = match runtime_event { + RuntimeEvent::Invalid(err) => { + return std::future::ready(Some(Err(BackendError::Other(format!("Invalid runtime error using chainHead RPCs: {}", err.error))))) + } + RuntimeEvent::Valid(ev) => ev, + }; + + let runtime_version = RuntimeVersion { + spec_version: runtime_details.spec.spec_version, + transaction_version: runtime_details.spec.transaction_version + }; + std::future::ready(Some(Ok(runtime_version))) + }); + + Ok(StreamOf::new(Box::pin(runtime_stream))) + } + + async fn stream_all_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + // TODO: https://github.com/pezkuwichain/subxt/issues/1568 + // + // It's possible that blocks may be silently missed if + // a reconnection occurs because it's restarted by the unstable backend. + self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::NewBlock(ev) => { + vec![ev.block_hash] + }, + _ => vec![], + }) + .await + } + + async fn stream_best_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + // TODO: https://github.com/pezkuwichain/subxt/issues/1568 + // + // It's possible that blocks may be silently missed if + // a reconnection occurs because it's restarted by the unstable backend. 
+ self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::BestBlockChanged(ev) => vec![ev.best_block_hash], + _ => vec![], + }) + .await + } + + async fn stream_finalized_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + _ => vec![], + }) + .await + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>>, BackendError> { + // Submit a transaction. This makes no attempt to sync with follow events, + async fn submit_transaction_ignoring_follow_events( + extrinsic: &[u8], + methods: &ChainHeadRpcMethods, + ) -> Result>>, BackendError> { + let tx_progress = + methods.transactionwatch_v1_submit_and_watch(extrinsic).await?.map(|ev| { + ev.map(|tx_status| { + use pezkuwi_subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; + match tx_status { + RpcTransactionStatus::Validated => TransactionStatus::Validated, + RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, + RpcTransactionStatus::BestChainBlockIncluded { block: None } => + TransactionStatus::NoLongerInBestBlock, + RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => + TransactionStatus::InBestBlock { + hash: BlockRef::from_hash(block.hash), + }, + RpcTransactionStatus::Finalized { block } => + TransactionStatus::InFinalizedBlock { + hash: BlockRef::from_hash(block.hash), + }, + RpcTransactionStatus::Error { error } => + TransactionStatus::Error { message: error }, + RpcTransactionStatus::Invalid { error } => + TransactionStatus::Invalid { message: error }, + RpcTransactionStatus::Dropped { error } => + TransactionStatus::Dropped { message: error }, + } + }) + .map_err(Into::into) + }); + + Ok(StreamOf(Box::pin(tx_progress))) + } + + // Submit a transaction. 
This synchronizes with chainHead_follow events to ensure + // that block hashes returned are ready to be queried. + async fn submit_transaction_tracking_follow_events( + extrinsic: &[u8], + transaction_timeout_secs: u64, + methods: &ChainHeadRpcMethods, + follow_handle: &FollowStreamDriverHandle>, + ) -> Result>>, BackendError> { + // We care about new and finalized block hashes. + enum SeenBlockMarker { + New, + Finalized, + } + + // First, subscribe to new blocks. + let mut seen_blocks_sub = follow_handle.subscribe().events(); + + // Then, submit the transaction. + let mut tx_progress = methods.transactionwatch_v1_submit_and_watch(extrinsic).await?; + + let mut seen_blocks = HashMap::new(); + let mut done = false; + + // If we see the finalized event, we start waiting until we find a finalized block that + // matches, so we can guarantee to return a pinned block hash and be properly in sync + // with chainHead_follow. + let mut finalized_hash: Option> = None; + + // Record the start time so that we can time out if things appear to take too long. + let start_instant = web_time::Instant::now(); + + // A quick helper to return a generic error. + let err_other = |s: &str| Some(Err(BackendError::Other(s.into()))); + + // Now we can attempt to associate tx events with pinned blocks. + let tx_stream = futures::stream::poll_fn(move |cx| { + loop { + // Bail early if we're finished; nothing else to do. + if done { + return Poll::Ready(None); + } + + // Bail if we exceed 4 mins; something very likely went wrong. + if start_instant.elapsed().as_secs() > transaction_timeout_secs { + return Poll::Ready(err_other( + "Timeout waiting for the transaction to be finalized", + )); + } + + // Poll for a follow event, and error if the stream has unexpectedly ended. 
+ let follow_ev_poll = match seen_blocks_sub.poll_next_unpin(cx) { + Poll::Ready(None) => { + return Poll::Ready(err_other( + "chainHead_follow stream ended unexpectedly", + )); + }, + Poll::Ready(Some(follow_ev)) => Poll::Ready(follow_ev), + Poll::Pending => Poll::Pending, + }; + let follow_ev_is_pending = follow_ev_poll.is_pending(); + + // If there was a follow event, then handle it and loop around to see if there + // are more. We want to buffer follow events until we hit Pending, so that + // we are as up-to-date as possible for when we see a BestBlockChanged + // event, so that we have the best change of already having seen the block + // that it mentions and returning a proper pinned block. + if let Poll::Ready(follow_ev) = follow_ev_poll { + match follow_ev { + FollowEvent::NewBlock(ev) => { + // Optimization: once we have a `finalized_hash`, we only care about + // finalized block refs now and can avoid bothering to save + // new blocks. + if finalized_hash.is_none() { + seen_blocks.insert( + ev.block_hash.hash(), + (SeenBlockMarker::New, ev.block_hash), + ); + } + }, + FollowEvent::Finalized(ev) => { + for block_ref in ev.finalized_block_hashes { + seen_blocks.insert( + block_ref.hash(), + (SeenBlockMarker::Finalized, block_ref), + ); + } + }, + FollowEvent::Stop => { + // If we get this event, we'll lose all of our existing pinned + // blocks and have a gap in which we may lose the finalized + // block that the TX is in. For now, just error if + // this happens, to prevent the case in which we never see a + // finalized block and wait forever. + return Poll::Ready(err_other( + "chainHead_follow emitted 'stop' event during transaction submission", + )); + }, + _ => {}, + } + continue; + } + + // If we have a finalized hash, we are done looking for tx events and we are + // just waiting for a pinned block with a matching hash (which must appear + // eventually given it's finalized). 
+ if let Some(hash) = &finalized_hash { + if let Some((SeenBlockMarker::Finalized, block_ref)) = + seen_blocks.remove(hash) + { + // Found it! Hand back the event with a pinned block. We're done. + done = true; + let ev = TransactionStatus::InFinalizedBlock { hash: block_ref.into() }; + return Poll::Ready(Some(Ok(ev))); + } else { + // Not found it! If follow ev is pending, then return pending here and + // wait for a new one to come in, else loop around and see if we + // get another one immediately. + seen_blocks.clear(); + if follow_ev_is_pending { + return Poll::Pending; + } else { + continue; + } + } + } + + // If we don't have a finalized block yet, we keep polling for tx progress + // events. + let tx_progress_ev = match tx_progress.poll_next_unpin(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => { + return Poll::Ready(err_other( + "No more transaction progress events, but we haven't seen a Finalized one yet", + )); + }, + Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e.into()))), + Poll::Ready(Some(Ok(ev))) => ev, + }; + + // When we get one, map it to the correct format (or for finalized ev, wait for + // the pinned block): + use pezkuwi_subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; + let tx_progress_ev = match tx_progress_ev { + RpcTransactionStatus::Finalized { block } => { + // We'll wait until we have seen this hash, to try to guarantee + // that when we return this event, the corresponding block is + // pinned and accessible. + finalized_hash = Some(block.hash); + continue; + }, + RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => { + // Look up a pinned block ref if we can, else return a non-pinned + // block that likely isn't accessible. We have no guarantee that a best + // block on the node a tx was sent to will ever be known about on the + // chainHead_follow subscription. 
+ let block_ref = match seen_blocks.get(&block.hash) { + Some((_, block_ref)) => block_ref.clone().into(), + None => BlockRef::from_hash(block.hash), + }; + TransactionStatus::InBestBlock { hash: block_ref } + }, + RpcTransactionStatus::BestChainBlockIncluded { block: None } => + TransactionStatus::NoLongerInBestBlock, + RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, + RpcTransactionStatus::Dropped { error, .. } => + TransactionStatus::Dropped { message: error }, + RpcTransactionStatus::Error { error } => + TransactionStatus::Error { message: error }, + RpcTransactionStatus::Invalid { error } => + TransactionStatus::Invalid { message: error }, + RpcTransactionStatus::Validated => TransactionStatus::Validated, + }; + return Poll::Ready(Some(Ok(tx_progress_ev))); + } + }); + + Ok(StreamOf(Box::pin(tx_stream))) + } + + if self.submit_transactions_ignoring_follow_events { + submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await + } else { + submit_transaction_tracking_follow_events::( + extrinsic, + self.transaction_timeout_secs as u64, + &self.methods, + &self.follow_handle, + ) + .await + } + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let sub_id = get_subscription_id(&self.follow_handle).await?; + + // Subscribe to the body response and get our operationId back. + let follow_events = self.follow_handle.subscribe().events(); + let call_parameters = call_parameters.unwrap_or(&[]); + let status = + self.methods.chainhead_v1_call(&sub_id, at, method, call_parameters).await?; + let operation_id = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id, + }; + + // Wait for the response to come back with the correct operationId. 
+ let mut call_data_stream = follow_events.filter_map(|ev| { + let FollowEvent::OperationCallDone(body) = ev else { + return std::future::ready(None); + }; + if body.operation_id != operation_id { + return std::future::ready(None); + } + std::future::ready(Some(body.output.0)) + }); + + call_data_stream + .next() + .await + .ok_or_else(|| RpcError::SubscriptionDropped.into()) + }) + .await + } +} + +/// A helper to obtain a subscription ID. +async fn get_subscription_id( + follow_handle: &FollowStreamDriverHandle, +) -> Result { + let Some(sub_id) = follow_handle.subscribe().subscription_id().await else { + return Err(RpcError::SubscriptionDropped.into()); + }; + + Ok(sub_id) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/storage_items.rs b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/storage_items.rs new file mode 100644 index 00000000..79c7565f --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/chain_head/storage_items.rs @@ -0,0 +1,165 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::{follow_stream_driver::FollowStreamDriverHandle, follow_stream_unpin::BlockRef}; +use crate::{ + config::{Config, HashFor}, + error::{BackendError, RpcError}, +}; +use futures::{FutureExt, Stream, StreamExt}; +use pezkuwi_subxt_rpcs::methods::chain_head::{ + ChainHeadRpcMethods, FollowEvent, MethodResponse, StorageQuery, StorageResult, +}; +use std::{ + collections::VecDeque, + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +/// Obtain a stream of storage items given some query. this handles continuing +/// and stopping under the hood, and returns a stream of `StorageResult`s. 
+pub struct StorageItems { + done: bool, + operation_id: Arc, + buffered_responses: VecDeque, + continue_call: ContinueFutGetter, + continue_fut: Option, + follow_event_stream: FollowEventStream>, +} + +impl StorageItems { + // Subscribe to follow events, and return a stream of storage results + // given some storage queries. The stream will automatically resume as + // needed, and stop when done. + pub async fn from_methods( + queries: impl Iterator>, + at: HashFor, + follow_handle: &FollowStreamDriverHandle>, + methods: ChainHeadRpcMethods, + ) -> Result { + let sub_id = super::get_subscription_id(follow_handle).await?; + + // Subscribe to events and make the initial request to get an operation ID. + let follow_events = follow_handle.subscribe().events(); + let status = methods.chainhead_v1_storage(&sub_id, at, queries, None).await?; + let operation_id: Arc = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id.into(), + }; + + // A function which returns the call to continue the subscription: + let continue_call: ContinueFutGetter = { + let operation_id = operation_id.clone(); + Box::new(move || { + let sub_id = sub_id.clone(); + let operation_id = operation_id.clone(); + let methods = methods.clone(); + + Box::pin(async move { + methods.chainhead_v1_continue(&sub_id, &operation_id).await?; + Ok(()) + }) + }) + }; + + Ok(StorageItems::new(operation_id, continue_call, Box::pin(follow_events))) + } + + fn new( + operation_id: Arc, + continue_call: ContinueFutGetter, + follow_event_stream: FollowEventStream>, + ) -> Self { + Self { + done: false, + buffered_responses: VecDeque::new(), + operation_id, + continue_call, + continue_fut: None, + follow_event_stream, + } + } +} + +pub type FollowEventStream = + Pin>> + Send + 'static>>; +pub type ContinueFutGetter = Box ContinueFut + Send + 'static>; +pub type ContinueFut = Pin> + Send + 'static>>; + +impl Stream for StorageItems { + 
type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + if self.done { + return Poll::Ready(None); + } + + if let Some(item) = self.buffered_responses.pop_front() { + return Poll::Ready(Some(Ok(item))); + } + + if let Some(mut fut) = self.continue_fut.take() { + match fut.poll_unpin(cx) { + Poll::Pending => { + self.continue_fut = Some(fut); + return Poll::Pending; + }, + Poll::Ready(Err(e)) => { + if e.is_disconnected_will_reconnect() { + self.continue_fut = Some((self.continue_call)()); + continue; + } + + self.done = true; + return Poll::Ready(Some(Err(e))); + }, + Poll::Ready(Ok(())) => { + // Finished; carry on. + }, + } + } + + let ev = match self.follow_event_stream.poll_next_unpin(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(ev)) => ev, + }; + + match ev { + FollowEvent::OperationWaitingForContinue(id) + if id.operation_id == *self.operation_id => + { + // Start a call to ask for more events + self.continue_fut = Some((self.continue_call)()); + continue; + }, + FollowEvent::OperationStorageDone(id) if id.operation_id == *self.operation_id => { + // We're finished! + self.done = true; + return Poll::Ready(None); + }, + FollowEvent::OperationStorageItems(items) + if items.operation_id == *self.operation_id => + { + // We have items; buffer them to emit next loops. + self.buffered_responses = items.items; + continue; + }, + FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => { + // Something went wrong obtaining storage items; mark as done and return the + // error. + self.done = true; + return Poll::Ready(Some(Err(BackendError::Other(err.error)))); + }, + _ => { + // We don't care about this event; wait for the next. 
+ continue; + }, + } + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/legacy.rs b/vendor/pezkuwi-subxt/subxt/src/backend/legacy.rs new file mode 100644 index 00000000..62aa1766 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/legacy.rs @@ -0,0 +1,645 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a legacy backend implementation, which relies +//! on the legacy RPC API methods. + +use self::rpc_methods::TransactionStatus as RpcTransactionStatus; +use crate::{ + backend::{ + Backend, BlockRef, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, + utils::{retry, retry_stream}, + }, + config::{Config, HashFor, Header}, + error::BackendError, +}; +use async_trait::async_trait; +use futures::{Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::Either, stream}; +use pezkuwi_subxt_rpcs::RpcClient; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll}, +}; + +/// Re-export legacy RPC types and methods from [`pezkuwi_subxt_rpcs::methods::legacy`]. +pub mod rpc_methods { + pub use pezkuwi_subxt_rpcs::methods::legacy::*; +} + +// Expose the RPC methods. +pub use rpc_methods::LegacyRpcMethods; + +/// Configure and build an [`LegacyBackend`]. +pub struct LegacyBackendBuilder { + storage_page_size: u32, + _marker: std::marker::PhantomData, +} + +impl Default for LegacyBackendBuilder { + fn default() -> Self { + Self::new() + } +} + +impl LegacyBackendBuilder { + /// Create a new [`LegacyBackendBuilder`]. + pub fn new() -> Self { + Self { storage_page_size: 64, _marker: std::marker::PhantomData } + } + + /// Iterating over storage entries using the [`LegacyBackend`] requires + /// fetching entries in batches. This configures the number of entries that + /// we'll try to obtain in each batch (default: 64). 
+ pub fn storage_page_size(mut self, storage_page_size: u32) -> Self { + self.storage_page_size = storage_page_size; + self + } + + /// Given an [`RpcClient`] to use to make requests, this returns a [`LegacyBackend`], + /// which implements the [`Backend`] trait. + pub fn build(self, client: impl Into) -> LegacyBackend { + LegacyBackend { + storage_page_size: self.storage_page_size, + methods: LegacyRpcMethods::new(client.into()), + } + } +} + +/// The legacy backend. +#[derive(Debug)] +pub struct LegacyBackend { + storage_page_size: u32, + methods: LegacyRpcMethods, +} + +impl Clone for LegacyBackend { + fn clone(&self) -> LegacyBackend { + LegacyBackend { storage_page_size: self.storage_page_size, methods: self.methods.clone() } + } +} + +impl LegacyBackend { + /// Configure and construct an [`LegacyBackend`]. + pub fn builder() -> LegacyBackendBuilder { + LegacyBackendBuilder::new() + } +} + +impl super::sealed::Sealed for LegacyBackend {} + +#[async_trait] +impl Backend for LegacyBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError> { + fn get_entry( + key: Vec, + at: HashFor, + methods: LegacyRpcMethods, + ) -> impl Future, BackendError>> { + retry(move || { + let methods = methods.clone(); + let key = key.clone(); + async move { + let res = methods.state_get_storage(&key, Some(at)).await?; + Ok(res.map(move |value| StorageResponse { key, value })) + } + }) + } + + let keys = keys.clone(); + let methods = self.methods.clone(); + + // For each key, return it + a future to get the result. + let iter = keys.into_iter().map(move |key| get_entry(key, at, methods.clone())); + + let s = stream::iter(iter) + // Resolve the future + .then(|fut| fut) + // Filter any Options out (ie if we didn't find a value at some key we return nothing + // for it). 
+ .filter_map(|r| future::ready(r.transpose())); + + Ok(StreamOf(Box::pin(s))) + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + let keys = StorageFetchDescendantKeysStream { + at, + key, + storage_page_size: self.storage_page_size, + methods: self.methods.clone(), + done: Default::default(), + keys_fut: Default::default(), + pagination_start_key: None, + }; + + let keys = keys.flat_map(|keys| { + match keys { + Err(e) => { + // If there's an error, return that next: + Either::Left(stream::iter(std::iter::once(Err(e)))) + }, + Ok(keys) => { + // Or, stream each "ok" value: + Either::Right(stream::iter(keys.into_iter().map(Ok))) + }, + } + }); + + Ok(StreamOf(Box::pin(keys))) + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError> { + let keys_stream = StorageFetchDescendantKeysStream { + at, + key, + storage_page_size: self.storage_page_size, + methods: self.methods.clone(), + done: Default::default(), + keys_fut: Default::default(), + pagination_start_key: None, + }; + + Ok(StreamOf(Box::pin(StorageFetchDescendantValuesStream { + keys: keys_stream, + results_fut: None, + results: Default::default(), + }))) + } + + async fn genesis_hash(&self) -> Result, BackendError> { + retry(|| async { + let hash = self.methods.genesis_hash().await?; + Ok(hash) + }) + .await + } + + async fn block_header(&self, at: HashFor) -> Result, BackendError> { + retry(|| async { + let header = self.methods.chain_get_header(Some(at)).await?; + Ok(header) + }) + .await + } + + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { + retry(|| async { + let Some(details) = self.methods.chain_get_block(Some(at)).await? 
else { + return Ok(None); + }; + Ok(Some(details.block.extrinsics.into_iter().map(|b| b.0).collect())) + }) + .await + } + + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { + retry(|| async { + let hash = self.methods.chain_get_finalized_head().await?; + Ok(BlockRef::from_hash(hash)) + }) + .await + } + + async fn current_runtime_version(&self) -> Result { + retry(|| async { + let details = self.methods.state_get_runtime_version(None).await?; + Ok(RuntimeVersion { + spec_version: details.spec_version, + transaction_version: details.transaction_version, + }) + }) + .await + } + + async fn stream_runtime_version( + &self, + ) -> Result, BackendError> { + let methods = self.methods.clone(); + + let retry_sub = retry_stream(move || { + let methods = methods.clone(); + + Box::pin(async move { + let sub = methods.state_subscribe_runtime_version().await?; + let sub = sub.map_err(|e| e.into()).map(|r| { + r.map(|v| RuntimeVersion { + spec_version: v.spec_version, + transaction_version: v.transaction_version, + }) + }); + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + // For runtime version subscriptions we omit the `DisconnectedWillReconnect` error + // because the once it resubscribes it will emit the latest runtime version. + // + // Thus, it's technically possible that a runtime version can be missed if + // two runtime upgrades happen in quick succession, but this is very unlikely. 
+ let stream = retry_sub.filter(|r| { + let mut keep = true; + if let Err(e) = r { + if e.is_disconnected_will_reconnect() { + keep = false; + } + } + async move { keep } + }); + + Ok(StreamOf(Box::pin(stream))) + } + + async fn stream_all_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let methods = self.methods.clone(); + let retry_sub = retry_stream(move || { + let methods = methods.clone(); + Box::pin(async move { + let sub = methods.chain_subscribe_all_heads().await?; + let sub = sub.map_err(|e| e.into()).map(move |r| { + r.map(|h| { + let hash = h.hash_with(hasher); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn stream_best_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let methods = self.methods.clone(); + + let retry_sub = retry_stream(move || { + let methods = methods.clone(); + Box::pin(async move { + let sub = methods.chain_subscribe_new_heads().await?; + let sub = sub.map_err(|e| e.into()).map(move |r| { + r.map(|h| { + let hash = h.hash_with(hasher); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn stream_finalized_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let this = self.clone(); + + let retry_sub = retry_stream(move || { + let this = this.clone(); + Box::pin(async move { + let sub = this.methods.chain_subscribe_finalized_heads().await?; + + // Get the last finalized block immediately so that the stream will emit every + // finalized block after this. + let last_finalized_block_ref = this.latest_finalized_block_ref().await?; + let last_finalized_block_num = this + .block_header(last_finalized_block_ref.hash()) + .await? 
+ .map(|h| h.number().into()); + + // Fill in any missing blocks, because the backend may not emit every finalized + // block; just the latest ones which are finalized each time. + let sub = subscribe_to_block_headers_filling_in_gaps( + this.methods.clone(), + sub, + last_finalized_block_num, + ); + let sub = sub.map(move |r| { + r.map(|h| { + let hash = h.hash_with(hasher); + (h, BlockRef::from_hash(hash)) + }) + }); + + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>>, BackendError> { + let sub = self.methods.author_submit_and_watch_extrinsic(extrinsic).await?; + + let sub = sub.filter_map(|r| { + let mapped = r + .map_err(|e| e.into()) + .map(|tx| { + match tx { + // We ignore these because they don't map nicely to the new API. They don't + // signal "end states" so this should be fine. + RpcTransactionStatus::Future => None, + RpcTransactionStatus::Retracted(_) => None, + // These roughly map across: + RpcTransactionStatus::Ready => Some(TransactionStatus::Validated), + RpcTransactionStatus::Broadcast(_peers) => + Some(TransactionStatus::Broadcasted), + RpcTransactionStatus::InBlock(hash) => + Some(TransactionStatus::InBestBlock { hash: BlockRef::from_hash(hash) }), + // These 5 mean that the stream will very likely end: + RpcTransactionStatus::FinalityTimeout(_) => + Some(TransactionStatus::Dropped { message: "Finality timeout".into() }), + RpcTransactionStatus::Finalized(hash) => + Some(TransactionStatus::InFinalizedBlock { + hash: BlockRef::from_hash(hash), + }), + RpcTransactionStatus::Usurped(_) => Some(TransactionStatus::Invalid { + message: "Transaction was usurped by another with the same nonce" + .into(), + }), + RpcTransactionStatus::Dropped => Some(TransactionStatus::Dropped { + message: "Transaction was dropped".into(), + }), + RpcTransactionStatus::Invalid => Some(TransactionStatus::Invalid { + message: + "Transaction is invalid (eg because of a bad 
nonce, signature etc)" + .into(), + }), + } + }) + .transpose(); + + future::ready(mapped) + }); + + Ok(StreamOf::new(Box::pin(sub))) + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let res = self.methods.state_call(method, call_parameters, Some(at)).await?; + Ok(res) + }) + .await + } +} + +/// Note: This is exposed for testing but is not considered stable and may change +/// without notice in a patch release. +#[doc(hidden)] +pub fn subscribe_to_block_headers_filling_in_gaps( + methods: LegacyRpcMethods, + sub: S, + mut last_block_num: Option, +) -> impl Stream> + Send +where + T: Config, + S: Stream> + Send, + E: Into + Send + 'static, +{ + sub.flat_map(move |s| { + // Get the header, or return a stream containing just the error. + let header = match s { + Ok(header) => header, + Err(e) => return Either::Left(stream::once(async { Err(e.into()) })), + }; + + // We want all previous details up to, but not including this current block num. + let end_block_num = header.number().into(); + + // This is one after the last block we returned details for last time. + let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num); + + // Iterate over all of the previous blocks we need headers for, ignoring the current block + // (which we already have the header info for): + let methods = methods.clone(); + let previous_headers = stream::iter(start_block_num..end_block_num) + .then(move |n| { + let methods = methods.clone(); + async move { + let hash = methods.chain_get_block_hash(Some(n.into())).await?; + let header = methods.chain_get_header(hash).await?; + Ok::<_, BackendError>(header) + } + }) + .filter_map(async |h| h.transpose()); + + // On the next iteration, we'll get details starting just after this end block. + last_block_num = Some(end_block_num); + + // Return a combination of any previous headers plus the new header. 
+ Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) + }) +} + +/// This provides a stream of values given some prefix `key`. It +/// internally manages pagination and such. +#[allow(clippy::type_complexity)] +pub struct StorageFetchDescendantKeysStream { + methods: LegacyRpcMethods, + key: Vec, + at: HashFor, + // How many entries to ask for each time. + storage_page_size: u32, + // What key do we start paginating from? None = from the beginning. + pagination_start_key: Option>, + // Keys, future and cached: + keys_fut: + Option>, BackendError>> + Send + 'static>>>, + // Set to true when we're done: + done: bool, +} + +impl std::marker::Unpin for StorageFetchDescendantKeysStream {} + +impl Stream for StorageFetchDescendantKeysStream { + type Item = Result>, BackendError>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + loop { + // We're already done. + if this.done { + return Poll::Ready(None); + } + + // Poll future to fetch next keys. + if let Some(mut keys_fut) = this.keys_fut.take() { + let Poll::Ready(keys) = keys_fut.poll_unpin(cx) else { + this.keys_fut = Some(keys_fut); + return Poll::Pending; + }; + + match keys { + Ok(mut keys) => { + if this.pagination_start_key.is_some() && + keys.first() == this.pagination_start_key.as_ref() + { + // Currently, Smoldot returns the "start key" as the first key in the + // input (see https://github.com/smol-dot/smoldot/issues/1692), whereas Bizinikiwi doesn't. + // We don't expect the start key to be returned either (since it was the + // last key of prev iteration), so remove it if we see it. This + // `remove()` method isn't very efficient but this will be a non + // issue with the RPC V2 APIs or if Smoldot aligns with Bizinikiwi + // anyway. + keys.remove(0); + } + if keys.is_empty() { + // No keys left; we're done! + this.done = true; + return Poll::Ready(None); + } + // The last key is where we want to paginate from next time. 
+ this.pagination_start_key = keys.last().cloned(); + // return all of the keys from this run. + return Poll::Ready(Some(Ok(keys))); + }, + Err(e) => { + if e.is_disconnected_will_reconnect() { + this.keys_fut = Some(keys_fut); + continue; + } + + // Error getting keys? Return it. + return Poll::Ready(Some(Err(e))); + }, + } + } + + // Else, we don't have a fut to get keys yet so start one going. + let methods = this.methods.clone(); + let key = this.key.clone(); + let at = this.at; + let storage_page_size = this.storage_page_size; + let pagination_start_key = this.pagination_start_key.clone(); + let keys_fut = async move { + let keys = methods + .state_get_keys_paged( + &key, + storage_page_size, + pagination_start_key.as_deref(), + Some(at), + ) + .await?; + Ok(keys) + }; + this.keys_fut = Some(Box::pin(keys_fut)); + } + } +} + +/// This provides a stream of values given some stream of keys. +#[allow(clippy::type_complexity)] +pub struct StorageFetchDescendantValuesStream { + // Stream of keys. 
+ keys: StorageFetchDescendantKeysStream, + // Then we track the future to get the values back for each key: + results_fut: Option< + Pin< + Box< + dyn Future, Vec)>>, BackendError>> + + Send + + 'static, + >, + >, + >, + // And finally we return each result back one at a time: + results: VecDeque<(Vec, Vec)>, +} + +impl Stream for StorageFetchDescendantValuesStream { + type Item = Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + loop { + // If we have results back, return them one by one + if let Some((key, value)) = this.results.pop_front() { + let res = StorageResponse { key, value }; + return Poll::Ready(Some(Ok(res))); + } + + // If we're waiting on the next results then poll that future: + if let Some(mut results_fut) = this.results_fut.take() { + match results_fut.poll_unpin(cx) { + Poll::Ready(Ok(Some(results))) => { + this.results = results; + continue; + }, + Poll::Ready(Ok(None)) => { + // No values back for some keys? Skip. 
+ continue; + }, + Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), + Poll::Pending => { + this.results_fut = Some(results_fut); + return Poll::Pending; + }, + } + } + + match this.keys.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(keys))) => { + let methods = this.keys.methods.clone(); + let at = this.keys.at; + let results_fut = async move { + let keys = keys.iter().map(|k| &**k); + let values = retry(|| async { + let res = + methods.state_query_storage_at(keys.clone(), Some(at)).await?; + Ok(res) + }) + .await?; + let values: VecDeque<_> = values + .into_iter() + .flat_map(|v| { + v.changes.into_iter().filter_map(|(k, v)| { + let v = v?; + Some((k.0, v.0)) + }) + }) + .collect(); + Ok(Some(values)) + }; + + this.results_fut = Some(Box::pin(results_fut)); + continue; + }, + Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/mod.rs b/vendor/pezkuwi-subxt/subxt/src/backend/mod.rs new file mode 100644 index 00000000..39f542dd --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/mod.rs @@ -0,0 +1,1024 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a backend trait for Subxt which allows us to get and set +//! the necessary information (probably from a JSON-RPC API, but that's up to the +//! implementation). 
+ +pub mod chain_head; +pub mod legacy; +pub mod utils; + +use crate::{ + config::{Config, HashFor}, + error::BackendError, +}; +use async_trait::async_trait; +use codec::{Decode, Encode}; +use futures::{Stream, StreamExt}; +use pezkuwi_subxt_core::client::RuntimeVersion; +use pezkuwi_subxt_metadata::Metadata; +use std::{pin::Pin, sync::Arc}; + +/// Some re-exports from the [`pezkuwi_subxt_rpcs`] crate, also accessible in full via +/// [`crate::ext::pezkuwi_subxt_rpcs`]. +pub mod rpc { + pub use pezkuwi_subxt_rpcs::{ + RpcClient, RpcClientT, + client::{RawRpcFuture, RawRpcSubscription, RawValue, RpcParams}, + rpc_params, + }; + + crate::macros::cfg_reconnecting_rpc_client! { + /// An RPC client that automatically reconnects. + /// + /// # Example + /// + /// ```rust,no_run,standalone_crate + /// use std::time::Duration; + /// use futures::StreamExt; + /// use pezkuwi_subxt::backend::rpc::reconnecting_rpc_client::{RpcClient, ExponentialBackoff}; + /// use pezkuwi_subxt::{OnlineClient, PezkuwiConfig}; + /// + /// #[tokio::main] + /// async fn main() { + /// let rpc = RpcClient::builder() + /// .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) + /// .build("ws://localhost:9944".to_string()) + /// .await + /// .unwrap(); + /// + /// let subxt_client: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await.unwrap(); + /// let mut blocks_sub = subxt_client.blocks().subscribe_finalized().await.unwrap(); + /// + /// while let Some(block) = blocks_sub.next().await { + /// let block = match block { + /// Ok(b) => b, + /// Err(e) => { + /// if e.is_disconnected_will_reconnect() { + /// println!("The RPC connection was lost and we may have missed a few blocks"); + /// continue; + /// } else { + /// panic!("Error: {}", e); + /// } + /// } + /// }; + /// println!("Block #{} ({})", block.number(), block.hash()); + /// } + /// } + /// ``` + pub use pezkuwi_subxt_rpcs::client::reconnecting_rpc_client; + } +} + +/// Prevent the backend 
trait being implemented externally. +#[doc(hidden)] +pub(crate) mod sealed { + pub trait Sealed {} +} + +/// This trait exposes the interface that Subxt will use to communicate with +/// a backend. Its goal is to be as minimal as possible. +#[async_trait] +pub trait Backend: sealed::Sealed + Send + Sync + 'static { + /// Fetch values from storage. + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError>; + + /// Fetch keys underneath the given key from storage. + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError>; + + /// Fetch values underneath the given key from storage. + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError>; + + /// Fetch the genesis hash + async fn genesis_hash(&self) -> Result, BackendError>; + + /// Get a block header + async fn block_header(&self, at: HashFor) -> Result, BackendError>; + + /// Return the extrinsics found in the block. Each extrinsic is represented + /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the + /// first bytes in the vector will decode to the compact encoded length of the extrinsic) + async fn block_body(&self, at: HashFor) -> Result>>, BackendError>; + + /// Get the most recent finalized block hash. + /// Note: needed only in blocks client for finalized block stream; can prolly be removed. + async fn latest_finalized_block_ref(&self) -> Result>, BackendError>; + + /// Get information about the current runtime. + async fn current_runtime_version(&self) -> Result; + + /// A stream of all new runtime versions as they occur. + async fn stream_runtime_version(&self) + -> Result, BackendError>; + + /// A stream of all new block headers as they arrive. + async fn stream_all_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// A stream of best block headers. 
+ async fn stream_best_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// A stream of finalized block headers. + async fn stream_finalized_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// Submit a transaction. This will return a stream of events about it. + async fn submit_transaction( + &self, + bytes: &[u8], + ) -> Result>>, BackendError>; + + /// Make a call to some runtime API. + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError>; +} + +/// helpful utility methods derived from those provided on [`Backend`] +#[async_trait] +pub trait BackendExt: Backend { + /// Fetch a single value from storage. + async fn storage_fetch_value( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + self.storage_fetch_values(vec![key], at) + .await? + .next() + .await + .transpose() + .map(|o| o.map(|s| s.value)) + } + + /// The same as a [`Backend::call()`], but it will also attempt to decode the + /// result into the given type, which is a fairly common operation. + async fn call_decoding( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result { + let bytes = self.call(method, call_parameters, at).await?; + let res = + D::decode(&mut &*bytes).map_err(BackendError::CouldNotScaleDecodeRuntimeResponse)?; + Ok(res) + } + + /// Return the metadata at some version. + async fn metadata_at_version( + &self, + version: u32, + at: HashFor, + ) -> Result { + let param = version.encode(); + + let opaque: Option = + self.call_decoding("Metadata_metadata_at_version", Some(¶m), at).await?; + let Some(opaque) = opaque else { + return Err(BackendError::MetadataVersionNotFound(version)); + }; + + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; + Ok(metadata) + } + + /// Return V14 metadata from the legacy `Metadata_metadata` call. 
+ async fn legacy_metadata(&self, at: HashFor) -> Result { + let opaque: frame_metadata::OpaqueMetadata = + self.call_decoding("Metadata_metadata", None, at).await?; + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; + Ok(metadata) + } +} + +#[async_trait] +impl + ?Sized, T: Config> BackendExt for B {} + +/// An opaque struct which, while alive, indicates that some references to a block +/// still exist. This gives the backend the opportunity to keep the corresponding block +/// details around for a while if it likes and is able to. No guarantees can be made about +/// how long the corresponding details might be available for, but if no references to a block +/// exist, then the backend is free to discard any details for it. +#[derive(Clone)] +pub struct BlockRef { + hash: H, + // We keep this around so that when it is dropped, it has the + // opportunity to tell the backend. + _pointer: Option>, +} + +impl From for BlockRef { + fn from(value: H) -> Self { + BlockRef::from_hash(value) + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} +impl Eq for BlockRef {} + +// Manual implementation to work around https://github.com/mcarton/rust-derivative/issues/115. +impl PartialOrd for BlockRef { + fn partial_cmp(&self, other: &Self) -> Option { + self.hash.partial_cmp(&other.hash) + } +} + +impl Ord for BlockRef { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.hash.cmp(&other.hash) + } +} + +impl std::fmt::Debug for BlockRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("BlockRef").field(&self.hash).finish() + } +} + +impl std::hash::Hash for BlockRef { + fn hash(&self, state: &mut Hasher) { + self.hash.hash(state); + } +} + +impl BlockRef { + /// A [`BlockRef`] that doesn't reference a given block, but does have an associated hash. 
+ /// This is used in the legacy backend, which has no notion of pinning blocks. + pub fn from_hash(hash: H) -> Self { + Self { hash, _pointer: None } + } + /// Construct a [`BlockRef`] from an instance of the underlying trait. It's expected + /// that the [`Backend`] implementation will call this if it wants to track which blocks + /// are potentially in use. + pub fn new(hash: H, inner: P) -> Self { + Self { hash, _pointer: Some(Arc::new(inner)) } + } + + /// Return the hash of the referenced block. + pub fn hash(&self) -> H + where + H: Copy, + { + self.hash + } +} + +/// A trait that a [`Backend`] can implement to know when some block +/// can be unpinned: when this is dropped, there are no remaining references +/// to the block that it's associated with. +pub trait BlockRefT: Send + Sync + 'static {} + +/// A stream of some item. +pub struct StreamOf(Pin + Send + 'static>>); + +impl Stream for StreamOf { + type Item = T; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.0.poll_next_unpin(cx) + } +} + +impl std::fmt::Debug for StreamOf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("StreamOf").field(&"").finish() + } +} + +impl StreamOf { + /// Construct a new stream. + pub fn new(inner: Pin + Send + 'static>>) -> Self { + StreamOf(inner) + } + + /// Returns the next item in the stream. This is just a wrapper around + /// [`StreamExt::next()`] so that you can avoid the extra import. + pub async fn next(&mut self) -> Option { + StreamExt::next(self).await + } +} + +/// A stream of [`Result`]. +pub type StreamOfResults = StreamOf>; + +/// The status of the transaction. +/// +/// If the status is [`TransactionStatus::InFinalizedBlock`], [`TransactionStatus::Error`], +/// [`TransactionStatus::Invalid`] or [`TransactionStatus::Dropped`], then no future +/// events will be emitted. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Validated, + /// The transaction has been broadcast to other nodes. + Broadcasted, + /// Transaction is no longer in a best block. + NoLongerInBestBlock, + /// Transaction has been included in block with given hash. + InBestBlock { + /// Block hash the transaction is in. + hash: BlockRef, + }, + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + InFinalizedBlock { + /// Block hash the transaction is in. + hash: BlockRef, + }, + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + message: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + message: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + message: String, + }, +} + +/// A response from calls like [`Backend::storage_fetch_values`] or +/// [`Backend::storage_fetch_descendant_values`]. +#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug)] +pub struct StorageResponse { + /// The key. + pub key: Vec, + /// The associated value. 
+ pub value: Vec, +} + +#[cfg(test)] +mod test { + use super::*; + use crate::backend::StorageResponse; + use core::convert::Infallible; + use futures::StreamExt; + use pezkuwi_subxt_core::{Config, config::DefaultExtrinsicParams}; + use pezkuwi_subxt_rpcs::client::{ + MockRpcClient, + mock_rpc_client::{Json, MockRpcClientBuilder}, + }; + use primitive_types::H256; + use rpc::RpcClientT; + use std::collections::{HashMap, VecDeque}; + + fn random_hash() -> H256 { + H256::random() + } + + fn disconnected_will_reconnect() -> pezkuwi_subxt_rpcs::Error { + pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect("..".into()) + } + + fn storage_response>, V: Into>>(key: K, value: V) -> StorageResponse + where + Vec: From, + { + StorageResponse { key: key.into(), value: value.into() } + } + + // Define dummy config + enum Conf {} + impl Config for Conf { + type AccountId = crate::utils::AccountId32; + type Address = crate::utils::MultiAddress; + type Signature = crate::utils::MultiSignature; + type Hasher = crate::config::bizinikiwi::BlakeTwo256; + type Header = crate::config::bizinikiwi::BizinikiwiHeader; + type ExtrinsicParams = DefaultExtrinsicParams; + type AssetId = u32; + } + + mod legacy { + use super::*; + use crate::{ + backend::legacy::{LegacyBackend, rpc_methods::RuntimeVersion}, + error::RpcError, + }; + + use crate::backend::Backend; + + fn client_runtime_version(num: u32) -> crate::client::RuntimeVersion { + crate::client::RuntimeVersion { spec_version: num, transaction_version: num } + } + + fn runtime_version(num: u32) -> RuntimeVersion { + RuntimeVersion { spec_version: num, transaction_version: num, other: HashMap::new() } + } + + #[tokio::test] + async fn storage_fetch_values() { + // Map from storage key to responses, given out in order, when that key is requested. 
+ let mut values: HashMap<&str, VecDeque<_>> = HashMap::from_iter([ + ( + "ID1", + VecDeque::from_iter([ + Err(disconnected_will_reconnect()), + Ok(Json(hex::encode("Data1"))), + ]), + ), + ( + "ID2", + VecDeque::from_iter([ + Err(disconnected_will_reconnect()), + Ok(Json(hex::encode("Data2"))), + ]), + ), + ("ID3", VecDeque::from_iter([Ok(Json(hex::encode("Data3")))])), + ]); + + let rpc_client = MockRpcClient::builder() + .method_handler("state_getStorage", move |params| { + // Decode the storage key as first item from sequence of params: + let params = params.map(|p| p.get().to_string()); + let rpc_params = jsonrpsee::types::Params::new(params.as_deref()); + let key: sp_core::Bytes = rpc_params.sequence().next().unwrap(); + let key = std::str::from_utf8(&key.0).unwrap(); + // Fetch the response to use from our map, popping it from the front. + let values = values.get_mut(key).unwrap(); + let value = values.pop_front().unwrap(); + async move { value } + }) + .build(); + + // Test + let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); + + let response = backend + .storage_fetch_values( + ["ID1".into(), "ID2".into(), "ID3".into()].into(), + random_hash(), + ) + .await + .unwrap(); + + let response = response.map(|x| x.unwrap()).collect::>().await; + + let expected = vec![ + storage_response("ID1", "Data1"), + storage_response("ID2", "Data2"), + storage_response("ID3", "Data3"), + ]; + + assert_eq!(expected, response) + } + + #[tokio::test] + async fn storage_fetch_value() { + let rpc_client = MockRpcClient::builder() + .method_handler_once("state_getStorage", async move |_params| { + // Return "disconnected" error on first call + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("state_getStorage", async move |_param| { + // Return some hex encoded storage value on the next one + Json(hex::encode("Data1")) + }) + .build(); + + // Test + let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); + let response = 
backend.storage_fetch_value("ID1".into(), random_hash()).await.unwrap(); + + let response = response.unwrap(); + assert_eq!("Data1".to_owned(), String::from_utf8(response).unwrap()) + } + + /// This test should cover the logic of the following methods: + /// - `genesis_hash` + /// - `block_header` + /// - `block_body` + /// - `latest_finalized_block` + /// - `current_runtime_version` + /// - `current_runtime_version` + /// - `call` + /// The test covers them because they follow the simple pattern of: + /// ```rust,no_run,standalone_crate + /// async fn THE_THING(&self) -> Result, BackendError> { + /// retry(|| ).await + /// } + /// ``` + #[tokio::test] + async fn simple_fetch() { + let hash = random_hash(); + let rpc_client = MockRpcClient::builder() + .method_handler_once("chain_getBlockHash", async move |_params| { + // Return "disconnected" error on first call + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("chain_getBlockHash", async move |_params| { + // Return the blockhash on next call + Json(hash) + }) + .build(); + + // Test + let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); + let response = backend.genesis_hash().await.unwrap(); + + assert_eq!(hash, response) + } + + /// This test should cover the logic of the following methods: + /// - `stream_runtime_version` + /// - `stream_all_block_headers` + /// - `stream_best_block_headers` + /// The test covers them because they follow the simple pattern of: + /// ```rust,no_run,standalone_crate + /// async fn stream_the_thing( + /// &self, + /// ) -> Result>)>, BackendError> { + /// let methods = self.methods.clone(); + /// let retry_sub = retry_stream(move || { + /// let methods = methods.clone(); + /// Box::pin(async move { + /// methods.do_the_thing().await? 
+ /// }); + /// Ok(StreamOf(Box::pin(sub))) + /// }) + /// }) + /// .await?; + /// Ok(retry_sub) + /// } + /// ``` + #[tokio::test] + async fn stream_simple() { + // Each time the subscription is called, it will pop the first set + // of values from this and return them one after the other. + let mut data = VecDeque::from_iter([ + vec![ + Ok(Json(runtime_version(0))), + Err(disconnected_will_reconnect()), + Ok(Json(runtime_version(1))), + ], + vec![ + Err(disconnected_will_reconnect()), + Ok(Json(runtime_version(2))), + Ok(Json(runtime_version(3))), + ], + vec![ + Ok(Json(runtime_version(4))), + Ok(Json(runtime_version(5))), + Err(pezkuwi_subxt_rpcs::Error::Client("..".into())), + ], + ]); + + let rpc_client = MockRpcClient::builder() + .subscription_handler("state_subscribeRuntimeVersion", move |_params, _unsub| { + let res = data.pop_front().unwrap(); + async move { res } + }) + .build(); + + // Test + let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); + let mut results = backend.stream_runtime_version().await.unwrap(); + + assert_eq!(results.next().await.unwrap().unwrap(), client_runtime_version(0)); + assert_eq!(results.next().await.unwrap().unwrap(), client_runtime_version(4)); + assert_eq!(results.next().await.unwrap().unwrap(), client_runtime_version(5)); + assert!(matches!( + results.next().await.unwrap(), + Err(BackendError::Rpc(RpcError::ClientError(pezkuwi_subxt_rpcs::Error::Client(_)))) + )); + assert!(results.next().await.is_none()); + } + } + + mod unstable_backend { + use pezkuwi_subxt_rpcs::methods::chain_head::{ + self, Bytes, Initialized, MethodResponse, MethodResponseStarted, OperationError, + OperationId, OperationStorageItems, RuntimeSpec, RuntimeVersionEvent, + }; + use tokio::select; + + use super::{chain_head::*, *}; + + fn build_backend( + rpc_client: impl RpcClientT, + ) -> (ChainHeadBackend, ChainHeadBackendDriver) { + let (backend, driver): (ChainHeadBackend, _) = + ChainHeadBackend::builder().build(rpc_client); + 
(backend, driver) + } + + fn build_backend_spawn_background(rpc_client: impl RpcClientT) -> ChainHeadBackend { + ChainHeadBackend::builder().build_with_background_driver(rpc_client) + } + + fn runtime_spec() -> RuntimeSpec { + let spec = serde_json::json!({ + "specName": "zagros", + "implName": "parity-zagros", + "specVersion": 9122, + "implVersion": 0, + "transactionVersion": 7, + "apis": { + "0xdf6acb689907609b": 3, + "0x37e397fc7c91f5e4": 1, + "0x40fe3ad401f8959a": 5, + "0xd2bc9897eed08f15": 3, + "0xf78b278be53f454c": 2, + "0xaf2c0297a23e6d3d": 1, + "0x49eaaf1b548a0cb0": 1, + "0x91d5df18b0d2cf58": 1, + "0xed99c5acb25eedf5": 3, + "0xcbca25e39f142387": 2, + "0x687ad44ad37f03c2": 1, + "0xab3c0572291feb8b": 1, + "0xbc9d89904f5b923f": 1, + "0x37c8bb1350a9a2a8": 1 + } + }); + serde_json::from_value(spec).expect("Mock runtime spec should be the right shape") + } + + type FollowEvent = chain_head::FollowEvent>; + + /// Build a mock client which can handle `chainHead_v1_follow` subscriptions. + /// Messages from the provided receiver are sent to the latest active subscription. + fn mock_client_builder( + recv: tokio::sync::mpsc::UnboundedReceiver, + ) -> MockRpcClientBuilder { + mock_client_builder_with_ids(recv, 0..) + } + + fn mock_client_builder_with_ids( + recv: tokio::sync::mpsc::UnboundedReceiver, + ids: I, + ) -> MockRpcClientBuilder + where + I: IntoIterator + Send, + I::IntoIter: Send + Sync + 'static, + { + use pezkuwi_subxt_rpcs::{Error, UserError, client::mock_rpc_client::AndThen}; + + let recv = Arc::new(tokio::sync::Mutex::new(recv)); + let mut ids = ids.into_iter(); + + MockRpcClient::builder().subscription_handler( + "chainHead_v1_follow", + move |_params, _unsub| { + let recv = recv.clone(); + let id = ids.next(); + + // For each new follow subscription, we take messages from `recv` and pipe them + // to the output for the subscription (after an Initialized event). 
if the + // output is dropped/closed, we stop pulling messages from `recv`, waiting + // for a new chainHEad_v1_follow subscription. + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + tokio::spawn(async move { + let mut recv_guard = recv.lock().await; + loop { + select! { + // Channel closed, so stop pulling from `recv`. + _ = tx.closed() => { + break + }, + // Relay messages from `recv` unless some error sending. + Some(msg) = recv_guard.recv() => { + if tx.send(Json(msg)).is_err() { + break + } + } + } + } + }); + + async move { + if let Some(id) = id { + let follow_event = + FollowEvent::Initialized(Initialized::> { + finalized_block_hashes: vec![random_hash()], + finalized_block_runtime: Some(chain_head::RuntimeEvent::Valid( + RuntimeVersionEvent { spec: runtime_spec() }, + )), + }); + + let res = AndThen( + // First send an initialized event with new ID + (vec![Json(follow_event)], subscription_id(id)), + // Next, send any events provided via the recv channel + rx, + ); + + Ok(res) + } else { + // Ran out of subscription IDs; return an error. 
+ Err(Error::User(UserError::method_not_found())) + } + } + }, + ) + } + + fn subscription_id(id: usize) -> String { + format!("chainHeadFollowSubscriptionId{id}") + } + + fn response_started(id: &str) -> MethodResponse { + MethodResponse::Started(MethodResponseStarted { + operation_id: id.to_owned(), + discarded_items: None, + }) + } + + fn operation_error(id: &str) -> FollowEvent { + FollowEvent::OperationError(OperationError { + operation_id: id.to_owned(), + error: "error".to_owned(), + }) + } + + fn limit_reached() -> MethodResponse { + MethodResponse::LimitReached + } + + fn storage_done(id: &str) -> FollowEvent { + FollowEvent::OperationStorageDone(OperationId { operation_id: id.to_owned() }) + } + fn storage_result(key: &str, value: &str) -> chain_head::StorageResult { + chain_head::StorageResult { + key: Bytes(key.to_owned().into()), + result: chain_head::StorageResultType::Value(Bytes(value.to_owned().into())), + } + } + fn storage_items(id: &str, items: &[chain_head::StorageResult]) -> FollowEvent { + FollowEvent::OperationStorageItems(OperationStorageItems { + operation_id: id.to_owned(), + items: VecDeque::from(items.to_owned()), + }) + } + + fn operation_continue(id: &str) -> FollowEvent { + FollowEvent::OperationWaitingForContinue(OperationId { operation_id: id.to_owned() }) + } + + fn follow_event_stop() -> FollowEvent { + FollowEvent::Stop + } + + #[tokio::test] + async fn storage_fetch_values_returns_stream_with_single_error() { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + + let rpc_client = mock_client_builder(rx) + .method_handler_once("chainHead_v1_storage", move |_params| { + tokio::spawn(async move { + // Wait a little and then send an error response on the + // chainHead_follow subscription: + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + tx.send(operation_error("Id1")).unwrap(); + }); + + async move { Json(response_started("Id1")) } + }) + .build(); + + let backend = 
build_backend_spawn_background(rpc_client); + + // Test + // This request should encounter an error. + let mut response = backend + .storage_fetch_values( + ["ID1".into(), "ID2".into(), "ID3".into()].into(), + random_hash(), + ) + .await + .unwrap(); + + assert!( + response + .next() + .await + .unwrap() + .is_err_and(|e| matches!(e, BackendError::Other(e) if e == "error")) + ); + assert!(response.next().await.is_none()); + } + + /// Tests that the method will retry on failed query + #[tokio::test] + async fn storage_fetch_values_retry_query() { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + + let rpc_client = mock_client_builder(rx) + .method_handler_once("chainHead_v1_storage", async move |_params| { + // First call; return DisconnectedWillReconnect + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("chainHead_v1_storage", async move |_params| { + // Otherwise, return that we'll start sending a response, and spawn + // task to send the relevant response via chainHead_follow. + tokio::spawn(async move { + tx.send(storage_items( + "Id1", + &[ + storage_result("ID1", "Data1"), + storage_result("ID2", "Data2"), + storage_result("ID3", "Data3"), + ], + )) + .unwrap(); + + tx.send(storage_done("Id1")).unwrap(); + }); + + Ok(Json(response_started("Id1"))) + }) + .build(); + + // Despite DisconnectedWillReconnect we try again transparently + // and get the data we asked for. 
+ let backend = build_backend_spawn_background(rpc_client); + let response = backend + .storage_fetch_values( + ["ID1".into(), "ID2".into(), "ID3".into()].into(), + random_hash(), + ) + .await + .unwrap(); + + let response = response.map(|x| x.unwrap()).collect::>().await; + + assert_eq!( + vec![ + storage_response("ID1", "Data1"), + storage_response("ID2", "Data2"), + storage_response("ID3", "Data3"), + ], + response + ) + } + + #[tokio::test] + async fn storage_fetch_values_retry_chainhead_continue() { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + let tx2 = tx.clone(); + + let rpc_client = mock_client_builder(rx) + .method_handler_once("chainHead_v1_storage", async move |_params| { + // First call; return DisconnectedWillReconnect + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("chainHead_v1_storage", async move |_params| { + // Next call, return a storage item and then a "waiting for continue". + tokio::spawn(async move { + tx.send(storage_items("Id1", &[storage_result("ID1", "Data1")])).unwrap(); + tx.send(operation_continue("Id1")).unwrap(); + }); + Ok(Json(response_started("Id1"))) + }) + .method_handler_once("chainHead_v1_continue", async move |_params| { + // First call; return DisconnectedWillReconnect + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("chainHead_v1_continue", async move |_params| { + // Next call; acknowledge the "continue" and return remaining storage items. + tokio::spawn(async move { + tx2.send(storage_items("Id1", &[storage_result("ID2", "Data2")])).unwrap(); + tx2.send(storage_items("Id1", &[storage_result("ID3", "Data3")])).unwrap(); + tx2.send(storage_done("Id1")).unwrap(); + }); + Ok(Json(())) + }) + .build(); + + let backend = build_backend_spawn_background(rpc_client); + + // We should success, transparently handling `continue`s and `DisconnectWillReconnects`. 
+ let response = backend + .storage_fetch_values( + ["ID1".into(), "ID2".into(), "ID3".into()].into(), + random_hash(), + ) + .await + .unwrap(); + + let response = response.map(|x| x.unwrap()).collect::>().await; + + assert_eq!( + vec![ + storage_response("ID1", "Data1"), + storage_response("ID2", "Data2"), + storage_response("ID3", "Data3"), + ], + response + ) + } + + #[tokio::test] + async fn simple_fetch() { + let hash = random_hash(); + let (_tx, rx) = tokio::sync::mpsc::unbounded_channel(); + let rpc_client = mock_client_builder(rx) + .method_handler_once("chainSpec_v1_genesisHash", async move |_params| { + // First call, return disconnected error. + Err::(disconnected_will_reconnect()) + }) + .method_handler_once("chainSpec_v1_genesisHash", async move |_params| { + // Next call, return the hash. + Ok(Json(hash)) + }) + .build(); + + // Test + // This request should encounter an error on `request` and do a retry. + let backend = build_backend_spawn_background(rpc_client); + let response_hash = backend.genesis_hash().await.unwrap(); + + assert_eq!(hash, response_hash) + } + + // Check that the backend will resubscribe on Stop, and handle a change in subscription ID. + // see https://github.com/pezkuwichain/subxt/issues/1567 + #[tokio::test] + async fn stale_subscription_id_failure() { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + let rpc_client = mock_client_builder_with_ids(rx, [1, 2]) + .method_handler("chainHead_v1_storage", move |params| { + // Decode the follow subscription ID which is the first param. + let this_sub_id = { + let params = params.as_ref().map(|p| p.get()); + let rpc_params = jsonrpsee::types::Params::new(params); + rpc_params.sequence().next::().unwrap() + }; + + // While it's equal to `subscription_id(1)`, it means we are seeing the first + // chainHead_follow subscription ID. error until we see an updated ID. 
+ let is_wrong_sub_id = this_sub_id == subscription_id(1); + + async move { + if is_wrong_sub_id { + Json(limit_reached()) + } else { + Json(response_started("some_id")) + } + } + }) + .build(); + + let (backend, mut driver): (ChainHeadBackend, _) = build_backend(rpc_client); + + // Send a "FollowEvent::Stop" via chainhead_follow, and advance the driver just enough + // that this message has been processed. + tx.send(follow_event_stop()).unwrap(); + let _ = driver.next().await.unwrap(); + + // If we make a storage call at this point, we'll still be passing the "old" + // subscription ID, because the driver hasn't advanced enough to start a new + // chainhead_follow subscription, and will therefore fail with a "limit reached" + // response (to emulate what would happen if the chainHead_v1_storage call was made + // with the wrong subscription ID). + let response = backend.storage_fetch_values(["ID1".into()].into(), random_hash()).await; + assert!(matches!(response, Err(e) if e.is_rpc_limit_reached())); + + // Advance the driver until a new chainHead_follow subscription has been started up. + let _ = driver.next().await.unwrap(); + let _ = driver.next().await.unwrap(); + let _ = driver.next().await.unwrap(); + + // Now, the ChainHeadBackend will use a new subscription ID and work. (If the driver + // advanced in the background automatically, this would happen automatically for us). + let response = backend.storage_fetch_values(["ID1".into()].into(), random_hash()).await; + assert!(response.is_ok()); + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/backend/utils.rs b/vendor/pezkuwi-subxt/subxt/src/backend/utils.rs new file mode 100644 index 00000000..186ae163 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/backend/utils.rs @@ -0,0 +1,274 @@ +//! RPC utils. 
+ +use super::{StreamOf, StreamOfResults}; +use crate::error::BackendError; +use futures::{FutureExt, Stream, StreamExt, future::BoxFuture}; +use std::{future::Future, pin::Pin, task::Poll}; + +/// Resubscribe callback. +type ResubscribeGetter = Box ResubscribeFuture + Send>; + +/// Future that resolves to a subscription stream. +type ResubscribeFuture = + Pin, BackendError>> + Send>>; + +pub(crate) enum PendingOrStream { + Pending(BoxFuture<'static, Result, BackendError>>), + Stream(StreamOfResults), +} + +impl std::fmt::Debug for PendingOrStream { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PendingOrStream::Pending(_) => write!(f, "Pending"), + PendingOrStream::Stream(_) => write!(f, "Stream"), + } + } +} + +/// Retry subscription. +struct RetrySubscription { + resubscribe: ResubscribeGetter, + state: Option>, +} + +impl std::marker::Unpin for RetrySubscription {} + +impl Stream for RetrySubscription { + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + loop { + let Some(mut this) = self.state.take() else { + return Poll::Ready(None); + }; + + match this { + PendingOrStream::Stream(ref mut s) => match s.poll_next_unpin(cx) { + Poll::Ready(Some(Err(err))) => { + if err.is_disconnected_will_reconnect() { + self.state = Some(PendingOrStream::Pending((self.resubscribe)())); + } + return Poll::Ready(Some(Err(err))); + }, + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(Ok(val))) => { + self.state = Some(this); + return Poll::Ready(Some(Ok(val))); + }, + Poll::Pending => { + self.state = Some(this); + return Poll::Pending; + }, + }, + PendingOrStream::Pending(mut fut) => match fut.poll_unpin(cx) { + Poll::Ready(Ok(stream)) => { + self.state = Some(PendingOrStream::Stream(stream)); + continue; + }, + Poll::Ready(Err(err)) => { + if err.is_disconnected_will_reconnect() { + self.state = Some(PendingOrStream::Pending((self.resubscribe)())); + } 
+ return Poll::Ready(Some(Err(err))); + }, + Poll::Pending => { + self.state = Some(PendingOrStream::Pending(fut)); + return Poll::Pending; + }, + }, + }; + } + } +} + +/// Retry a future until it doesn't return a disconnected error. +/// +/// # Example +/// +/// ```rust,no_run,standalone_crate +/// use pezkuwi_subxt::backend::utils::retry; +/// +/// async fn some_future() -> Result<(), pezkuwi_subxt::error::BackendError> { +/// Ok(()) +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let result = retry(|| some_future()).await; +/// } +/// ``` +pub async fn retry(mut retry_future: F) -> Result +where + F: FnMut() -> T, + T: Future>, +{ + const REJECTED_MAX_RETRIES: usize = 10; + let mut rejected_retries = 0; + + loop { + match retry_future().await { + Ok(v) => return Ok(v), + Err(e) => { + if e.is_disconnected_will_reconnect() { + continue; + } + + // TODO: https://github.com/pezkuwichain/subxt/issues/1567 + // This is a hack because, in the event of a disconnection, + // we may not get the correct subscription ID back on reconnecting. + // + // This is because we have a race between this future and the + // separate chainHead subscription, which runs in a different task. + // if this future is too quick, it'll be given back an old + // subscription ID from the chainHead subscription which has yet + // to reconnect and establish a new subscription ID. + // + // In the event of a wrong subscription Id being used, we happen to + // hand back an `RpcError::LimitReached`, and so can retry when we + // specifically hit that error to see if we get a new subscription ID + // eventually. + if e.is_rpc_limit_reached() && rejected_retries < REJECTED_MAX_RETRIES { + rejected_retries += 1; + continue; + } + + return Err(e); + }, + } + } +} + +/// Create a retry stream that will resubscribe on disconnect. +/// +/// It's important to note that this function is intended to work only for stateless subscriptions. 
+/// If the subscription takes input or modifies state, this function should not be used. +/// +/// # Example +/// +/// ```rust,no_run,standalone_crate +/// use pezkuwi_subxt::backend::{utils::retry_stream, StreamOf}; +/// use futures::future::FutureExt; +/// +/// #[tokio::main] +/// async fn main() { +/// retry_stream(|| { +/// // This needs to return a stream of results but if you are using +/// // the subxt backend already it will return StreamOf so you can just +/// // return it directly in the async block below. +/// async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() +/// }).await; +/// } +/// ``` +pub async fn retry_stream(sub_stream: F) -> Result, BackendError> +where + F: FnMut() -> ResubscribeFuture + Send + 'static + Clone, + R: Send + 'static, +{ + let stream = retry(sub_stream.clone()).await?; + + let resubscribe = Box::new(move || { + let sub_stream = sub_stream.clone(); + async move { retry(sub_stream).await }.boxed() + }); + + // The extra Box is to encapsulate the retry subscription type + Ok(StreamOf::new(Box::pin(RetrySubscription { + state: Some(PendingOrStream::Stream(stream)), + resubscribe, + }))) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::backend::StreamOf; + + fn disconnect_err() -> BackendError { + BackendError::Rpc( + pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into(), + ) + } + + fn custom_err() -> BackendError { + BackendError::Other(String::new()) + } + + #[tokio::test] + async fn retry_stream_works() { + let retry_stream = retry_stream(|| { + async { + Ok(StreamOf::new(Box::pin(futures::stream::iter([ + Ok(1), + Ok(2), + Ok(3), + Err(disconnect_err()), + ])))) + } + .boxed() + }) + .await + .unwrap(); + + let result = retry_stream.take(5).collect::>>().await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Ok(r) if r == 2)); + assert!(matches!(result[2], Ok(r) if r == 3)); + assert!(matches!(result[3], Err(ref e) if 
e.is_disconnected_will_reconnect())); + assert!(matches!(result[4], Ok(r) if r == 1)); + } + + #[tokio::test] + async fn retry_sub_works() { + let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); + + let resubscribe = Box::new(move || { + async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() + }); + + let retry_stream = RetrySubscription { + state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + resubscribe, + }; + + let result: Vec<_> = retry_stream.collect().await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); + assert!(matches!(result[2], Ok(r) if r == 2)); + } + + #[tokio::test] + async fn retry_sub_err_terminates_stream() { + let stream = futures::stream::iter([Ok(1)]); + let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); + + let retry_stream = RetrySubscription { + state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + resubscribe, + }; + + assert_eq!(retry_stream.count().await, 1); + } + + #[tokio::test] + async fn retry_sub_resubscribe_err() { + let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); + let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); + + let retry_stream = RetrySubscription { + state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + resubscribe, + }; + + let result: Vec<_> = retry_stream.collect().await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); + assert!(matches!(result[2], Err(ref e) if matches!(e, BackendError::Other(_)))); + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/blocks/block_types.rs b/vendor/pezkuwi-subxt/subxt/src/blocks/block_types.rs new file mode 100644 index 00000000..e3a36025 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/blocks/block_types.rs @@ -0,0 +1,182 @@ +// Copyright 2019-2025 
Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + backend::BlockRef, + blocks::Extrinsics, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor, Header}, + error::{AccountNonceError, BlockError, EventsError, ExtrinsicError}, + events, + runtime_api::RuntimeApi, + storage::StorageClientAt, +}; + +use codec::{Decode, Encode}; +use futures::lock::Mutex as AsyncMutex; +use std::sync::Arc; + +/// A representation of a block. +pub struct Block { + header: T::Header, + block_ref: BlockRef>, + client: C, + // Since we obtain the same events for every extrinsic, let's + // cache them so that we only ever do that once: + cached_events: CachedEvents, +} + +impl Clone for Block { + fn clone(&self) -> Self { + Self { + header: self.header.clone(), + block_ref: self.block_ref.clone(), + client: self.client.clone(), + cached_events: self.cached_events.clone(), + } + } +} + +// A cache for our events so we don't fetch them more than once when +// iterating over events for extrinsics. +pub(crate) type CachedEvents = Arc>>>; + +impl Block +where + T: Config, + C: OfflineClientT, +{ + pub(crate) fn new(header: T::Header, block_ref: BlockRef>, client: C) -> Self { + Block { header, block_ref, client, cached_events: Default::default() } + } + + /// Return a reference to the given block. While this reference is kept alive, + /// the backend will (if possible) endeavour to keep hold of the block. + pub fn reference(&self) -> BlockRef> { + self.block_ref.clone() + } + + /// Return the block hash. + pub fn hash(&self) -> HashFor { + self.block_ref.hash() + } + + /// Return the block number. + pub fn number(&self) -> ::Number { + self.header().number() + } + + /// Return the entire block header. 
+ pub fn header(&self) -> &T::Header { + &self.header + } +} + +impl Block +where + T: Config, + C: OnlineClientT, +{ + /// Return the events associated with the block, fetching them from the node if necessary. + pub async fn events(&self) -> Result, EventsError> { + get_events(&self.client, self.hash(), &self.cached_events).await + } + + /// Fetch and return the extrinsics in the block body. + pub async fn extrinsics(&self) -> Result, ExtrinsicError> { + let block_hash = self.hash(); + + let extrinsics = self + .client + .backend() + .block_body(block_hash) + .await + .map_err(ExtrinsicError::CannotGetBlockBody)? + .ok_or_else(|| ExtrinsicError::BlockNotFound(block_hash.into()))?; + + let extrinsics = Extrinsics::new( + self.client.clone(), + extrinsics, + self.cached_events.clone(), + block_hash, + )?; + + Ok(extrinsics) + } + + /// Work with storage. + pub fn storage(&self) -> StorageClientAt { + StorageClientAt::new(self.client.clone(), self.block_ref.clone()) + } + + /// Execute a runtime API call at this block. + pub async fn runtime_api(&self) -> RuntimeApi { + RuntimeApi::new(self.client.clone(), self.block_ref.clone()) + } + + /// Get the account nonce for a given account ID at this block. + pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { + get_account_nonce(&self.client, account_id, self.hash()).await.map_err(|e| { + BlockError::AccountNonceError { + block_hash: self.hash().into(), + account_id: account_id.encode().into(), + reason: e, + } + }) + } +} + +// Return Events from the cache, or fetch from the node if needed. +pub(crate) async fn get_events( + client: &C, + block_hash: HashFor, + cached_events: &AsyncMutex>>, +) -> Result, EventsError> +where + T: Config, + C: OnlineClientT, +{ + // Acquire lock on the events cache. We either get back our events or we fetch and set them + // before unlocking, so only one fetch call should ever be made. 
We do this because the + // same events can be shared across all extrinsics in the block. + let mut lock = cached_events.lock().await; + let events = match &*lock { + Some(events) => events.clone(), + None => { + let events = events::EventsClient::new(client.clone()).at(block_hash).await?; + lock.replace(events.clone()); + events + }, + }; + + Ok(events) +} + +// Return the account nonce at some block hash for an account ID. +pub(crate) async fn get_account_nonce( + client: &C, + account_id: &T::AccountId, + block_hash: HashFor, +) -> Result +where + C: OnlineClientT, + T: Config, +{ + let account_nonce_bytes = client + .backend() + .call("AccountNonceApi_account_nonce", Some(&account_id.encode()), block_hash) + .await?; + + // custom decoding from a u16/u32/u64 into a u64, based on the number of bytes we got back. + let cursor = &mut &account_nonce_bytes[..]; + let account_nonce: u64 = match account_nonce_bytes.len() { + 2 => u16::decode(cursor)?.into(), + 4 => u32::decode(cursor)?.into(), + 8 => u64::decode(cursor)?, + _ => { + return Err(AccountNonceError::WrongNumberOfBytes(account_nonce_bytes.len())); + }, + }; + Ok(account_nonce) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/blocks/blocks_client.rs b/vendor/pezkuwi-subxt/subxt/src/blocks/blocks_client.rs new file mode 100644 index 00000000..73a2a843 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/blocks/blocks_client.rs @@ -0,0 +1,186 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::Block; +use crate::{ + backend::{BlockRef, StreamOfResults}, + client::OnlineClientT, + config::{Config, HashFor}, + error::BlockError, + utils::PhantomDataSendSync, +}; +use derive_where::derive_where; +use futures::StreamExt; +use std::future::Future; + +type BlockStream = StreamOfResults; +type BlockStreamRes = Result, BlockError>; + +/// A client for working with blocks. 
+#[derive_where(Clone; Client)] +pub struct BlocksClient { + client: Client, + _marker: PhantomDataSendSync, +} + +impl BlocksClient { + /// Create a new [`BlocksClient`]. + pub fn new(client: Client) -> Self { + Self { client, _marker: PhantomDataSendSync::new() } + } +} + +impl BlocksClient +where + T: Config, + Client: OnlineClientT, +{ + /// Obtain block details given the provided block hash. + /// + /// # Warning + /// + /// This call only supports blocks produced since the most recent + /// runtime upgrade. You can attempt to retrieve older blocks, + /// but may run into errors attempting to work with them. + pub fn at( + &self, + block_ref: impl Into>>, + ) -> impl Future, BlockError>> + Send + 'static { + self.at_or_latest(Some(block_ref.into())) + } + + /// Obtain block details of the latest finalized block. + pub fn at_latest( + &self, + ) -> impl Future, BlockError>> + Send + 'static { + self.at_or_latest(None) + } + + /// Obtain block details given the provided block hash, or the latest block if `None` is + /// provided. + fn at_or_latest( + &self, + block_ref: Option>>, + ) -> impl Future, BlockError>> + Send + 'static { + let client = self.client.clone(); + async move { + // If a block ref isn't provided, we'll get the latest finalized ref to use. + let block_ref = match block_ref { + Some(r) => r, + None => client + .backend() + .latest_finalized_block_ref() + .await + .map_err(BlockError::CouldNotGetLatestBlock)?, + }; + + let maybe_block_header = + client.backend().block_header(block_ref.hash()).await.map_err(|e| { + BlockError::CouldNotGetBlockHeader { + block_hash: block_ref.hash().into(), + reason: e, + } + })?; + + let block_header = match maybe_block_header { + Some(header) => header, + None => { + return Err(BlockError::BlockNotFound { block_hash: block_ref.hash().into() }); + }, + }; + + Ok(Block::new(block_header, block_ref, client)) + } + } + + /// Subscribe to all new blocks imported by the node. 
+ /// + /// **Note:** You probably want to use [`Self::subscribe_finalized()`] most of + /// the time. + pub fn subscribe_all( + &self, + ) -> impl Future>, BlockError>> + Send + 'static + where + Client: Send + Sync + 'static, + { + let client = self.client.clone(); + let hasher = client.hasher(); + header_sub_fut_to_block_sub(self.clone(), async move { + let stream = client + .backend() + .stream_all_block_headers(hasher) + .await + .map_err(BlockError::CouldNotSubscribeToAllBlocks)?; + BlockStreamRes::Ok(stream) + }) + } + + /// Subscribe to all new blocks imported by the node onto the current best fork. + /// + /// **Note:** You probably want to use [`Self::subscribe_finalized()`] most of + /// the time. + pub fn subscribe_best( + &self, + ) -> impl Future>, BlockError>> + Send + 'static + where + Client: Send + Sync + 'static, + { + let client = self.client.clone(); + let hasher = client.hasher(); + header_sub_fut_to_block_sub(self.clone(), async move { + let stream = client + .backend() + .stream_best_block_headers(hasher) + .await + .map_err(BlockError::CouldNotSubscribeToBestBlocks)?; + BlockStreamRes::Ok(stream) + }) + } + + /// Subscribe to finalized blocks. + pub fn subscribe_finalized( + &self, + ) -> impl Future>, BlockError>> + Send + 'static + where + Client: Send + Sync + 'static, + { + let client = self.client.clone(); + let hasher = client.hasher(); + header_sub_fut_to_block_sub(self.clone(), async move { + let stream = client + .backend() + .stream_finalized_block_headers(hasher) + .await + .map_err(BlockError::CouldNotSubscribeToFinalizedBlocks)?; + BlockStreamRes::Ok(stream) + }) + } +} + +/// Take a promise that will return a subscription to some block headers, +/// and return a subscription to some blocks based on this. 
+async fn header_sub_fut_to_block_sub( + blocks_client: BlocksClient, + sub: S, +) -> Result>, BlockError> +where + T: Config, + S: Future>)>, BlockError>> + + Send + + 'static, + Client: OnlineClientT + Send + Sync + 'static, +{ + let sub = sub.await?.then(move |header_and_ref| { + let client = blocks_client.client.clone(); + async move { + let (header, block_ref) = match header_and_ref { + Ok(header_and_ref) => header_and_ref, + Err(e) => return Err(e), + }; + + Ok(Block::new(header, block_ref, client)) + } + }); + BlockStreamRes::Ok(StreamOfResults::new(Box::pin(sub))) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/blocks/extrinsic_types.rs b/vendor/pezkuwi-subxt/subxt/src/blocks/extrinsic_types.rs new file mode 100644 index 00000000..8a9c9e68 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/blocks/extrinsic_types.rs @@ -0,0 +1,334 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + blocks::block_types::{CachedEvents, get_events}, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor}, + error::{EventsError, ExtrinsicDecodeErrorAt, ExtrinsicError}, + events, +}; +use derive_where::derive_where; +use pezkuwi_subxt_core::blocks::{ + ExtrinsicDetails as CoreExtrinsicDetails, Extrinsics as CoreExtrinsics, +}; +use scale_decode::{DecodeAsFields, DecodeAsType}; + +// Re-export anything that's directly returned/used in the APIs below. +pub use pezkuwi_subxt_core::blocks::{ + ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, StaticExtrinsic, +}; + +/// The body of a block. 
+pub struct Extrinsics { + inner: CoreExtrinsics, + client: C, + cached_events: CachedEvents, + hash: HashFor, +} + +impl Extrinsics +where + T: Config, + C: OfflineClientT, +{ + pub(crate) fn new( + client: C, + extrinsics: Vec>, + cached_events: CachedEvents, + hash: HashFor, + ) -> Result { + let inner = CoreExtrinsics::decode_from(extrinsics, client.metadata())?; + Ok(Self { inner, client, cached_events, hash }) + } + + /// See [`pezkuwi_subxt_core::blocks::Extrinsics::len()`]. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// See [`pezkuwi_subxt_core::blocks::Extrinsics::is_empty()`]. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Return the block hash that these extrinsics are from. + pub fn block_hash(&self) -> HashFor { + self.hash + } + + /// Returns an iterator over the extrinsics in the block body. + // Dev note: The returned iterator is 'static + Send so that we can box it up and make + // use of it with our `FilterExtrinsic` stuff. + pub fn iter(&self) -> impl Iterator> + Send + Sync + 'static { + let client = self.client.clone(); + let cached_events = self.cached_events.clone(); + let block_hash = self.hash; + + self.inner.iter().map(move |inner| { + ExtrinsicDetails::new(inner, client.clone(), block_hash, cached_events.clone()) + }) + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return only those which should decode to the provided `E` type. + /// If an error occurs, all subsequent iterations return `None`. 
+ pub fn find( + &self, + ) -> impl Iterator, ExtrinsicError>> { + self.inner.find::().map(|res| { + match res { + Err(e) => Err(ExtrinsicError::from(e)), + Ok(ext) => { + // Wrap details from subxt-core into what we want here: + let details = ExtrinsicDetails::new( + ext.details, + self.client.clone(), + self.hash, + self.cached_events.clone(), + ); + + Ok(FoundExtrinsic { details, value: ext.value }) + }, + } + }) + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return the first extrinsic found which decodes to the provided `E` type. + pub fn find_first( + &self, + ) -> Result>, ExtrinsicError> { + self.find::().next().transpose() + } + + /// Iterate through the extrinsics using metadata to dynamically decode and skip + /// them, and return the last extrinsic found which decodes to the provided `Ev` type. + pub fn find_last( + &self, + ) -> Result>, ExtrinsicError> { + self.find::().last().transpose() + } + + /// Find an extrinsics that decodes to the type provided. Returns true if it was found. + pub fn has(&self) -> Result { + Ok(self.find::().next().transpose()?.is_some()) + } +} + +/// A single extrinsic in a block. +pub struct ExtrinsicDetails { + inner: CoreExtrinsicDetails, + /// The block hash of this extrinsic (needed to fetch events). + block_hash: HashFor, + /// Subxt client. + client: C, + /// Cached events. + cached_events: CachedEvents, +} + +impl ExtrinsicDetails +where + T: Config, + C: OfflineClientT, +{ + // Attempt to dynamically decode a single extrinsic from the given input. + pub(crate) fn new( + inner: CoreExtrinsicDetails, + client: C, + block_hash: HashFor, + cached_events: CachedEvents, + ) -> ExtrinsicDetails { + ExtrinsicDetails { inner, client, block_hash, cached_events } + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::hash()`]. + pub fn hash(&self) -> HashFor { + self.inner.hash() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::is_signed()`]. 
+ pub fn is_signed(&self) -> bool { + self.inner.is_signed() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::index()`]. + pub fn index(&self) -> u32 { + self.inner.index() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::bytes()`]. + pub fn bytes(&self) -> &[u8] { + self.inner.bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::call_bytes()`]. + pub fn call_bytes(&self) -> &[u8] { + self.inner.call_bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::field_bytes()`]. + pub fn field_bytes(&self) -> &[u8] { + self.inner.field_bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::address_bytes()`]. + pub fn address_bytes(&self) -> Option<&[u8]> { + self.inner.address_bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::signature_bytes()`]. + pub fn signature_bytes(&self) -> Option<&[u8]> { + self.inner.signature_bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::transaction_extensions_bytes()`]. + pub fn transaction_extensions_bytes(&self) -> Option<&[u8]> { + self.inner.transaction_extensions_bytes() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::transaction_extensions()`]. + pub fn transaction_extensions(&self) -> Option> { + self.inner.transaction_extensions() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::pallet_index()`]. + pub fn pallet_index(&self) -> u8 { + self.inner.pallet_index() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::call_index()`]. + pub fn call_index(&self) -> u8 { + self.inner.call_index() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::pallet_name()`]. + pub fn pallet_name(&self) -> &str { + self.inner.pallet_name() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::call_name()`]. + pub fn call_name(&self) -> &str { + self.inner.call_name() + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::decode_as_fields()`]. 
+ pub fn decode_as_fields(&self) -> Result { + self.inner.decode_as_fields().map_err(Into::into) + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::as_extrinsic()`]. + pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { + self.inner.as_extrinsic::().map_err(Into::into) + } + + /// See [`pezkuwi_subxt_core::blocks::ExtrinsicDetails::as_root_extrinsic()`]. + pub fn as_root_extrinsic(&self) -> Result { + self.inner.as_root_extrinsic::().map_err(Into::into) + } +} + +impl ExtrinsicDetails +where + T: Config, + C: OnlineClientT, +{ + /// The events associated with the extrinsic. + pub async fn events(&self) -> Result, EventsError> { + let events = get_events(&self.client, self.block_hash, &self.cached_events).await?; + let ext_hash = self.inner.hash(); + Ok(ExtrinsicEvents::new(ext_hash, self.index(), events)) + } +} + +/// A Static Extrinsic found in a block coupled with it's details. +pub struct FoundExtrinsic { + /// Details for the extrinsic. + pub details: ExtrinsicDetails, + /// The decoded extrinsic value. + pub value: E, +} + +/// The events associated with a given extrinsic. +#[derive_where(Debug)] +pub struct ExtrinsicEvents { + // The hash of the extrinsic (handy to expose here because + // this type is returned from TxProgress things in the most + // basic flows, so it's the only place people can access it + // without complicating things for themselves). + ext_hash: HashFor, + // The index of the extrinsic: + idx: u32, + // All of the events in the block: + events: events::Events, +} + +impl ExtrinsicEvents { + /// Creates a new instance of `ExtrinsicEvents`. + #[doc(hidden)] + pub fn new(ext_hash: HashFor, idx: u32, events: events::Events) -> Self { + Self { ext_hash, idx, events } + } + + /// The index of the extrinsic that these events are produced from. + pub fn extrinsic_index(&self) -> u32 { + self.idx + } + + /// Return the hash of the extrinsic. 
+ pub fn extrinsic_hash(&self) -> HashFor { + self.ext_hash + } + + /// Return all of the events in the block that the extrinsic is in. + pub fn all_events_in_block(&self) -> &events::Events { + &self.events + } + + /// Iterate over all of the raw events associated with this transaction. + /// + /// This works in the same way that [`events::Events::iter()`] does, with the + /// exception that it filters out events not related to the submitted extrinsic. + pub fn iter(&self) -> impl Iterator, EventsError>> { + self.events.iter().filter(|ev| { + ev.as_ref() + .map(|ev| ev.phase() == events::Phase::ApplyExtrinsic(self.idx)) + .unwrap_or(true) // Keep any errors. + }) + } + + /// Find all of the transaction events matching the event type provided as a generic parameter. + /// + /// This works in the same way that [`events::Events::find()`] does, with the + /// exception that it filters out events not related to the submitted extrinsic. + pub fn find(&self) -> impl Iterator> { + self.iter().filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) + } + + /// Iterate through the transaction events using metadata to dynamically decode and skip + /// them, and return the first event found which decodes to the provided `Ev` type. + /// + /// This works in the same way that [`events::Events::find_first()`] does, with the + /// exception that it ignores events not related to the submitted extrinsic. + pub fn find_first(&self) -> Result, EventsError> { + self.find::().next().transpose() + } + + /// Iterate through the transaction events using metadata to dynamically decode and skip + /// them, and return the last event found which decodes to the provided `Ev` type. + /// + /// This works in the same way that [`events::Events::find_last()`] does, with the + /// exception that it ignores events not related to the submitted extrinsic. 
+ pub fn find_last(&self) -> Result, EventsError> { + self.find::().last().transpose() + } + + /// Find an event in those associated with this transaction. Returns true if it was found. + /// + /// This works in the same way that [`events::Events::has()`] does, with the + /// exception that it ignores events not related to the submitted extrinsic. + pub fn has(&self) -> Result { + Ok(self.find::().next().transpose()?.is_some()) + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/blocks/mod.rs b/vendor/pezkuwi-subxt/subxt/src/blocks/mod.rs new file mode 100644 index 00000000..82349465 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/blocks/mod.rs @@ -0,0 +1,22 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes the necessary functionality for working with events. + +mod block_types; +mod blocks_client; +mod extrinsic_types; + +/// A reference to a block. +pub use crate::backend::BlockRef; + +pub use block_types::Block; +pub use blocks_client::BlocksClient; +pub use extrinsic_types::{ + ExtrinsicDetails, ExtrinsicEvents, ExtrinsicTransactionExtension, + ExtrinsicTransactionExtensions, Extrinsics, FoundExtrinsic, StaticExtrinsic, +}; + +// We get account nonce info in tx_client, too, so re-use the logic: +pub(crate) use block_types::get_account_nonce; diff --git a/vendor/pezkuwi-subxt/subxt/src/book/mod.rs b/vendor/pezkuwi-subxt/subxt/src/book/mod.rs new file mode 100644 index 00000000..dc225dd0 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/mod.rs @@ -0,0 +1,109 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +// Dev note; I used the following command to normalize and wrap comments: +// rustfmt +nightly --config wrap_comments=true,comment_width=100,normalize_comments=true +// subxt/src/book/custom_values It messed up comments in code blocks though, so be prepared to go +// and fix those. + +//! # The Subxt Guide +//! +//! Subxt is a library for interacting with Bizinikiwi based nodes. It has a focus on **sub**mitting +//! e**xt**rinsics, hence the name, however it's also capable of reading blocks, storage, events and +//! constants from a node. The aim of this guide is to explain key concepts and get you started with +//! using Subxt. +//! +//! 1. [Features](#features-at-a-glance) +//! 2. [Limitations](#limitations) +//! 3. [Quick start](#quick-start) +//! 4. [Usage](#usage) +//! +//! ## Features at a glance +//! +//! Here's a quick overview of the features that Subxt has to offer: +//! +//! - Subxt allows you to generate a static, type safe interface to a node given some metadata; this +//! allows you to catch many errors at compile time rather than runtime. +//! - Subxt also makes heavy use of node metadata to encode/decode the data sent to/from it. This +//! allows it to target almost any node which can output the correct metadata, and allows it some +//! flexibility in encoding and decoding things to account for cross-node differences. +//! - Subxt has a pallet-oriented interface, meaning that code you write to talk to some pallet on +//! one node will often "Just Work" when pointed at different nodes that use the same pallet. +//! - Subxt can work offline; you can generate and sign transactions, access constants from node +//! metadata and more, without a network connection. This is all checked at compile time, so you +//! can be certain it won't try to establish a network connection if you don't want it to. +//! - Subxt can forego the statically generated interface and build transactions, storage queries +//! 
and constant queries using data provided at runtime, rather than queries constructed +//! statically. +//! - Subxt can be compiled to WASM to run in the browser, allowing it to back Rust based browser +//! apps, or even bind to JS apps. +//! +//! ## Limitations +//! +//! In various places, you can provide a block hash to access data at a particular block, for +//! instance: +//! +//! - [`crate::storage::StorageClient::at`] +//! - [`crate::events::EventsClient::at`] +//! - [`crate::blocks::BlocksClient::at`] +//! - [`crate::runtime_api::RuntimeApiClient::at`] +//! +//! However, Subxt is (by default) only capable of properly working with blocks that were produced +//! after the most recent runtime update. This is because it uses the most recent metadata given +//! back by a node to encode and decode things. It's possible to decode older blocks produced by a +//! runtime that emits compatible (currently, V14) metadata by manually setting the metadata used by +//! the client using [`crate::client::OnlineClient::set_metadata()`]. +//! +//! Subxt does not support working with blocks produced prior to the runtime update that introduces +//! V14 metadata. It may have some success decoding older blocks using newer metadata, but may also +//! completely fail to do so. +//! +//! ## Quick start +//! +//! Here is a simple but complete example of using Subxt to transfer some tokens from the example +//! accounts, Alice to Bob: +//! +//! ```rust,ignore +#![doc = include_str!("../../examples/tx_basic.rs")] +//! ``` +//! +//! This example assumes that a Pezkuwi node is running locally (Subxt endeavors to support all +//! recent releases). Typically, to use Subxt to talk to some custom Bizinikiwi node (for example a +//! parachain node), you'll want to: +//! +//! 1. [Generate an interface](setup::codegen) +//! 2. [Create a config](setup::config) +//! 3. [Use the config to instantiate the client](setup::client) +//! +//! Follow the above links to learn more about each step. +//! +//! 
## Usage +//! +//! Once Subxt is configured, the next step is interacting with a node. Follow the links +//! below to learn more about how to use Subxt for each of the following things: +//! +//! - [Transactions](usage::transactions): Subxt can build and submit transactions, wait until they are in +//! blocks, and retrieve the associated events. +//! - [Storage](usage::storage): Subxt can query the node storage. +//! - [Events](usage::events): Subxt can read the events emitted for recent blocks. +//! - [Constants](usage::constants): Subxt can access the constant values stored in a node, which +//! remain the same for a given runtime version. +//! - [Blocks](usage::blocks): Subxt can load recent blocks or subscribe to new/finalized blocks, +//! reading the extrinsics, events and storage at these blocks. +//! - [Runtime APIs](usage::runtime_apis): Subxt can make calls into pallet runtime APIs to retrieve +//! data. +//! - [Custom values](usage::custom_values): Subxt can access "custom values" stored in the metadata. +//! - [Raw RPC calls](usage::rpc): Subxt can be used to make raw RPC requests to compatible nodes. +//! +//! ## Examples +//! +//! Some complete, self contained examples which are not a part of this guide: +//! +//! - [`parachain-example`](https://github.com/pezkuwichain/subxt/tree/master/examples/parachain-example) is an example +//! which uses Zombienet to spawn a parachain locally, and then connects to it using Subxt. +//! - [`wasm-example`](https://github.com/pezkuwichain/subxt/tree/master/examples/wasm-example) is an example of writing +//! a Rust app that contains a Yew based UI, uses Subxt to interact with a chain, and compiles to WASM in order to +//! run entirely in the browser. 
+pub mod setup; +pub mod usage; diff --git a/vendor/pezkuwi-subxt/subxt/src/book/setup/client.rs b/vendor/pezkuwi-subxt/subxt/src/book/setup/client.rs new file mode 100644 index 00000000..02f2f2aa --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/setup/client.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # The Subxt client. +//! +//! The client forms the entry point to all of the Subxt APIs. Every client implements one or +//! both of [`crate::client::OfflineClientT`] and [`crate::client::OnlineClientT`]. +//! +//! Subxt ships with three clients which implement one or both of traits: +//! - An [online client](crate::client::OnlineClient). +//! - An [offline client](crate::client::OfflineClient). +//! - A light client (which is currently still unstable). +//! +//! In theory it's possible for users to implement their own clients, although this isn't generally +//! expected. +//! +//! The provided clients are all generic over the [`crate::config::Config`] that they accept, which +//! determines how they will interact with the chain. +//! +//! In the case of the [`crate::OnlineClient`], we have various ways to instantiate it: +//! +//! - [`crate::OnlineClient::new()`] to connect to a node running locally. This uses the default +//! Subxt backend, and the default RPC client. +//! - [`crate::OnlineClient::from_url()`] to connect to a node at a specific URL. This uses the +//! default Subxt backend, and the default RPC client. +//! - [`crate::OnlineClient::from_rpc_client()`] to instantiate the client with a +//! [`crate::backend::rpc::RpcClient`]. +//! - [`crate::OnlineClient::from_backend()`] to instantiate Subxt using a custom backend. Currently +//! there is just one backend, [`crate::backend::legacy::LegacyBackend`]. This backend can be +//! instantiated from a [`crate::backend::rpc::RpcClient`]. +//! +//! 
+ [`crate::backend::rpc::RpcClient`] can itself be instantiated from anything that implements the +//! low level [`crate::backend::rpc::RpcClientT`] trait; this allows you to decide how Subxt will +//! attempt to talk to a node if you'd prefer something other than the default client. We use this approach +//! under the hood to implement the light client. +//! +//! ## Examples +//! +//! Most of the other examples will instantiate a client. Here are a couple of examples for less +//! common cases. +//! +//! ### Writing a custom [`crate::backend::rpc::RpcClientT`] implementation: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/setup_client_custom_rpc.rs")] +//! ``` +//! +//! ### Creating an [`crate::OfflineClient`]: +//! ```rust,ignore +#![doc = include_str!("../../../examples/setup_client_offline.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/setup/codegen.rs b/vendor/pezkuwi-subxt/subxt/src/book/setup/codegen.rs new file mode 100644 index 00000000..c459c1b4 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/setup/codegen.rs @@ -0,0 +1,65 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Generating an interface +//! +//! The simplest way to use Subxt is to generate an interface to a chain that you'd like to interact +//! with. This generated interface allows you to build transactions and construct queries to access +//! data while leveraging the full type safety of the Rust compiler. +//! +//! ## The `#[subxt]` macro +//! +//! The most common way to generate the interface is to use the [`#[subxt]`](crate::subxt) macro. +//! Using this macro looks something like: +//! +//! ```rust,no_run,standalone_crate +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_tiny.scale")] +//! pub mod pezkuwi {} +//! ``` +//!
The macro takes a path to some node metadata, and uses that to generate the interface you'll use +//! to talk to it. [Go here](crate::subxt) to learn more about the options available to the macro. +//! +//! To obtain this metadata you'll need for the above, you can use the `subxt` CLI tool to download +//! it from a node. The tool can be installed via `cargo`: +//! +//! ```shell +//! cargo install subxt-cli +//! ``` +//! +//! And then it can be used to fetch metadata and save it to a file: +//! +//! ```shell +//! # Download and save all of the metadata: +//! subxt metadata > metadata.scale +//! # Download and save only the pallets you want to generate an interface for: +//! subxt metadata --pallets Balances,System > metadata.scale +//! ``` +//! +//! Explicitly specifying pallets will cause the tool to strip out all unnecessary metadata and type +//! information, making the bundle much smaller in the event that you only need to generate an +//! interface for a subset of the available pallets on the node. +//! +//! ## The CLI tool +//! +//! Using the [`#[subxt]`](crate::subxt) macro carries some downsides: +//! +//! - Using it to generate an interface will have a small impact on compile times (though much less +//! of one if you only need a few pallets). +//! - IDE support for autocompletion and documentation when using the macro interface can be poor. +//! - It's impossible to manually look at the generated code to understand and debug things. +//! +//! If these are an issue, you can manually generate the same code that the macro generates under +//! the hood by using the `subxt codegen` command: +//! +//! ```shell +//! # Install the CLI tool if you haven't already: +//! cargo install subxt-cli +//! # Generate and format rust code, saving it to `interface.rs`: +//! subxt codegen | rustfmt > interface.rs +//! ``` +//! +//! Use `subxt codegen --help` for more options; many of the options available via the macro are +//! 
also available via the CLI tool, such as the ability to substitute generated types for others, +//! or strip out docs from the generated code. diff --git a/vendor/pezkuwi-subxt/subxt/src/book/setup/config.rs b/vendor/pezkuwi-subxt/subxt/src/book/setup/config.rs new file mode 100644 index 00000000..2756e0cb --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/setup/config.rs @@ -0,0 +1,184 @@ +//! # Creating a Config +//! +//! Subxt requires you to provide a type implementing [`crate::config::Config`] in order to connect +//! to a node. The [`crate::config::Config`] trait for the most part mimics the +//! `frame_system::Config` trait. For most use cases, you can just use one of the following Configs +//! shipped with Subxt: +//! +//! - [`PezkuwiConfig`](crate::config::PezkuwiConfig) for talking to Pezkuwi nodes, and +//! - [`BizinikiwConfig`](crate::config::BizinikiwConfig) for talking to generic nodes built with +//! Bizinikiwi. +//! +//! # How to create a Config for a custom chain? +//! +//! Some chains may use config that is not compatible with our +//! [`PezkuwiConfig`](crate::config::PezkuwiConfig) or +//! [`BizinikiwConfig`](crate::config::BizinikiwConfig). +//! +//! We now walk through creating a custom [`crate::config::Config`] for a parachain, using the +//! ["Statemint"](https://parachains.info/details/statemint) parachain, also known as "Asset Hub", as an example. It +//! is currently (as of 2023-06-26) deployed on Pezkuwi and [Kusama (as "Statemine")](https://parachains.info/details/statemine). +//! +//! To construct a valid [`crate::config::Config`] implementation, we need to find out which types +//! to use for `AccountId`, `Hasher`, etc. For this, we need to take a look at the source code of Statemint, which is currently a part of the [Pezpezcumulus Github repository](https://github.com/pezkuwichain/pezcumulus). +//! 
The crate defining the asset hub runtime can be found [here](https://github.com/pezkuwichain/pezcumulus/tree/master/parachains/runtimes/assets/asset-hub-pezkuwi). +//! +//! ## `AccountId`, `Hash`, `Hasher` and `Header` +//! +//! For these config types, we need to find out where the parachain runtime implements the +//! `frame_system::Config` trait. Look for a code fragment like `impl frame_system::Config for +//! Runtime { ... }` In the source code. For Statemint it looks like [this](https://github.com/pezkuwichain/pezcumulus/blob/e2b7ad2061824f490c08df27a922c64f50accd6b/parachains/runtimes/assets/asset-hub-pezkuwi/src/lib.rs#L179) +//! at the time of writing. The `AccountId`, `Hash` and `Header` types of the [frame_system::pallet::Config](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html) +//! correspond to the ones we want to use in our Subxt [crate::Config]. In the Case of Statemint +//! (Asset Hub) they are: +//! +//! - AccountId: `sp_core::crypto::AccountId32` +//! - Hash: `sp_core::H256` +//! - Hasher (type `Hashing` in [frame_system::pallet::Config](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html)): +//! `sp_runtime::traits::BlakeTwo256` +//! - Header: `sp_runtime::generic::Header` +//! +//! Subxt has its own versions of some of these types in order to avoid needing to pull in Bizinikiwi +//! dependencies: +//! +//! - `sp_core::crypto::AccountId32` can be swapped with [`crate::utils::AccountId32`]. +//! - `sp_core::H256` is a re-export which subxt also provides as +//! [`crate::config::bizinikiwi::H256`]. +//! - `sp_runtime::traits::BlakeTwo256` can be swapped with +//! [`crate::config::bizinikiwi::BlakeTwo256`]. +//! - `sp_runtime::generic::Header` can be swapped with +//! [`crate::config::bizinikiwi::BizinikiwiHeader`]. +//! +//! Having a look at how those types are implemented can give some clues as to how to implement +//! other custom types that you may need to use as part of your config. +//! +//! 
## `Address`, `Signature` +//! +//! A Bizinikiwi runtime is typically constructed by using the [frame_support::construct_runtime](https://docs.rs/frame-support/latest/frame_support/macro.construct_runtime.html) macro. +//! In this macro, we need to specify the type of an `UncheckedExtrinsic`. Most of the time, the +//! `UncheckedExtrinsic` will be of the type `sp_runtime::generic::UncheckedExtrinsic`. The generic parameters `Address` and `Signature` +//! specified when declaring the `UncheckedExtrinsic` type are the types for `Address` and +//! `Signature` we should use with our [crate::Config] implementation. This information can +//! also be obtained from the metadata (see [`frame_metadata::v15::ExtrinsicMetadata`]). In case of +//! Statemint (Pezkuwi Asset Hub) we see the following types being used in `UncheckedExtrinsic`: +//! +//! - Address: `sp_runtime::MultiAddress` +//! - Signature: `sp_runtime::MultiSignature` +//! +//! As above, Subxt has its own versions of these types that can be used instead to avoid pulling in +//! Bizinikiwi dependencies. Using the Subxt versions also makes interacting with generated code +//! (which uses them in some places) a little nicer: +//! +//! - `sp_runtime::MultiAddress` can be swapped with [`crate::utils::MultiAddress`]. +//! - `sp_runtime::MultiSignature` can be swapped with [`crate::utils::MultiSignature`]. +//! +//! ## ExtrinsicParams +//! +//! Chains each have a set of "transaction extensions" (formally called "signed extensions") +//! configured. Transaction extensions provide a means to extend how transactions work. Each +//! transaction extension can potentially encode some "extra" data which is sent along with a +//! transaction, as well as some "additional" data which is included in the transaction signer +//! payload, but not transmitted along with the transaction. On a node, transaction extensions can +//! then perform additional checks on the submitted transactions to ensure their validity. +//! +//! 
The `ExtrinsicParams` config type expects to be given an implementation of the +//! [`crate::config::ExtrinsicParams`] trait. Implementations of the +//! [`crate::config::ExtrinsicParams`] trait are handed some parameters from Subxt itself, and can +//! accept arbitrary other `Params` from users, and are then expected to provide this "extra" and +//! "additional" data when asked via the required [`crate::config::ExtrinsicParamsEncoder`] impl. +//! +//! **In most cases, the default [crate::config::DefaultExtrinsicParams] type will work**: it +//! understands the "standard" transaction extensions that are in use, and allows the user to +//! provide things like a tip, and set the extrinsic mortality via +//! [`crate::config::DefaultExtrinsicParamsBuilder`]. It will use the chain metadata to decide which +//! transaction extensions to use and in which order. It will return an error if the chain uses a +//! transaction extension which it doesn't know how to handle. +//! +//! If the chain uses novel transaction extensions (or if you just wish to provide a different +//! interface for users to configure transactions), you can either: +//! +//! 1. Implement a new transaction extension and add it to the list. +//! 2. Implement [`crate::config::DefaultExtrinsicParams`] from scratch. +//! +//! See below for examples of each. +//! +//! ### Finding out which transaction extensions a chain is using. +//! +//! In either case, you'll want to find out which transaction extensions a chain is using. This +//! information can be obtained from the `SignedExtra` parameter of the `UncheckedExtrinsic` of your +//! parachain, which will be a tuple of transaction extensions. It can also be obtained from the +//! metadata (see [`frame_metadata::v15::SignedExtensionMetadata`]). +//! +//! For statemint, the transaction extensions look like +//! 
[this](https://github.com/pezkuwichain/pezcumulus/blob/d4bb2215bb28ee05159c4c7df1b3435177b5bf4e/parachains/runtimes/assets/asset-hub-pezkuwi/src/lib.rs#L786): +//! +//! ```rust,ignore +//! pub type SignedExtra = ( +//! frame_system::CheckNonZeroSender, +//! frame_system::CheckSpecVersion, +//! frame_system::CheckTxVersion, +//! frame_system::CheckGenesis, +//! frame_system::CheckEra, +//! frame_system::CheckNonce, +//! frame_system::CheckWeight, +//! pallet_asset_tx_payment::ChargeAssetTxPayment, +//! ); +//! ``` +//! +//! Each element of the `SignedExtra` tuple implements [codec::Encode] and +//! `sp_runtime::traits::SignedExtension` which has an associated type `AdditionalSigned` that also +//! implements [codec::Encode]. Let's look at the underlying types for each tuple element. All +//! zero-sized types have been replaced by `()` for simplicity. +//! +//! | tuple element | struct type | `AdditionalSigned` type | +//! | ------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | +//! | [`frame_system::CheckNonZeroSender`](https://docs.rs/frame-system/latest/frame_system/struct.CheckNonZeroSender.html) | () | () | +//! | [`frame_system::CheckSpecVersion`](https://docs.rs/frame-system/latest/frame_system/struct.CheckSpecVersion.html) | () | [u32] | +//! | [`frame_system::CheckTxVersion`](https://docs.rs/frame-system/latest/frame_system/struct.CheckTxVersion.html) | () | [u32] | +//! | [`frame_system::CheckGenesis`](https://docs.rs/frame-system/latest/frame_system/struct.CheckGenesis.html) | () | `Config::Hash` = `sp_core::H256` | +//! | [`frame_system::CheckMortality`](https://docs.rs/frame-system/latest/frame_system/struct.CheckMortality.html) | `sp_runtime::generic::Era` | `Config::Hash` = `sp_core::H256` | +//! 
| [`frame_system::CheckNonce`](https://docs.rs/frame-system/latest/frame_system/struct.CheckNonce.html) | `frame_system::pallet::Config::Index` = u32 | () | +//! | [`frame_system::CheckWeight`](https://docs.rs/frame-system/latest/frame_system/struct.CheckWeight.html) | () | () | +//! | [`frame_system::ChargeAssetTxPayment`](https://docs.rs/frame-system/latest/frame_system/struct.ChargeAssetTxPayment.html) | [pallet_asset_tx_payment::ChargeAssetTxPayment](https://docs.rs/pallet-asset-tx-payment/latest/pallet_asset_tx_payment/struct.ChargeAssetTxPayment.html) | () | +//! +//! All types in the `struct type` column make up the "extra" data that we're expected to provide. +//! All types in the `AdditionalSigned` column make up the "additional" data that we're expected to +//! provide. This information will be useful whether we want to implement +//! [`crate::config::TransactionExtension`] for a transaction extension, or implement +//! [`crate::config::ExtrinsicParams`] from scratch. +//! +//! As it happens, all of the transaction extensions in the table are either already exported in +//! [`crate::config::transaction_extensions`], or they hand back no "additional" or "extra" data. In +//! both of these cases, the default `ExtrinsicParams` configuration will work out of the box. +//! +//! ### Implementing and adding new transaction extensions to the config +//! +//! If you do need to implement a novel transaction extension, then you can implement +//! [`crate::config::transaction_extensions::TransactionExtension`] on a custom type and place it +//! into a new set of transaction extensions, like so: +//! +//! ```rust,ignore +#![doc = include_str ! ("../../../examples/setup_config_transaction_extension.rs")] +//! ``` +//! +//! ### Implementing [`crate::config::ExtrinsicParams`] from scratch +//! +//! Alternately, you are free to implement [`crate::config::ExtrinsicParams`] entirely from scratch if you know exactly what "extra" and +//! 
+ "additional" data your node needs and would prefer to craft your own interface. +//! +//! Let's see what this looks like (this config won't work on any real node): +//! ```rust,ignore +#![doc = include_str ! ("../../../examples/setup_config_custom.rs")] +//! ``` +//! +//! ### Using a type from the metadata as a config parameter +//! +//! You can also use types that are generated from chain metadata as type parameters of the Config trait. +//! Just make sure all trait bounds are satisfied. This can often be achieved by using custom derives with the subxt macro. +//! For example, the AssetHub Parachain expects tips to include a `MultiLocation`, which is a type we can draw from the metadata. +//! +//! This example shows what using the `MultiLocation` struct as part of your config would look like in subxt: +//! ```rust,ignore +#![doc = include_str ! ("../../../examples/setup_config_assethub.rs")] +//! ``` diff --git a/vendor/pezkuwi-subxt/subxt/src/book/setup/mod.rs b/vendor/pezkuwi-subxt/subxt/src/book/setup/mod.rs new file mode 100644 index 00000000..3dbcc37f --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/setup/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains details on setting up Subxt: +//! +//! - [Codegen](codegen) +//! - [Client](client) +//! +//! Alternately, [go back](super). + +pub mod client; +pub mod codegen; +pub mod config; diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/blocks.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/blocks.rs new file mode 100644 index 00000000..8ef13b0c --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/blocks.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Blocks +//! +//!
+ The [blocks API](crate::blocks::BlocksClient) in Subxt unifies many of the other interfaces, and +//! allows you to: +//! +//! - Access information about specific blocks (see [`crate::blocks::BlocksClient::at()`] and +//! [`crate::blocks::BlocksClient::at_latest()`]). +//! - Subscribe to [all](crate::blocks::BlocksClient::subscribe_all()), +//! [best](crate::blocks::BlocksClient::subscribe_best()) or +//! [finalized](crate::blocks::BlocksClient::subscribe_finalized()) blocks as they are produced. +//! **Prefer to subscribe to finalized blocks unless you know what you're doing.** +//! +//! In either case, you'll end up with [`crate::blocks::Block`]'s, from which you can access various +//! information about the block, such as the [header](crate::blocks::Block::header()), +//! [block number](crate::blocks::Block::number()) and [body (the +//! extrinsics)](crate::blocks::Block::extrinsics()). [`crate::blocks::Block`]'s also provide +//! shortcuts to other Subxt APIs that will operate at the given block: +//! +//! - [storage](crate::blocks::Block::storage()), +//! - [events](crate::blocks::Block::events()) +//! - [runtime APIs](crate::blocks::Block::runtime_api()) +//! +//! Aside from these links to other Subxt APIs, the main thing that we can do here is iterate over +//! and decode the extrinsics in a block body. +//! +//! ## Decoding Extrinsics +//! +//! Given a block, you can [download the block body](crate::blocks::Block::extrinsics()) and +//! [iterate over the extrinsics](crate::blocks::Extrinsics::iter) stored within it. The extrinsics +//! yielded are of type [ExtrinsicDetails](crate::blocks::ExtrinsicDetails), which is just a blob of +//! bytes that also stores which pallet and call in that pallet it belongs to. It also contains +//! information about signed extensions that have been used for submitting this extrinsic. +//! +//! To use the extrinsic, you probably want to decode it into a concrete Rust type. These Rust types +//!
representing extrinsics from different pallets can be generated from metadata using the subxt +//! macro or the CLI tool. +//! +//! When decoding the extrinsic into a static type you have two options: +//! +//! ### Statically decode the extrinsics into [the root extrinsic type](crate::blocks::ExtrinsicDetails::as_root_extrinsic()) +//! +//! The root extrinsic type generated by subxt is a Rust enum with one variant for each pallet. Each +//! of these variants has a field that is another enum whose variants cover all calls of the +//! respective pallet. If the extrinsic bytes are valid and your metadata matches the chain's +//! metadata, decoding the bytes of an extrinsic into this root extrinsic type should always +//! succeed. +//! +//! This example shows how to subscribe to blocks and decode the extrinsics in each block into the +//! root extrinsic type. Once we get hold of the +//! [ExtrinsicDetails](crate::blocks::ExtrinsicDetails), we can decode it statically or dynamically. +//! We can also access details about the extrinsic, including the associated events and transaction +//! extensions. +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/blocks_subscribing.rs")] +//! ``` +//! +//! ### Statically decode the extrinsic into [a specific pallet call](crate::blocks::ExtrinsicDetails::as_extrinsic()) +//! +//! This is useful if you are expecting a specific extrinsic to be part of some block. If the extrinsic you try to decode +//! is a different extrinsic, an `Ok(None)` value is returned from [`as_extrinsic::()`](crate::blocks::ExtrinsicDetails::as_extrinsic()); +//! +//! If you are only interested in finding specific extrinsics in a block, you can also [iterate over all of them](crate::blocks::Extrinsics::find), +//! get only [the first one](crate::blocks::Extrinsics::find_first), or [the last one](crate::blocks::Extrinsics::find_last). +//! +//! The following example monitors `TransferKeepAlive` extrinsics on the Pezkuwi network. +//! 
+ We statically decode them and access the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and +//! [account nonce](crate::blocks::ExtrinsicTransactionExtensions::nonce()) transaction extensions. +//! ```rust,ignore +#![doc = include_str!("../../../examples/block_decoding_static.rs")] +//! ``` +//! +//! ### Dynamically decode the extrinsic +//! +//! Sometimes you might use subxt with metadata that is not known at compile time. In this case, you do not +//! have access to a statically generated interface module that contains the relevant Rust types. You can +//! [decode ExtrinsicDetails dynamically](crate::blocks::ExtrinsicDetails::decode_as_fields()), which gives +//! you access to its fields as a [scale value composite](scale_value::Composite). The following example +//! looks for signed extrinsics on the Pezkuwi network and retrieves their pallet name, variant name, data +//! fields and transaction extensions dynamically. Notice how we do not need to use code generation via the +//! subxt macro. The only fixed component we provide is the [PezkuwiConfig](crate::config::PezkuwiConfig). +//! Other than that it works in a chain-agnostic way: +//! ```rust,ignore +#![doc = include_str!("../../../examples/block_decoding_dynamic.rs")] +//! ``` +//! +//! ## Decoding transaction extensions +//! +//! Extrinsics can contain transaction extensions. The transaction extensions can be different across chains. +//! The [Config](crate::Config) implementation for your chain defines which transaction extensions you expect. +//! Once you get hold of the [ExtrinsicDetails](crate::blocks::ExtrinsicDetails) for an extrinsic you are interested in, +//! you can try to [get its transaction extensions](crate::blocks::ExtrinsicDetails::transaction_extensions()). +//! These are only available on V4 signed extrinsics or V5 general extrinsics. You can try to +//! [find a specific transaction extension](crate::blocks::ExtrinsicTransactionExtensions::find), in the returned +//!
[transaction extensions](crate::blocks::ExtrinsicTransactionExtensions). +//! +//! Subxt also provides utility functions to get the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and +//! the [account nonce](crate::blocks::ExtrinsicTransactionExtensions::nonce()) associated with an extrinsic, given +//! its transaction extensions. If you prefer to do things dynamically you can get the data of the transaction extension +//! as a [scale value](crate::blocks::ExtrinsicTransactionExtension::value()). diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/constants.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/constants.rs new file mode 100644 index 00000000..ae43e071 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/constants.rs @@ -0,0 +1,54 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Constants +//! +//! There are various constants stored in a node; the types and values of these are defined in a +//! runtime, and can only change when the runtime is updated. Much like [`super::storage`], we can +//! query these using Subxt by taking the following steps: +//! +//! 1. [Constructing a constant query](#constructing-a-query). +//! 2. [Submitting the query to get back the associated value](#submitting-it). +//! +//! ## Constructing a constant query +//! +//! We can use the statically generated interface to build constant queries: +//! +//! ```rust,no_run,standalone_crate +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale")] +//! pub mod pezkuwi {} +//! +//! let constant_query = pezkuwi::constants().system().block_length(); +//! ``` +//! +//! Alternately, we can dynamically construct a constant query. A dynamic query needs the return +//! type to be specified, where we can use [`crate::dynamic::Value`] if unsure: +//! +//! ```rust,no_run,standalone_crate +//! 
use pezkuwi_subxt::dynamic::Value; +//! +//! let storage_query = pezkuwi_subxt::dynamic::constant::("System", "BlockLength"); +//! ``` +//! +//! ## Submitting it +//! +//! Call [`crate::constants::ConstantsClient::at()`] to return and decode the constant into the +//! type given by the address, or [`crate::constants::ConstantsClient::bytes_at()`] to return the +//! raw bytes for some constant. +//! +//! Constant values are pulled directly out of the node metadata which Subxt has +//! already acquired, and so this function requires no network access and is available from a +//! [`crate::OfflineClient`]. +//! +//! Here's an example using a static query: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/constants_static.rs")] +//! ``` +//! +//! And here's one using a dynamic query: +//! ```rust,ignore +#![doc = include_str!("../../../examples/constants_dynamic.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/custom_values.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/custom_values.rs new file mode 100644 index 00000000..8198c38e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/custom_values.rs @@ -0,0 +1,70 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Custom Values +//! +//! Bizinikiwi-based chains can expose custom values in their metadata. +//! Each of these values: +//! +//! - can be accessed by a unique __name__. +//! - refers to a concrete __type__ stored in the metadata. +//! - contains a scale encoded __value__ of that type. +//! +//! ## Getting a custom value +//! +//! First, you must construct an address to access a custom value. This can be either: +//! - a raw [`str`] which assumes the return type to be the dynamic [`crate::dynamic::Value`] type, +//! - created via [`dynamic`](crate::custom_values::dynamic) function whereby you set the return +//! type that you want back, +//! 
- created via statically generated addresses as part of the `#[subxt]` macro which define the +//! return type. +//! +//! With an address, use [`at`](crate::custom_values::CustomValuesClient::at) to access and decode +//! specific values, and [`bytes_at`](crate::custom_values::CustomValuesClient::bytes_at) to access +//! the raw bytes. +//! +//! ## Examples +//! +//! Dynamically accessing a custom value using a [`str`] to select which one: +//! +//! ```rust,ignore +//! use pezkuwi_subxt::{OnlineClient, PezkuwiConfig, ext::scale_decode::DecodeAsType}; +//! use pezkuwi_subxt::dynamic::Value; +//! +//! let api = OnlineClient::::new().await?; +//! let custom_value_client = api.custom_values(); +//! let foo: Value = custom_value_client.at("foo")?; +//! ``` +//! +//! Use the [`dynamic`](crate::custom_values::dynamic) function to select the return type: +//! +//! ```rust,ignore +//! use pezkuwi_subxt::{OnlineClient, PezkuwiConfig, ext::scale_decode::DecodeAsType}; +//! +//! #[derive(Decode, DecodeAsType, Debug)] +//! struct Foo { +//! n: u8, +//! b: bool, +//! } +//! +//! let api = OnlineClient::::new().await?; +//! let custom_value_client = api.custom_values(); +//! let custom_value_addr = pezkuwi_subxt::custom_values::dynamic::("foo"); +//! let foo: Foo = custom_value_client.at(&custom_value_addr)?; +//! ``` +//! +//! Alternatively we also provide a statically generated api for custom values: +//! +//! ```rust,ignore +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "some_metadata.scale")] +//! pub mod interface {} +//! +//! let static_address = interface::custom().foo(); +//! +//! let api = OnlineClient::::new().await?; +//! let custom_value_client = api.custom_values(); +//! +//! // Now the `at()` function already decodes the value into the Foo type: +//! let foo = custom_value_client.at(&static_address)?; +//! 
``` diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/events.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/events.rs new file mode 100644 index 00000000..18546347 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/events.rs @@ -0,0 +1,51 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Events +//! +//! In the process of adding extrinsics to a block, they are executed. When extrinsics are executed, +//! they normally produce events describing what's happening (at the very least, an event dictating +//! whether the extrinsic has succeeded or failed). The node may also emit some events of its own as +//! the block is processed. +//! +//! Events live in a single location in node storage which is overwritten at each block. Normal +//! nodes tend to keep a snapshot of the state at a small number of previous blocks, so you can +//! sometimes access older events by using [`crate::events::EventsClient::at()`] and providing an +//! older block hash. +//! +//! When we submit transactions using Subxt, methods like +//! [`crate::tx::TxProgress::wait_for_finalized_success()`] +//! return [`crate::blocks::ExtrinsicEvents`], which can be used to iterate and inspect the events +//! produced by that transaction being executed. We can also access _all_ of the events produced in +//! a single block using one of these two interfaces: +//! +//! ```rust,no_run,standalone_crate +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! use pezkuwi_subxt::client::OnlineClient; +//! use pezkuwi_subxt::config::PezkuwiConfig; +//! +//! // Create client: +//! let client = OnlineClient::::new().await?; +//! +//! // Get events from the latest block (use .at() to specify a block hash): +//! let events = client.blocks().at_latest().await?.events().await?; +//! // We can use this shorthand too: +//! let events = client.events().at_latest().await?; +//! 
# Ok(()) +//! # } +//! ``` +//! +//! Once we've loaded our events, we can iterate all events or search for specific events via +//! methods like [`crate::events::Events::iter()`] and [`crate::events::Events::find()`]. See +//! [`crate::events::Events`] and [`crate::events::EventDetails`] for more information. +//! +//! ## Example +//! +//! Here's an example which puts this all together: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/events.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/light_client.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/light_client.rs new file mode 100644 index 00000000..4438a32c --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/light_client.rs @@ -0,0 +1,50 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Light Client +//! +//! The light client based interface uses _Smoldot_ to connect to a _chain_, rather than an +//! individual node. This means that you don't have to trust a specific node when interacting with +//! some chain. +//! +//! This feature is currently unstable. Use the `unstable-light-client` feature flag to enable it. +//! To use this in WASM environments, enable the `web` feature flag and disable the "native" one. +//! +//! To connect to a blockchain network, the Light Client requires a trusted sync state of the +//! network, known as a _chain spec_. One way to obtain this is by making a `sync_state_genSyncSpec` +//! RPC call to a trusted node belonging to the chain that you wish to interact with. +//! +//! Subxt exposes a utility method to obtain the chain spec: +//! [`crate::utils::fetch_chainspec_from_rpc_node()`]. Alternately, you can manually make an RPC +//! call to `sync_state_genSyncSpec` like so (assuming a node running locally on port 9933): +//! +//! ```bash +//! 
curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "sync_state_genSyncSpec", "params":[true]}' http://localhost:9933/ | jq .result > chain_spec.json +//! ``` +//! +//! ## Examples +//! +//! ### Basic Example +//! +//! This basic example uses some already-known chain specs to connect to a relay chain and parachain +//! and stream information about their finalized blocks: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/light_client_basic.rs")] +//! ``` +//! +//! ### Connecting to a local node +//! +//! This example connects to a local chain and submits a transaction. To run this, you first need +//! to have a local pezkuwi node running using the following command: +//! ```text +//! pezkuwi --dev --node-key 0000000000000000000000000000000000000000000000000000000000000001 +//! ``` +//! +//! Then, the following code will download a chain spec from this local node, alter the bootnodes +//! to point only to the local node, and then submit a transaction through it. +//! ```rust,ignore +#![doc = include_str!("../../../examples/light_client_local_node.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/mod.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/mod.rs new file mode 100644 index 00000000..77eaf21d --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains examples of using Subxt; follow the links for more: +//! +//! - [Transactions](transactions) +//! - [Storage](storage) +//! - [Events](events) +//! - [Constants](constants) +//! - [Blocks](blocks) +//! - [Runtime APIs](runtime_apis) +//! - [Unstable Light Client](light_client) +//! - [Custom Values](custom_values) +//! - [RPC calls](rpc) +//! +//! Alternately, [go back](super). 
+ +pub mod blocks; +pub mod constants; +pub mod custom_values; +pub mod events; +pub mod light_client; +pub mod rpc; +pub mod runtime_apis; +pub mod storage; +pub mod transactions; diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/rpc.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/rpc.rs new file mode 100644 index 00000000..f6f12d46 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/rpc.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # RPC calls +//! +//! The RPC interface is provided by the [`pezkuwi_subxt_rpcs`] crate but re-exposed here. We have: +//! +//! - [`crate::backend::rpc::RpcClient`] and [`crate::backend::rpc::RpcClientT`]: the underlying +//! type and trait which provides a basic RPC client. +//! - [`crate::backend::legacy::rpc_methods`] and [`crate::backend::chain_head::rpc_methods`]: RPC +//! methods that can be instantiated with an RPC client. +//! +//! See [`pezkuwi_subxt_rpcs`] or [`crate::ext::pezkuwi_subxt_rpcs`] for more. +//! +//! # Example +//! +//! Here's an example which calls some legacy JSON-RPC methods, and reuses the same connection to +//! run a full Subxt client: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/rpc_legacy.rs")] +//! ``` diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/runtime_apis.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/runtime_apis.rs new file mode 100644 index 00000000..a5582c61 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/runtime_apis.rs @@ -0,0 +1,78 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Runtime API interface +//! +//! The Runtime API interface allows Subxt to call runtime APIs exposed by certain pallets in order +//! to obtain information. 
Much like [`super::storage`] and [`super::transactions`], Making a +//! runtime call to a node and getting the response back takes the following steps: +//! +//! 1. [Constructing a runtime call](#constructing-a-runtime-call) +//! 2. [Submitting it to get back the response](#submitting-it) +//! +//! **Note:** Runtime APIs are only available when using V15 metadata, which is currently unstable. +//! You'll need to use `subxt metadata --version unstable` command to download the unstable V15 +//! metadata, and activate the `unstable-metadata` feature in Subxt for it to also use this metadata +//! from a node. The metadata format is unstable because it may change and break compatibility with +//! Subxt at any moment, so use at your own risk. +//! +//! ## Constructing a runtime call +//! +//! We can use the statically generated interface to build runtime calls: +//! +//! ```rust,no_run,standalone_crate +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +//! pub mod pezkuwi {} +//! +//! let runtime_call = pezkuwi::apis().metadata().metadata_versions(); +//! ``` +//! +//! Alternately, we can dynamically construct a runtime call. The input type can be a tuple or +//! vec or valid types implementing [`scale_encode::EncodeAsType`], and the output can be anything +//! implementing [`scale_decode::DecodeAsType`]: +//! +//! ```rust,no_run +//! use pezkuwi_subxt::dynamic::Value; +//! +//! let runtime_call = pezkuwi_subxt::dynamic::runtime_api_call::<(), Vec>( +//! "Metadata", +//! "metadata_versions", +//! () +//! ); +//! ``` +//! +//! All valid runtime calls implement [`crate::runtime_api::Payload`], a trait which +//! describes how to encode the runtime call arguments and what return type to decode from the +//! response. +//! +//! ## Submitting it +//! +//! Runtime calls can be handed to [`crate::runtime_api::RuntimeApi::call()`], which will submit +//! them and hand back the associated response. +//! +//! 
### Making a static Runtime API call +//! +//! The easiest way to make a runtime API call is to use the statically generated interface. +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/runtime_apis_static.rs")] +//! ``` +//! +//! ### Making a dynamic Runtime API call +//! +//! If you'd prefer to construct the call at runtime, you can do this using the +//! [`crate::dynamic::runtime_api_call`] method. +//! ```rust,ignore +#![doc = include_str!("../../../examples/runtime_apis_dynamic.rs")] +//! ``` +//! +//! ### Making a raw call +//! +//! This is generally discouraged in favour of one of the above, but may be necessary (especially if +//! the node you're talking to does not yet serve V15 metadata). Here, you must manually encode +//! the argument bytes and manually provide a type for the response bytes to be decoded into. +//! ```rust,ignore +#![doc = include_str!("../../../examples/runtime_apis_raw.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/storage.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/storage.rs new file mode 100644 index 00000000..1e88b0c4 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/storage.rs @@ -0,0 +1,80 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Storage +//! +//! A Bizinikiwi based chain can be seen as a key/value database which starts off at some initial +//! state, and is modified by the extrinsics in each block. This database is referred to as the +//! node storage. With Subxt, you can query this key/value storage with the following steps: +//! +//! 1. [Constructing a storage query](#constructing-a-storage-query). +//! 2. [Submitting the query to get back the associated entry](#submitting-it). +//! 3. [Fetching](#fetching-storage-entries) or [iterating](#iterating-storage-entries) over that +//! entry to retrieve the value or values within it. +//! +//! 
## Constructing a storage query +//! +//! We can use the statically generated interface to build storage queries: +//! +//! ```rust,no_run,standalone_crate +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +//! pub mod pezkuwi {} +//! +//! let storage_query = pezkuwi::storage().system().account(); +//! ``` +//! +//! Alternately, we can dynamically construct a storage query. A dynamic query needs the input +//! and return value types to be specified, where we can use [`crate::dynamic::Value`] if unsure. +//! +//! ```rust,no_run,standalone_crate +//! use pezkuwi_subxt::dynamic::Value; +//! +//! let storage_query = pezkuwi_subxt::dynamic::storage::<(Value,), Value>("System", "Account"); +//! ``` +//! +//! ## Submitting it +//! +//! Storage queries can be handed to various functions in [`crate::storage::StorageClientAt`] in +//! order to obtain the associated values (also referred to as storage entries) back. +//! +//! The core API here is [`crate::storage::StorageClientAt::entry()`], which takes a query and looks +//! up the corresponding storage entry, from which you can then fetch or iterate over the values +//! contained within. [`crate::storage::StorageClientAt::fetch()`] and +//! [`crate::storage::StorageClientAt::iter()`] are shorthand for this. +//! +//! When you wish to manually query some entry, [`crate::storage::StorageClientAt::fetch_raw()`] +//! exists to take in raw bytes pointing at some storage value, and return the value bytes if +//! possible. [`crate::storage::StorageClientAt::storage_version()`] +//! and [`crate::storage::StorageClientAt::runtime_wasm_code()`] use this to retrieve the version of +//! some storage API and the current Runtime WASM blob respectively. +//! +//! ### Fetching storage entries +//! +//! The simplest way to access storage entries is to construct a query and then call either +//! [`crate::storage::StorageClientAt::fetch()`]: +//! +//! 
```rust,ignore +#![doc = include_str!("../../../examples/storage_fetch.rs")] +//! ``` +//! +//! For completeness, below is an example using a dynamic query instead. Dynamic queries can define the types that +//! they wish to accept inputs and decode the return value into ([`crate::dynamic::Value`] can be used here anywhere we +//! are not sure of the specific types). +//! ```rust,ignore +#![doc = include_str!("../../../examples/storage_fetch_dynamic.rs")] +//! ``` +//! +//! ### Iterating storage entries +//! +//! Many storage entries are maps of values; as well as fetching individual values, it's possible to +//! iterate over all of the values stored at that location: +//! ```rust,ignore +#![doc = include_str!("../../../examples/storage_iterating.rs")] +//! ``` +//! +//! Here's the same logic but using dynamically constructed values instead: +//! ```rust,ignore +#![doc = include_str!("../../../examples/storage_iterating_dynamic.rs")] +//! ``` +//! diff --git a/vendor/pezkuwi-subxt/subxt/src/book/usage/transactions.rs b/vendor/pezkuwi-subxt/subxt/src/book/usage/transactions.rs new file mode 100644 index 00000000..c4403f92 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/book/usage/transactions.rs @@ -0,0 +1,201 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! # Transactions +//! +//! A transaction is an extrinsic that's signed (ie it originates from a given address). The purpose +//! of extrinsics is to modify the node storage in a deterministic way, and so being able to submit +//! transactions to a node is one of the core features of Subxt. +//! +//! > Note: the documentation tends to use the terms _extrinsic_ and _transaction_ interchangeably; +//! > An extrinsic is some data that can be added to a block, and is either signed (a _transaction_) +//! > or unsigned (an _inherent_). Subxt can construct either, but overwhelmingly you'll need to +//! 
> sign the payload you'd like to submit. +//! +//! Submitting a transaction to a node consists of the following steps: +//! +//! 1. [Constructing a transaction payload to submit](#constructing-a-transaction-payload). +//! 2. [Signing it](#signing-it). +//! 3. [Submitting it (optionally with some additional parameters)](#submitting-it). +//! +//! We'll look at each of these steps in turn. +//! +//! ## Constructing a transaction payload +//! +//! We can use the statically generated interface to build transaction payloads: +//! +//! ```rust,no_run,standalone_crate +//! #[pezkuwi_subxt::subxt(runtime_metadata_path = "../artifacts/pezkuwi_metadata_small.scale")] +//! pub mod pezkuwi {} +//! +//! let remark = "Hello there".as_bytes().to_vec(); +//! let tx_payload = pezkuwi::tx().system().remark(remark); +//! ``` +//! +//! > If you're not sure what types to import and use to build a given payload, you can use the +//! > `subxt` CLI tool to generate the interface by using something like `subxt codegen | rustfmt > +//! > interface.rs`, to see what types and things are available (or even just to use directly +//! > instead of the [`#[subxt]`](crate::subxt) macro). +//! +//! Alternately, we can dynamically construct a transaction payload. This will not be type checked +//! or validated until it's submitted: +//! +//! ```rust,no_run,standalone_crate +//! use pezkuwi_subxt::dynamic::Value; +//! +//! let tx_payload = pezkuwi_subxt::dynamic::tx("System", "remark", vec![ +//! Value::from_bytes("Hello there") +//! ]); +//! ``` +//! +//! The [`crate::dynamic::Value`] type is a dynamic type much like a `serde_json::Value` but instead +//! represents any type of data that can be SCALE encoded or decoded. It can be serialized, +//! deserialized and parsed from/to strings. +//! +//! A valid transaction payload is just something that implements the [`crate::tx::Payload`] trait; +//! you can implement this trait on your own custom types if the built-in ones are not suitable for +//! 
your needs. +//! +//! ## Signing it +//! +//! You'll normally need to sign an extrinsic to prove that it originated from an account that you +//! control. To do this, you will typically first create a [`crate::tx::Signer`] instance, which +//! tells Subxt who the extrinsic is from, and takes care of signing the relevant details to prove +//! this. +//! +//! There are two main ways to create a compatible signer instance: +//! 1. The `pezkuwi_subxt_signer` crate provides a WASM compatible implementation of +//! [`crate::tx::Signer`] +//! for chains which require sr25519 or ecdsa signatures (requires the `subxt` feature to be +//! enabled). +//! 2. Alternately, implement your own [`crate::tx::Signer`] instance by wrapping it in a new type +//! pattern. +//! +//! Going for 1 leads to fewer dependencies being imported and WASM compatibility out of the box via +//! the `web` feature flag. Going for 2 is useful if you're already using the Bizinikiwi dependencies +//! or need additional signing algorithms that `pezkuwi_subxt_signer` doesn't support, and don't +//! care about WASM compatibility. +//! +//! Because 2 is more complex and requires more code, we'll focus on 1 here. +//! For 2, see the example in `subxt/examples/bizinikiwi_compat_signer.rs` for how +//! you can integrate things like sp_core's signer in subxt. +//! +//! Let's go through how to create a signer using the `pezkuwi_subxt_signer` crate: +//! +//! ```rust,standalone_crate +//! use pezkuwi_subxt::config::PezkuwiConfig; +//! use std::str::FromStr; +//! +//! use pezkuwi_subxt_signer::{SecretUri, sr25519}; +//! +//! // Get hold of a `Signer` for a test account: +//! let alice = sr25519::dev::alice(); +//! +//! // Or generate a keypair, here from an SURI: +//! let uri = SecretUri::from_str("vessel ladder alter error federal sibling chat ability sun glass valve picture/0/1///Password") +//! .expect("valid URI"); +//! let keypair = sr25519::Keypair::from_uri(&uri) +//! .expect("valid keypair"); +//! ``` +//! +//! 
After initializing the signer, let's also go through how to create a transaction and sign it: +//! +//! ```rust,no_run,standalone_crate +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box<dyn std::error::Error>> { +//! use pezkuwi_subxt::client::OnlineClient; +//! use pezkuwi_subxt::config::PezkuwiConfig; +//! use pezkuwi_subxt::dynamic::Value; +//! +//! // Create client: +//! let client = OnlineClient::<PezkuwiConfig>::new().await?; +//! +//! // Create a dummy tx payload to sign: +//! let payload = pezkuwi_subxt::dynamic::tx("System", "remark", vec![ +//! Value::from_bytes("Hello there") +//! ]); +//! +//! // Construct the tx but don't sign it. The account nonce here defaults to 0. +//! // You can use `create_partial` to fetch the correct nonce. +//! let mut partial_tx = client.tx().create_partial_offline( +//! &payload, +//! Default::default() +//! )?; +//! +//! // Fetch the payload that needs to be signed: +//! let signer_payload = partial_tx.signer_payload(); +//! +//! // ... At this point, we can hand off the `signer_payload` to be signed externally. +//! // Ultimately we need to be given back a `signature` (or really, anything +//! // that can be SCALE encoded) and an `address`: +//! let signature; +//! let account_id; +//! # use pezkuwi_subxt::tx::Signer; +//! # let signer = pezkuwi_subxt_signer::sr25519::dev::alice(); +//! # signature = signer.sign(&signer_payload).into(); +//! # account_id = signer.public_key().to_account_id(); +//! +//! // Now we can build a tx, which one can call `submit` or `submit_and_watch` +//! // on to submit to a node and optionally watch the status. +//! let tx = partial_tx.sign_with_account_and_signature( +//! &account_id, +//! &signature +//! ); +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Submitting it +//! +//! Once we have signed the transaction, we need to submit it. +//! +//! ### The high level API +//! +//! The highest level approach to doing this is to call +//! [`crate::tx::TxClient::sign_and_submit_then_watch_default`]. This hands back a +//! 
[`crate::tx::TxProgress`] struct which will monitor the transaction status. We can then call +//! [`crate::tx::TxProgress::wait_for_finalized_success()`] to wait for this transaction to make it +//! into a finalized block, check for an `ExtrinsicSuccess` event, and then hand back the events for +//! inspection. This looks like: +//! +//! ```rust,ignore +#![doc = include_str!("../../../examples/tx_basic.rs")] +//! ``` +//! +//! ### Providing transaction parameters +//! +//! If you'd like to provide parameters (such as mortality) to the transaction, you can use +//! [`crate::tx::TxClient::sign_and_submit_then_watch`] instead: +//! ```rust,ignore +#![doc = include_str!("../../../examples/tx_with_params.rs")] +//! ``` +//! +//! This example doesn't wait for the transaction to be included in a block; it just submits it and +//! hopes for the best! +//! +//! ### Boxing transaction payloads +//! +//! Transaction payloads can be boxed so that they all share a common type and can be stored together. +//! ```rust,ignore +#![doc = include_str!("../../../examples/tx_boxed.rs")] +//! ``` +//! +//! ### Custom handling of transaction status updates +//! +//! If you'd like more control or visibility over exactly which status updates are being emitted for +//! the transaction, you can monitor them as they are emitted and react however you choose: +//! ```rust,ignore +#![doc = include_str!("../../../examples/tx_status_stream.rs")] +//! ``` +//! +//! ### Signing transactions externally +//! +//! Subxt also allows you to get hold of the signer payload and hand that off to something else to be +//! signed. The signature can then be provided back to Subxt to build the final transaction to submit: +//! ```rust,ignore +#![doc = include_str!("../../../examples/tx_partial.rs")] +//! ``` +//! +//! Take a look at the API docs for [`crate::tx::TxProgress`], [`crate::tx::TxStatus`] and +//! [`crate::tx::TxInBlock`] for more options. 
diff --git a/vendor/pezkuwi-subxt/subxt/src/client/mod.rs b/vendor/pezkuwi-subxt/subxt/src/client/mod.rs new file mode 100644 index 00000000..66418771 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/client/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module provides two clients that can be used to work with +//! transactions, storage and events. The [`OfflineClient`] works +//! entirely offline and can be passed to any function that doesn't +//! require network access. The [`OnlineClient`] requires network +//! access. + +mod offline_client; +mod online_client; + +pub use offline_client::{OfflineClient, OfflineClientT}; +pub use online_client::{ + ClientRuntimeUpdater, OnlineClient, OnlineClientT, RuntimeUpdaterStream, Update, +}; +pub use pezkuwi_subxt_core::client::{ClientState, RuntimeVersion}; diff --git a/vendor/pezkuwi-subxt/subxt/src/client/offline_client.rs b/vendor/pezkuwi-subxt/subxt/src/client/offline_client.rs new file mode 100644 index 00000000..8b5d5bd9 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/client/offline_client.rs @@ -0,0 +1,200 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + Metadata, + blocks::BlocksClient, + config::{Config, HashFor}, + constants::ConstantsClient, + custom_values::CustomValuesClient, + events::EventsClient, + runtime_api::RuntimeApiClient, + storage::StorageClient, + tx::TxClient, + view_functions::ViewFunctionsClient, +}; + +use derive_where::derive_where; +use pezkuwi_subxt_core::client::{ClientState, RuntimeVersion}; +use std::sync::Arc; + +/// A trait representing a client that can perform +/// offline-only actions. +pub trait OfflineClientT: Clone + Send + Sync + 'static { + /// Return the provided [`Metadata`]. 
+ fn metadata(&self) -> Metadata; + + /// Return the provided genesis hash. + fn genesis_hash(&self) -> HashFor; + + /// Return the provided [`RuntimeVersion`]. + fn runtime_version(&self) -> RuntimeVersion; + + /// Return the hasher used on the chain. + fn hasher(&self) -> T::Hasher; + + /// Return the [pezkuwi_subxt_core::client::ClientState] (metadata, runtime version and genesis + /// hash). + fn client_state(&self) -> ClientState { + ClientState { + genesis_hash: self.genesis_hash(), + runtime_version: self.runtime_version(), + metadata: self.metadata(), + } + } + + /// Work with transactions. + fn tx(&self) -> TxClient { + TxClient::new(self.clone()) + } + + /// Work with events. + fn events(&self) -> EventsClient { + EventsClient::new(self.clone()) + } + + /// Work with storage. + fn storage(&self) -> StorageClient { + StorageClient::new(self.clone()) + } + + /// Access constants. + fn constants(&self) -> ConstantsClient { + ConstantsClient::new(self.clone()) + } + + /// Work with blocks. + fn blocks(&self) -> BlocksClient { + BlocksClient::new(self.clone()) + } + + /// Work with runtime APIs. + fn runtime_api(&self) -> RuntimeApiClient { + RuntimeApiClient::new(self.clone()) + } + + /// Work with View Functions. + fn view_functions(&self) -> ViewFunctionsClient { + ViewFunctionsClient::new(self.clone()) + } + + /// Work with custom types. + fn custom_values(&self) -> CustomValuesClient { + CustomValuesClient::new(self.clone()) + } +} + +/// A client that is capable of performing offline-only operations. +/// Can be constructed as long as you can populate the required fields. +#[derive_where(Debug, Clone)] +pub struct OfflineClient { + inner: Arc>, + hasher: T::Hasher, +} + +impl OfflineClient { + /// Construct a new [`OfflineClient`], providing + /// the necessary runtime and compile-time arguments. 
+ pub fn new( + genesis_hash: HashFor, + runtime_version: RuntimeVersion, + metadata: impl Into, + ) -> OfflineClient { + let metadata = metadata.into(); + let hasher = ::new(&metadata); + + OfflineClient { + hasher, + inner: Arc::new(ClientState { genesis_hash, runtime_version, metadata }), + } + } + + /// Return the genesis hash. + pub fn genesis_hash(&self) -> HashFor { + self.inner.genesis_hash + } + + /// Return the runtime version. + pub fn runtime_version(&self) -> RuntimeVersion { + self.inner.runtime_version + } + + /// Return the [`Metadata`] used in this client. + pub fn metadata(&self) -> Metadata { + self.inner.metadata.clone() + } + + /// Return the hasher used for the chain. + pub fn hasher(&self) -> T::Hasher { + self.hasher + } + + // Just a copy of the most important trait methods so that people + // don't need to import the trait for most things: + + /// Work with transactions. + pub fn tx(&self) -> TxClient { + >::tx(self) + } + + /// Work with events. + pub fn events(&self) -> EventsClient { + >::events(self) + } + + /// Work with storage. + pub fn storage(&self) -> StorageClient { + >::storage(self) + } + + /// Access constants. + pub fn constants(&self) -> ConstantsClient { + >::constants(self) + } + + /// Work with blocks. + pub fn blocks(&self) -> BlocksClient { + >::blocks(self) + } + + /// Work with runtime APIs. + pub fn runtime_api(&self) -> RuntimeApiClient { + >::runtime_api(self) + } + + /// Work with View Functions. 
+ pub fn view_functions(&self) -> ViewFunctionsClient { + >::view_functions(self) + } + + /// Access custom types + pub fn custom_values(&self) -> CustomValuesClient { + >::custom_values(self) + } +} + +impl OfflineClientT for OfflineClient { + fn genesis_hash(&self) -> HashFor { + self.genesis_hash() + } + fn runtime_version(&self) -> RuntimeVersion { + self.runtime_version() + } + fn metadata(&self) -> Metadata { + self.metadata() + } + fn hasher(&self) -> T::Hasher { + self.hasher() + } +} + +// For ergonomics; cloning a client is deliberately fairly cheap (via Arc), +// so this allows users to pass references to a client rather than explicitly +// cloning. This is partly for consistency with OnlineClient, which can be +// easily converted into an OfflineClient for ergonomics. +impl<'a, T: Config> From<&'a OfflineClient> for OfflineClient { + fn from(c: &'a OfflineClient) -> Self { + c.clone() + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/client/online_client.rs b/vendor/pezkuwi-subxt/subxt/src/client/online_client.rs new file mode 100644 index 00000000..8b5308f4 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/client/online_client.rs @@ -0,0 +1,556 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. 
+ +use super::{OfflineClient, OfflineClientT}; +use crate::{ + Metadata, + backend::{Backend, BackendExt, StreamOfResults, legacy::LegacyBackend, rpc::RpcClient}, + blocks::{BlockRef, BlocksClient}, + config::{Config, HashFor}, + constants::ConstantsClient, + custom_values::CustomValuesClient, + error::{BackendError, OnlineClientError, RuntimeUpdateeApplyError, RuntimeUpdaterError}, + events::EventsClient, + runtime_api::RuntimeApiClient, + storage::StorageClient, + tx::TxClient, + view_functions::ViewFunctionsClient, +}; +use derive_where::derive_where; +use futures::{TryFutureExt, future}; +use pezkuwi_subxt_core::client::{ClientState, RuntimeVersion}; +use std::sync::{Arc, RwLock}; + +/// A trait representing a client that can perform +/// online actions. +pub trait OnlineClientT: OfflineClientT { + /// Return a backend that can be used to communicate with a node. + fn backend(&self) -> &dyn Backend; +} + +/// A client that can be used to perform API calls (that is, either those +/// requiring an [`OfflineClientT`] or those requiring an [`OnlineClientT`]). +#[derive_where(Clone)] +pub struct OnlineClient { + inner: Arc>>, + backend: Arc>, +} + +#[derive_where(Debug)] +struct Inner { + genesis_hash: HashFor, + runtime_version: RuntimeVersion, + metadata: Metadata, + hasher: T::Hasher, +} + +impl std::fmt::Debug for OnlineClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Client") + .field("rpc", &"RpcClient") + .field("inner", &self.inner) + .finish() + } +} + +// The default constructors assume Jsonrpsee. +#[cfg(feature = "jsonrpsee")] +#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] +impl OnlineClient { + /// Construct a new [`OnlineClient`] using default settings which + /// point to a locally running node on `ws://127.0.0.1:9944`. 
+ pub async fn new() -> Result, OnlineClientError> { + let url = "ws://127.0.0.1:9944"; + OnlineClient::from_url(url).await + } + + /// Construct a new [`OnlineClient`], providing a URL to connect to. + pub async fn from_url(url: impl AsRef) -> Result, OnlineClientError> { + pezkuwi_subxt_rpcs::utils::validate_url_is_secure(url.as_ref())?; + OnlineClient::from_insecure_url(url).await + } + + /// Construct a new [`OnlineClient`], providing a URL to connect to. + /// + /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). + pub async fn from_insecure_url( + url: impl AsRef, + ) -> Result, OnlineClientError> { + let client = RpcClient::from_insecure_url(url).await?; + let backend = LegacyBackend::builder().build(client); + OnlineClient::from_backend(Arc::new(backend)).await + } +} + +impl OnlineClient { + /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection. + /// This will use the current default [`Backend`], which may change in future releases. + pub async fn from_rpc_client( + rpc_client: impl Into, + ) -> Result, OnlineClientError> { + let rpc_client = rpc_client.into(); + let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); + OnlineClient::from_backend(backend).await + } + + /// Construct a new [`OnlineClient`] by providing an RPC client along with the other + /// necessary details. This will use the current default [`Backend`], which may change + /// in future releases. + /// + /// # Warning + /// + /// This is considered the most primitive and also error prone way to + /// instantiate a client; the genesis hash, metadata and runtime version provided will + /// entirely determine which node and blocks this client will be able to interact with, + /// and whether it will be able to successfully do things like submit transactions. + /// + /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate + /// a client. 
+ pub fn from_rpc_client_with( + genesis_hash: HashFor, + runtime_version: RuntimeVersion, + metadata: impl Into, + rpc_client: impl Into, + ) -> Result, OnlineClientError> { + let rpc_client = rpc_client.into(); + let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); + OnlineClient::from_backend_with(genesis_hash, runtime_version, metadata, backend) + } + + /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] + /// implementation to power it. Other details will be obtained from the chain. + pub async fn from_backend>( + backend: Arc, + ) -> Result, OnlineClientError> { + let latest_block = backend + .latest_finalized_block_ref() + .await + .map_err(OnlineClientError::CannotGetLatestFinalizedBlock)?; + + let (genesis_hash, runtime_version, metadata) = future::join3( + backend.genesis_hash().map_err(OnlineClientError::CannotGetGenesisHash), + backend + .current_runtime_version() + .map_err(OnlineClientError::CannotGetCurrentRuntimeVersion), + OnlineClient::fetch_metadata(&*backend, latest_block.hash()) + .map_err(OnlineClientError::CannotFetchMetadata), + ) + .await; + + OnlineClient::from_backend_with(genesis_hash?, runtime_version?, metadata?, backend) + } + + /// Construct a new [`OnlineClient`] by providing all of the underlying details needed + /// to make it work. + /// + /// # Warning + /// + /// This is considered the most primitive and also error prone way to + /// instantiate a client; the genesis hash, metadata and runtime version provided will + /// entirely determine which node and blocks this client will be able to interact with, + /// and whether it will be able to successfully do things like submit transactions. + /// + /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate + /// a client. 
+ pub fn from_backend_with>( + genesis_hash: HashFor, + runtime_version: RuntimeVersion, + metadata: impl Into, + backend: Arc, + ) -> Result, OnlineClientError> { + use pezkuwi_subxt_core::config::Hasher; + + let metadata = metadata.into(); + let hasher = T::Hasher::new(&metadata); + + Ok(OnlineClient { + inner: Arc::new(RwLock::new(Inner { genesis_hash, runtime_version, metadata, hasher })), + backend, + }) + } + + /// Fetch the metadata from bizinikiwi using the runtime API. + async fn fetch_metadata( + backend: &dyn Backend, + block_hash: HashFor, + ) -> Result { + #[cfg(feature = "unstable-metadata")] + { + /// The unstable metadata version number. + const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + + // Try to fetch the latest unstable metadata, if that fails fall back to + // fetching the latest stable metadata. + match backend.metadata_at_version(UNSTABLE_METADATA_VERSION, block_hash).await { + Ok(bytes) => Ok(bytes), + Err(_) => OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await, + } + } + + #[cfg(not(feature = "unstable-metadata"))] + OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await + } + + /// Fetch the latest stable metadata from the node. + async fn fetch_latest_stable_metadata( + backend: &dyn Backend, + block_hash: HashFor, + ) -> Result { + // The metadata versions we support in Subxt, from newest to oldest. + use pezkuwi_subxt_metadata::SUPPORTED_METADATA_VERSIONS; + + // Try to fetch each version that we support in order from newest to oldest. + for version in SUPPORTED_METADATA_VERSIONS { + if let Ok(bytes) = backend.metadata_at_version(version, block_hash).await { + return Ok(bytes); + } + } + + // If that fails, fetch the metadata V14 using the old API. + backend.legacy_metadata(block_hash).await + } + + /// Create an object which can be used to keep the runtime up to date + /// in a separate thread. 
+ /// + /// # Example + /// + /// ```rust,no_run,standalone_crate + /// # #[tokio::main] + /// # async fn main() { + /// use pezkuwi_subxt::{ OnlineClient, PezkuwiConfig }; + /// + /// let client = OnlineClient::::new().await.unwrap(); + /// + /// // high level API. + /// + /// let update_task = client.updater(); + /// tokio::spawn(async move { + /// update_task.perform_runtime_updates().await; + /// }); + /// + /// + /// // low level API. + /// + /// let updater = client.updater(); + /// tokio::spawn(async move { + /// let mut update_stream = updater.runtime_updates().await.unwrap(); + /// + /// while let Ok(update) = update_stream.next().await { + /// let version = update.runtime_version().spec_version; + /// + /// match updater.apply_update(update) { + /// Ok(()) => { + /// println!("Upgrade to version: {} successful", version) + /// } + /// Err(e) => { + /// println!("Upgrade to version {} failed {:?}", version, e); + /// } + /// }; + /// } + /// }); + /// # } + /// ``` + pub fn updater(&self) -> ClientRuntimeUpdater { + ClientRuntimeUpdater(self.clone()) + } + + /// Return the hasher configured for hashing blocks and extrinsics. + pub fn hasher(&self) -> T::Hasher { + self.inner.read().expect("shouldn't be poisoned").hasher + } + + /// Return the [`Metadata`] used in this client. + pub fn metadata(&self) -> Metadata { + let inner = self.inner.read().expect("shouldn't be poisoned"); + inner.metadata.clone() + } + + /// Change the [`Metadata`] used in this client. + /// + /// # Warning + /// + /// Setting custom metadata may leave Subxt unable to work with certain blocks, + /// subscribe to latest blocks or submit valid transactions. + pub fn set_metadata(&self, metadata: impl Into) { + let mut inner = self.inner.write().expect("shouldn't be poisoned"); + inner.metadata = metadata.into(); + } + + /// Return the genesis hash. 
+ pub fn genesis_hash(&self) -> HashFor { + let inner = self.inner.read().expect("shouldn't be poisoned"); + inner.genesis_hash + } + + /// Change the genesis hash used in this client. + /// + /// # Warning + /// + /// Setting a custom genesis hash may leave Subxt unable to + /// submit valid transactions. + pub fn set_genesis_hash(&self, genesis_hash: HashFor) { + let mut inner = self.inner.write().expect("shouldn't be poisoned"); + inner.genesis_hash = genesis_hash; + } + + /// Return the runtime version. + pub fn runtime_version(&self) -> RuntimeVersion { + let inner = self.inner.read().expect("shouldn't be poisoned"); + inner.runtime_version + } + + /// Change the [`RuntimeVersion`] used in this client. + /// + /// # Warning + /// + /// Setting a custom runtime version may leave Subxt unable to + /// submit valid transactions. + pub fn set_runtime_version(&self, runtime_version: RuntimeVersion) { + let mut inner = self.inner.write().expect("shouldn't be poisoned"); + inner.runtime_version = runtime_version; + } + + /// Return an RPC client to make raw requests with. + pub fn backend(&self) -> &dyn Backend { + &*self.backend + } + + /// Return an offline client with the same configuration as this. + pub fn offline(&self) -> OfflineClient { + let inner = self.inner.read().expect("shouldn't be poisoned"); + OfflineClient::new(inner.genesis_hash, inner.runtime_version, inner.metadata.clone()) + } + + // Just a copy of the most important trait methods so that people + // don't need to import the trait for most things: + + /// Work with transactions. + pub fn tx(&self) -> TxClient { + >::tx(self) + } + + /// Work with events. + pub fn events(&self) -> EventsClient { + >::events(self) + } + + /// Work with storage. + pub fn storage(&self) -> StorageClient { + >::storage(self) + } + + /// Access constants. + pub fn constants(&self) -> ConstantsClient { + >::constants(self) + } + + /// Work with blocks. 
+ pub fn blocks(&self) -> BlocksClient { + >::blocks(self) + } + + /// Work with runtime API. + pub fn runtime_api(&self) -> RuntimeApiClient { + >::runtime_api(self) + } + + /// Work with View Functions. + pub fn view_functions(&self) -> ViewFunctionsClient { + >::view_functions(self) + } + + /// Access custom types. + pub fn custom_values(&self) -> CustomValuesClient { + >::custom_values(self) + } +} + +impl OfflineClientT for OnlineClient { + fn metadata(&self) -> Metadata { + self.metadata() + } + fn genesis_hash(&self) -> HashFor { + self.genesis_hash() + } + fn runtime_version(&self) -> RuntimeVersion { + self.runtime_version() + } + fn hasher(&self) -> T::Hasher { + self.hasher() + } + // This is provided by default, but we can optimise here and only lock once: + fn client_state(&self) -> ClientState { + let inner = self.inner.read().expect("shouldn't be poisoned"); + ClientState { + genesis_hash: inner.genesis_hash, + runtime_version: inner.runtime_version, + metadata: inner.metadata.clone(), + } + } +} + +impl OnlineClientT for OnlineClient { + fn backend(&self) -> &dyn Backend { + &*self.backend + } +} + +/// Client wrapper for performing runtime updates. See [`OnlineClient::updater()`] +/// for example usage. +pub struct ClientRuntimeUpdater(OnlineClient); + +impl ClientRuntimeUpdater { + fn is_runtime_version_different(&self, new: &RuntimeVersion) -> bool { + let curr = self.0.inner.read().expect("shouldn't be poisoned"); + &curr.runtime_version != new + } + + fn do_update(&self, update: Update) { + let mut writable = self.0.inner.write().expect("shouldn't be poisoned"); + writable.metadata = update.metadata; + writable.runtime_version = update.runtime_version; + } + + /// Tries to apply a new update. 
+ pub fn apply_update(&self, update: Update) -> Result<(), RuntimeUpdateeApplyError> { + if !self.is_runtime_version_different(&update.runtime_version) { + return Err(RuntimeUpdateeApplyError::SameVersion); + } + + self.do_update(update); + + Ok(()) + } + + /// Performs runtime updates indefinitely unless encountering an error. + /// + /// *Note:* This will run indefinitely until it errors, so the typical usage + /// would be to run it in a separate background task. + pub async fn perform_runtime_updates(&self) -> Result<(), RuntimeUpdaterError> { + // Obtain an update subscription to further detect changes in the runtime version of the + // node. + let mut runtime_version_stream = self.runtime_updates().await?; + + loop { + let update = runtime_version_stream.next().await?; + + // This only fails if received the runtime version is the same the current runtime + // version which might occur because that runtime subscriptions in bizinikiwi sends + // out the initial value when they created and not only when runtime upgrades occurs. + // Thus, fine to ignore here as it strictly speaking isn't really an error + let _ = self.apply_update(update); + } + } + + /// Low-level API to get runtime updates as a stream but it's doesn't check if the + /// runtime version is newer or updates the runtime. + /// + /// Instead that's up to the user of this API to decide when to update and + /// to perform the actual updating. + pub async fn runtime_updates(&self) -> Result, RuntimeUpdaterError> { + let stream = self + .0 + .backend() + .stream_runtime_version() + .await + .map_err(RuntimeUpdaterError::CannotStreamRuntimeVersion)?; + + Ok(RuntimeUpdaterStream { stream, client: self.0.clone() }) + } +} + +/// Stream to perform runtime upgrades. +pub struct RuntimeUpdaterStream { + stream: StreamOfResults, + client: OnlineClient, +} + +impl RuntimeUpdaterStream { + /// Wait for the next runtime update. 
+ pub async fn next(&mut self) -> Result { + let runtime_version = self + .stream + .next() + .await + .ok_or(RuntimeUpdaterError::UnexpectedEndOfUpdateStream)? + .map_err(RuntimeUpdaterError::CannotGetNextRuntimeVersion)?; + + let at = wait_runtime_upgrade_in_finalized_block(&self.client, &runtime_version).await?; + + let metadata = OnlineClient::fetch_metadata(self.client.backend(), at.hash()) + .await + .map_err(RuntimeUpdaterError::CannotFetchNewMetadata)?; + + Ok(Update { metadata, runtime_version }) + } +} + +/// Represents the state when a runtime upgrade occurred. +pub struct Update { + runtime_version: RuntimeVersion, + metadata: Metadata, +} + +impl Update { + /// Get the runtime version. + pub fn runtime_version(&self) -> &RuntimeVersion { + &self.runtime_version + } + + /// Get the metadata. + pub fn metadata(&self) -> &Metadata { + &self.metadata + } +} + +/// Helper to wait until the runtime upgrade is applied on at finalized block. +async fn wait_runtime_upgrade_in_finalized_block( + client: &OnlineClient, + runtime_version: &RuntimeVersion, +) -> Result>, RuntimeUpdaterError> { + let hasher = client.inner.read().expect("Lock shouldn't be poisoned").hasher; + + let mut block_sub = client + .backend() + .stream_finalized_block_headers(hasher) + .await + .map_err(RuntimeUpdaterError::CannotStreamFinalizedBlocks)?; + + let block_ref = loop { + let (_, block_ref) = block_sub + .next() + .await + .ok_or(RuntimeUpdaterError::UnexpectedEndOfBlockStream)? + .map_err(RuntimeUpdaterError::CannotGetNextFinalizedBlock)?; + + let addr = + crate::dynamic::storage::<(), scale_value::Value>("System", "LastRuntimeUpgrade"); + + let client_at = client.storage().at(block_ref.hash()); + let value = client_at + .entry(addr) + // The storage `system::lastRuntimeUpgrade` should always exist. + // + .map_err(|_| RuntimeUpdaterError::CantFindSystemLastRuntimeUpgrade)? + .fetch(()) + .await + .map_err(RuntimeUpdaterError::CantFetchLastRuntimeUpgrade)? 
+ .decode_as::() + .map_err(RuntimeUpdaterError::CannotDecodeLastRuntimeUpgrade)?; + + #[derive(scale_decode::DecodeAsType)] + struct LastRuntimeUpgrade { + spec_version: u32, + } + + // We are waiting for the chain to have the same spec version + // as sent out via the runtime subscription. + if value.spec_version == runtime_version.spec_version { + break block_ref; + } + }; + + Ok(block_ref) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/constants/constants_client.rs b/vendor/pezkuwi-subxt/subxt/src/constants/constants_client.rs new file mode 100644 index 00000000..8c050918 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/constants/constants_client.rs @@ -0,0 +1,46 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{Config, client::OfflineClientT, error::ConstantError}; +use derive_where::derive_where; +use pezkuwi_subxt_core::constants::address::Address; + +/// A client for accessing constants. +#[derive_where(Clone; Client)] +pub struct ConstantsClient { + client: Client, + _marker: std::marker::PhantomData, +} + +impl ConstantsClient { + /// Create a new [`ConstantsClient`]. + pub fn new(client: Client) -> Self { + Self { client, _marker: std::marker::PhantomData } + } +} + +impl> ConstantsClient { + /// Run the validation logic against some constant address you'd like to access. Returns + /// `Ok(())` if the address is valid (or if it's not possible to check since the address has no + /// validation hash). Return an error if the address was not valid or something went wrong + /// trying to validate it (ie the pallet or constant in question do not exist at all). + pub fn validate(&self, address: Addr) -> Result<(), ConstantError> { + let metadata = self.client.metadata(); + pezkuwi_subxt_core::constants::validate(address, &metadata) + } + + /// Access the constant at the address given, returning the type defined by this address. 
+ /// This is probably used with addresses given from static codegen, although you can manually + /// construct your own, too. + pub fn at(&self, address: Addr) -> Result { + let metadata = self.client.metadata(); + pezkuwi_subxt_core::constants::get(address, &metadata) + } + + /// Access the bytes of a constant by the address it is registered under. + pub fn bytes_at(&self, address: Addr) -> Result, ConstantError> { + let metadata = self.client.metadata(); + pezkuwi_subxt_core::constants::get_bytes(address, &metadata) + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/constants/mod.rs b/vendor/pezkuwi-subxt/subxt/src/constants/mod.rs new file mode 100644 index 00000000..9034c50a --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/constants/mod.rs @@ -0,0 +1,10 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types associated with accessing constants. + +mod constants_client; + +pub use constants_client::ConstantsClient; +pub use pezkuwi_subxt_core::constants::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/vendor/pezkuwi-subxt/subxt/src/custom_values/custom_values_client.rs b/vendor/pezkuwi-subxt/subxt/src/custom_values/custom_values_client.rs new file mode 100644 index 00000000..d28f57cf --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/custom_values/custom_values_client.rs @@ -0,0 +1,118 @@ +use crate::{Config, client::OfflineClientT, error::CustomValueError}; +use derive_where::derive_where; + +use pezkuwi_subxt_core::custom_values::address::{Address, Maybe}; + +/// A client for accessing custom values stored in the metadata. +#[derive_where(Clone; Client)] +pub struct CustomValuesClient { + client: Client, + _marker: std::marker::PhantomData, +} + +impl CustomValuesClient { + /// Create a new [`CustomValuesClient`]. 
+ pub fn new(client: Client) -> Self { + Self { client, _marker: std::marker::PhantomData } + } +} + +impl> CustomValuesClient { + /// Access a custom value by the address it is registered under. This can be just a [str] to get + /// back a dynamic value, or a static address from the generated static interface to get a + /// value of a static type returned. + pub fn at>( + &self, + address: Addr, + ) -> Result { + pezkuwi_subxt_core::custom_values::get(address, &self.client.metadata()) + } + + /// Access the bytes of a custom value by the address it is registered under. + pub fn bytes_at(&self, address: Addr) -> Result, CustomValueError> { + pezkuwi_subxt_core::custom_values::get_bytes(address, &self.client.metadata()) + } + + /// Run the validation logic against some custom value address you'd like to access. Returns + /// `Ok(())` if the address is valid (or if it's not possible to check since the address has no + /// validation hash). Returns an error if the address was not valid (wrong name, type or raw + /// bytes) + pub fn validate(&self, address: Addr) -> Result<(), CustomValueError> { + pezkuwi_subxt_core::custom_values::validate(address, &self.client.metadata()) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + Metadata, OfflineClient, BizinikiwConfig, + custom_values::{self, CustomValuesClient}, + }; + use codec::Encode; + use pezkuwi_subxt_core::client::RuntimeVersion; + use scale_decode::DecodeAsType; + use scale_info::{TypeInfo, form::PortableForm}; + use std::collections::BTreeMap; + + #[derive(Debug, Clone, PartialEq, Eq, Encode, TypeInfo, DecodeAsType)] + pub struct Person { + age: u16, + name: String, + } + + fn mock_metadata() -> Metadata { + let person_ty = scale_info::MetaType::new::(); + let unit = scale_info::MetaType::new::<()>(); + let mut types = scale_info::Registry::new(); + let person_ty_id = types.register_type(&person_ty); + let unit_id = types.register_type(&unit); + let types: scale_info::PortableRegistry = types.into(); + + let 
person = Person { age: 42, name: "Neo".into() }; + + let person_value_metadata: frame_metadata::v15::CustomValueMetadata = + frame_metadata::v15::CustomValueMetadata { ty: person_ty_id, value: person.encode() }; + + let frame_metadata = frame_metadata::v15::RuntimeMetadataV15 { + types, + pallets: vec![], + extrinsic: frame_metadata::v15::ExtrinsicMetadata { + version: 0, + address_ty: unit_id, + call_ty: unit_id, + signature_ty: unit_id, + extra_ty: unit_id, + signed_extensions: vec![], + }, + ty: unit_id, + apis: vec![], + outer_enums: frame_metadata::v15::OuterEnums { + call_enum_ty: unit_id, + event_enum_ty: unit_id, + error_enum_ty: unit_id, + }, + custom: frame_metadata::v15::CustomMetadata { + map: BTreeMap::from_iter([("Person".to_string(), person_value_metadata)]), + }, + }; + + let metadata: pezkuwi_subxt_metadata::Metadata = frame_metadata.try_into().unwrap(); + metadata + } + + #[test] + fn test_decoding() { + let client = OfflineClient::::new( + Default::default(), + RuntimeVersion { spec_version: 0, transaction_version: 0 }, + mock_metadata(), + ); + + let custom_value_client = CustomValuesClient::new(client); + assert!(custom_value_client.at("No one").is_err()); + + let person_addr = custom_values::dynamic::("Person"); + let person = custom_value_client.at(&person_addr).unwrap(); + assert_eq!(person, Person { age: 42, name: "Neo".into() }) + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/custom_values/mod.rs b/vendor/pezkuwi-subxt/subxt/src/custom_values/mod.rs new file mode 100644 index 00000000..ff0482b8 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/custom_values/mod.rs @@ -0,0 +1,12 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! 
Types associated with accessing custom types + +mod custom_values_client; + +pub use custom_values_client::CustomValuesClient; +pub use pezkuwi_subxt_core::custom_values::address::{ + Address, DynamicAddress, StaticAddress, dynamic, +}; diff --git a/vendor/pezkuwi-subxt/subxt/src/error/dispatch_error.rs b/vendor/pezkuwi-subxt/subxt/src/error/dispatch_error.rs new file mode 100644 index 00000000..2d23cccf --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/error/dispatch_error.rs @@ -0,0 +1,357 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A representation of the dispatch error; an error returned when +//! something fails in trying to submit/execute a transaction. + +use super::{DispatchErrorDecodeError, ModuleErrorDecodeError, ModuleErrorDetailsError}; +use crate::metadata::Metadata; +use core::fmt::Debug; +use scale_decode::{DecodeAsType, TypeResolver, visitor::DecodeAsTypeResult}; +use std::{borrow::Cow, marker::PhantomData}; + +/// An error dispatching a transaction. +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +#[allow(clippy::large_enum_variant)] +#[non_exhaustive] +pub enum DispatchError { + /// Some error occurred. + #[error("Some unknown error occurred.")] + Other, + /// Failed to lookup some data. + #[error("Failed to lookup some data.")] + CannotLookup, + /// A bad origin. + #[error("Bad origin.")] + BadOrigin, + /// A custom error in a module. + #[error("Pallet error: {0}")] + Module(ModuleError), + /// At least one consumer is remaining so the account cannot be destroyed. + #[error("At least one consumer is remaining so the account cannot be destroyed.")] + ConsumerRemaining, + /// There are no providers so the account cannot be created. + #[error("There are no providers so the account cannot be created.")] + NoProviders, + /// There are too many consumers so the account cannot be created. 
+ #[error("There are too many consumers so the account cannot be created.")] + TooManyConsumers, + /// An error to do with tokens. + #[error("Token error: {0}")] + Token(TokenError), + /// An arithmetic error. + #[error("Arithmetic error: {0}")] + Arithmetic(ArithmeticError), + /// The number of transactional layers has been reached, or we are not in a transactional layer. + #[error("Transactional error: {0}")] + Transactional(TransactionalError), + /// Resources exhausted, e.g. attempt to read/write data which is too large to manipulate. + #[error( + "Resources exhausted, e.g. attempt to read/write data which is too large to manipulate." + )] + Exhausted, + /// The state is corrupt; this is generally not going to fix itself. + #[error("The state is corrupt; this is generally not going to fix itself.")] + Corruption, + /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. + #[error( + "Some resource (e.g. a preimage) is unavailable right now. This might fix itself later." + )] + Unavailable, + /// Root origin is not allowed. + #[error("Root origin is not allowed.")] + RootNotAllowed, +} + +/// An error relating to tokens when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum TokenError { + /// Funds are unavailable. + #[error("Funds are unavailable.")] + FundsUnavailable, + /// Some part of the balance gives the only provider reference to the account and thus cannot be + /// (re)moved. + #[error( + "Some part of the balance gives the only provider reference to the account and thus cannot be (re)moved." + )] + OnlyProvider, + /// Account cannot exist with the funds that would be given. + #[error("Account cannot exist with the funds that would be given.")] + BelowMinimum, + /// Account cannot be created. + #[error("Account cannot be created.")] + CannotCreate, + /// The asset in question is unknown. 
+ #[error("The asset in question is unknown.")] + UnknownAsset, + /// Funds exist but are frozen. + #[error("Funds exist but are frozen.")] + Frozen, + /// Operation is not supported by the asset. + #[error("Operation is not supported by the asset.")] + Unsupported, + /// Account cannot be created for a held balance. + #[error("Account cannot be created for a held balance.")] + CannotCreateHold, + /// Withdrawal would cause unwanted loss of account. + #[error("Withdrawal would cause unwanted loss of account.")] + NotExpendable, + /// Account cannot receive the assets. + #[error("Account cannot receive the assets.")] + Blocked, +} + +/// An error relating to arithmetic when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum ArithmeticError { + /// Underflow. + #[error("Underflow.")] + Underflow, + /// Overflow. + #[error("Overflow.")] + Overflow, + /// Division by zero. + #[error("Division by zero.")] + DivisionByZero, +} + +/// An error relating to the transactional layers when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum TransactionalError { + /// Too many transactional layers have been spawned. + #[error("Too many transactional layers have been spawned.")] + LimitReached, + /// A transactional layer was expected, but does not exist. + #[error("A transactional layer was expected, but does not exist.")] + NoLayer, +} + +/// Details about a module error that has occurred. +#[derive(Clone, thiserror::Error)] +#[non_exhaustive] +pub struct ModuleError { + metadata: Metadata, + /// Bytes representation: + /// - `bytes[0]`: pallet index + /// - `bytes[1]`: error index + /// - `bytes[2..]`: 3 bytes specific for the module error + bytes: [u8; 5], +} + +impl PartialEq for ModuleError { + fn eq(&self, other: &Self) -> bool { + // A module error is the same if the raw underlying details are the same. 
+ self.bytes == other.bytes + } +} + +impl Eq for ModuleError {} + +/// Custom `Debug` implementation, ignores the very large `metadata` field, using it instead (as +/// intended) to resolve the actual pallet and error names. This is much more useful for debugging. +impl Debug for ModuleError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let details = self.details_string(); + write!(f, "ModuleError(<{details}>)") + } +} + +impl std::fmt::Display for ModuleError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let details = self.details_string(); + write!(f, "{details}") + } +} + +impl ModuleError { + /// Return more details about this error. + pub fn details(&self) -> Result, ModuleErrorDetailsError> { + let pallet = self + .metadata + .pallet_by_error_index(self.pallet_index()) + .ok_or(ModuleErrorDetailsError::PalletNotFound { pallet_index: self.pallet_index() })?; + + let variant = pallet.error_variant_by_index(self.error_index()).ok_or_else(|| { + ModuleErrorDetailsError::ErrorVariantNotFound { + pallet_name: pallet.name().into(), + error_index: self.error_index(), + } + })?; + + Ok(ModuleErrorDetails { pallet, variant }) + } + + /// Return a formatted string of the resolved error details for debugging/display purposes. + pub fn details_string(&self) -> String { + match self.details() { + Ok(details) => format!( + "{pallet_name}::{variant_name}", + pallet_name = details.pallet.name(), + variant_name = details.variant.name, + ), + Err(_) => format!( + "Unknown pallet error '{bytes:?}' (pallet and error details cannot be retrieved)", + bytes = self.bytes + ), + } + } + + /// Return the underlying module error data that was decoded. + pub fn bytes(&self) -> [u8; 5] { + self.bytes + } + + /// Obtain the pallet index from the underlying byte data. + pub fn pallet_index(&self) -> u8 { + self.bytes[0] + } + + /// Obtain the error index from the underlying byte data. 
+ pub fn error_index(&self) -> u8 { + self.bytes[1] + } + + /// Attempts to decode the ModuleError into the top outer Error enum. + pub fn as_root_error(&self) -> Result { + let decoded = E::decode_as_type( + &mut &self.bytes[..], + self.metadata.outer_enums().error_enum_ty(), + self.metadata.types(), + ) + .map_err(ModuleErrorDecodeError)?; + + Ok(decoded) + } +} + +/// Details about the module error. +pub struct ModuleErrorDetails<'a> { + /// The pallet that the error is in + pub pallet: pezkuwi_subxt_metadata::PalletMetadata<'a>, + /// The variant representing the error + pub variant: &'a scale_info::Variant, +} + +impl DispatchError { + /// Attempt to decode a runtime [`DispatchError`]. + #[doc(hidden)] + pub fn decode_from<'a>( + bytes: impl Into>, + metadata: Metadata, + ) -> Result { + let bytes = bytes.into(); + let dispatch_error_ty_id = metadata + .dispatch_error_ty() + .ok_or(DispatchErrorDecodeError::DispatchErrorTypeIdNotFound)?; + + // The aim is to decode our bytes into roughly this shape. This is copied from + // `sp_runtime::DispatchError`; we need the variant names and any inner variant + // names/shapes to line up in order for decoding to be successful. + #[derive(scale_decode::DecodeAsType)] + enum DecodedDispatchError { + Other, + CannotLookup, + BadOrigin, + Module(DecodedModuleErrorBytes), + ConsumerRemaining, + NoProviders, + TooManyConsumers, + Token(TokenError), + Arithmetic(ArithmeticError), + Transactional(TransactionalError), + Exhausted, + Corruption, + Unavailable, + RootNotAllowed, + } + + // ModuleError is a bit special; we want to support being decoded from either + // a legacy format of 2 bytes, or a newer format of 5 bytes. So, just grab the bytes + // out when decoding to manually work with them. 
+        struct DecodedModuleErrorBytes(Vec<u8>);
+        struct DecodedModuleErrorBytesVisitor<R>(PhantomData<R>);
+        impl<R: scale_decode::TypeResolver> scale_decode::Visitor for DecodedModuleErrorBytesVisitor<R> {
+            type Error = scale_decode::Error;
+            type Value<'scale, 'info> = DecodedModuleErrorBytes;
+            type TypeResolver = R;
+
+            fn unchecked_decode_as_type<'scale, 'info>(
+                self,
+                input: &mut &'scale [u8],
+                _type_id: R::TypeId,
+                _types: &'info R,
+            ) -> DecodeAsTypeResult<Self, Result<Self::Value<'scale, 'info>, Self::Error>> {
+                DecodeAsTypeResult::Decoded(Ok(DecodedModuleErrorBytes(input.to_vec())))
+            }
+        }
+
+        impl scale_decode::IntoVisitor for DecodedModuleErrorBytes {
+            type AnyVisitor<R: scale_decode::TypeResolver> = DecodedModuleErrorBytesVisitor<R>;
+            fn into_visitor<R: scale_decode::TypeResolver>() -> DecodedModuleErrorBytesVisitor<R> {
+                DecodedModuleErrorBytesVisitor(PhantomData)
+            }
+        }
+
+        // Decode into our temporary error:
+        let decoded_dispatch_err = DecodedDispatchError::decode_as_type(
+            &mut &*bytes,
+            dispatch_error_ty_id,
+            metadata.types(),
+        )
+        .map_err(DispatchErrorDecodeError::CouldNotDecodeDispatchError)?;
+
+        // Convert into the outward-facing error, mainly by handling the Module variant.
+ let dispatch_error = match decoded_dispatch_err { + // Mostly we don't change anything from our decoded to our outward-facing error: + DecodedDispatchError::Other => DispatchError::Other, + DecodedDispatchError::CannotLookup => DispatchError::CannotLookup, + DecodedDispatchError::BadOrigin => DispatchError::BadOrigin, + DecodedDispatchError::ConsumerRemaining => DispatchError::ConsumerRemaining, + DecodedDispatchError::NoProviders => DispatchError::NoProviders, + DecodedDispatchError::TooManyConsumers => DispatchError::TooManyConsumers, + DecodedDispatchError::Token(val) => DispatchError::Token(val), + DecodedDispatchError::Arithmetic(val) => DispatchError::Arithmetic(val), + DecodedDispatchError::Transactional(val) => DispatchError::Transactional(val), + DecodedDispatchError::Exhausted => DispatchError::Exhausted, + DecodedDispatchError::Corruption => DispatchError::Corruption, + DecodedDispatchError::Unavailable => DispatchError::Unavailable, + DecodedDispatchError::RootNotAllowed => DispatchError::RootNotAllowed, + // But we apply custom logic to transform the module error into the outward facing + // version: + DecodedDispatchError::Module(module_bytes) => { + let module_bytes = module_bytes.0; + + // The old version is 2 bytes; a pallet and error index. + // The new version is 5 bytes; a pallet and error index and then 3 extra bytes. + let bytes = if module_bytes.len() == 2 { + [module_bytes[0], module_bytes[1], 0, 0, 0] + } else if module_bytes.len() == 5 { + [ + module_bytes[0], + module_bytes[1], + module_bytes[2], + module_bytes[3], + module_bytes[4], + ] + } else { + tracing::warn!( + "Can't decode error sp_runtime::DispatchError: bytes do not match known shapes" + ); + // Return _all_ of the bytes; every "unknown" return should be consistent. 
+                    return Err(DispatchErrorDecodeError::CouldNotDecodeModuleError {
+                        bytes: bytes.to_vec(),
+                    });
+                };
+
+                // And return our outward-facing version:
+                DispatchError::Module(ModuleError { metadata, bytes })
+            },
+        };
+
+        Ok(dispatch_error)
+    }
+}
diff --git a/vendor/pezkuwi-subxt/subxt/src/error/hex.rs b/vendor/pezkuwi-subxt/subxt/src/error/hex.rs
new file mode 100644
index 00000000..e73b2ca0
--- /dev/null
+++ b/vendor/pezkuwi-subxt/subxt/src/error/hex.rs
@@ -0,0 +1,15 @@
+/// Display hex strings.
+#[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)]
+pub struct Hex(String);
+
+impl<T: AsRef<[u8]>> From<T> for Hex {
+    fn from(value: T) -> Self {
+        Hex(hex::encode(value.as_ref()))
+    }
+}
+
+impl std::fmt::Display for Hex {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
diff --git a/vendor/pezkuwi-subxt/subxt/src/error/mod.rs b/vendor/pezkuwi-subxt/subxt/src/error/mod.rs
new file mode 100644
index 00000000..a05db903
--- /dev/null
+++ b/vendor/pezkuwi-subxt/subxt/src/error/mod.rs
@@ -0,0 +1,668 @@
+// Copyright 2019-2025 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+//! Types representing the errors that can be returned.
+
+mod dispatch_error;
+mod hex;
+
+crate::macros::cfg_unstable_light_client! {
+    pub use pezkuwi_subxt_lightclient::LightClientError;
+}
+
+// Re-export dispatch error types:
+pub use dispatch_error::{
+    ArithmeticError, DispatchError, ModuleError, TokenError, TransactionalError,
+};
+
+// Re-expose the errors we use from other crates here:
+pub use crate::Metadata;
+pub use hex::Hex;
+pub use pezkuwi_subxt_metadata::TryFromError as MetadataTryFromError;
+pub use scale_decode::Error as DecodeError;
+pub use scale_encode::Error as EncodeError;
+
+// Re-export core error types we're just reusing.
+pub use pezkuwi_subxt_core::error::{ + ConstantError, + CustomValueError, + EventsError as CoreEventsError, + // These errors are exposed as-is: + ExtrinsicDecodeErrorAt, + // These errors are wrapped: + ExtrinsicError as CoreExtrinsicError, + RuntimeApiError as CoreRuntimeApiError, + StorageError as CoreStorageError, + StorageKeyError, + StorageValueError, + ViewFunctionError as CoreViewFunctionError, +}; + +/// A global error type. Any of the errors exposed here can convert into this +/// error via `.into()`, but this error isn't itself exposed from anything. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error(transparent)] + ConstantError(#[from] ConstantError), + #[error(transparent)] + CustomValueError(#[from] CustomValueError), + #[error(transparent)] + StorageKeyError(#[from] StorageKeyError), + #[error(transparent)] + StorageValueError(#[from] StorageValueError), + #[error(transparent)] + BackendError(#[from] BackendError), + #[error(transparent)] + BlockError(#[from] BlockError), + #[error(transparent)] + AccountNonceError(#[from] AccountNonceError), + #[error(transparent)] + OnlineClientError(#[from] OnlineClientError), + #[error(transparent)] + RuntimeUpdaterError(#[from] RuntimeUpdaterError), + #[error(transparent)] + RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError), + #[error(transparent)] + RuntimeApiError(#[from] RuntimeApiError), + #[error(transparent)] + EventsError(#[from] EventsError), + #[error(transparent)] + ExtrinsicError(#[from] ExtrinsicError), + #[error(transparent)] + ViewFunctionError(#[from] ViewFunctionError), + #[error(transparent)] + TransactionProgressError(#[from] TransactionProgressError), + #[error(transparent)] + TransactionStatusError(#[from] TransactionStatusError), + #[error(transparent)] + TransactionEventsError(#[from] TransactionEventsError), + #[error(transparent)] + 
TransactionFinalizedSuccessError(#[from] TransactionFinalizedSuccessError), + #[error(transparent)] + ModuleErrorDetailsError(#[from] ModuleErrorDetailsError), + #[error(transparent)] + ModuleErrorDecodeError(#[from] ModuleErrorDecodeError), + #[error(transparent)] + DispatchErrorDecodeError(#[from] DispatchErrorDecodeError), + #[error(transparent)] + StorageError(#[from] StorageError), + // Dev note: Subxt doesn't directly return Raw* errors. These exist so that when + // users use common crates (like parity-scale-codec and subxt-rpcs), errors returned + // there can be handled automatically using ? when the expected error is pezkuwi_subxt::Error. + #[error("Other RPC client error: {0}")] + OtherRpcClientError(#[from] pezkuwi_subxt_rpcs::Error), + #[error("Other codec error: {0}")] + OtherCodecError(#[from] codec::Error), + #[cfg(feature = "unstable-light-client")] + #[error("Other light client error: {0}")] + OtherLightClientError(#[from] pezkuwi_subxt_lightclient::LightClientError), + #[cfg(feature = "unstable-light-client")] + #[error("Other light client RPC error: {0}")] + OtherLightClientRpcError(#[from] pezkuwi_subxt_lightclient::LightClientRpcError), + // Dev note: Nothing in subxt should ever emit this error. It can instead be used + // to easily map other errors into a pezkuwi_subxt::Error for convenience. Some From impls + // make this automatic for common "other" error types. + #[error("Other error: {0}")] + Other(Box), +} + +impl From for Error { + fn from(value: std::convert::Infallible) -> Self { + match value {} + } +} + +impl Error { + /// Create a generic error. This is a quick workaround when you are using + /// [`Error`] and have a non-Subxt error to return. + pub fn other(error: E) -> Error { + Error::Other(Box::new(error)) + } + + /// Create a generic error from a string. This is a quick workaround when you are using + /// [`Error`] and have a non-Subxt error to return. 
+ pub fn other_str(error: impl Into) -> Error { + #[derive(thiserror::Error, Debug, Clone)] + #[error("{0}")] + struct StrError(String); + Error::Other(Box::new(StrError(error.into()))) + } + + /// Checks whether the error was caused by a RPC re-connection. + pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!( + self.backend_error(), + Some(BackendError::Rpc(RpcError::ClientError( + pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect(_) + ))) + ) + } + + /// Checks whether the error was caused by a RPC request being rejected. + pub fn is_rpc_limit_reached(&self) -> bool { + matches!(self.backend_error(), Some(BackendError::Rpc(RpcError::LimitReached))) + } + + fn backend_error(&self) -> Option<&BackendError> { + match self { + Error::BlockError(e) => e.backend_error(), + Error::AccountNonceError(e) => e.backend_error(), + Error::OnlineClientError(e) => e.backend_error(), + Error::RuntimeUpdaterError(e) => e.backend_error(), + Error::RuntimeApiError(e) => e.backend_error(), + Error::EventsError(e) => e.backend_error(), + Error::ExtrinsicError(e) => e.backend_error(), + Error::ViewFunctionError(e) => e.backend_error(), + Error::TransactionProgressError(e) => e.backend_error(), + Error::TransactionEventsError(e) => e.backend_error(), + Error::TransactionFinalizedSuccessError(e) => e.backend_error(), + Error::StorageError(e) => e.backend_error(), + // Any errors that **don't** return a BackendError anywhere will return None: + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum BackendError { + #[error("Backend error: RPC error: {0}")] + Rpc(#[from] RpcError), + #[error("Backend error: Could not find metadata version {0}")] + MetadataVersionNotFound(u32), + #[error("Backend error: Could not codec::Decode Runtime API response: {0}")] + CouldNotScaleDecodeRuntimeResponse(codec::Error), + #[error("Backend error: Could not codec::Decode metadata bytes into pezkuwi_subxt::Metadata: {0}")] + 
CouldNotDecodeMetadata(codec::Error), + // This is for errors in `Backend` implementations which aren't any of the "pre-defined" set + // above: + #[error("Custom backend error: {0}")] + Other(String), +} + +impl BackendError { + /// Checks whether the error was caused by a RPC re-connection. + pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!( + self, + BackendError::Rpc(RpcError::ClientError( + pezkuwi_subxt_rpcs::Error::DisconnectedWillReconnect(_) + )) + ) + } + + /// Checks whether the error was caused by a RPC request being rejected. + pub fn is_rpc_limit_reached(&self) -> bool { + matches!(self, BackendError::Rpc(RpcError::LimitReached)) + } +} + +impl From for BackendError { + fn from(value: pezkuwi_subxt_rpcs::Error) -> Self { + BackendError::Rpc(RpcError::ClientError(value)) + } +} + +/// An RPC error. Since we are generic over the RPC client that is used, +/// the error is boxed and could be casted. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum RpcError { + /// Error related to the RPC client. + #[error("RPC error: {0}")] + ClientError(#[from] pezkuwi_subxt_rpcs::Error), + /// This error signals that we got back a + /// [`pezkuwi_subxt_rpcs::methods::chain_head::MethodResponse::LimitReached`], which is not + /// technically an RPC error but is treated as an error in our own APIs. + #[error("RPC error: limit reached")] + LimitReached, + /// The RPC subscription was dropped. 
+ #[error("RPC error: subscription dropped.")] + SubscriptionDropped, +} + +/// Block error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum BlockError { + #[error( + "Could not find the block body with hash {block_hash} (perhaps it was on a non-finalized fork?)" + )] + BlockNotFound { block_hash: Hex }, + #[error("Could not download the block header with hash {block_hash}: {reason}")] + CouldNotGetBlockHeader { block_hash: Hex, reason: BackendError }, + #[error("Could not download the latest block header: {0}")] + CouldNotGetLatestBlock(BackendError), + #[error("Could not subscribe to all blocks: {0}")] + CouldNotSubscribeToAllBlocks(BackendError), + #[error("Could not subscribe to best blocks: {0}")] + CouldNotSubscribeToBestBlocks(BackendError), + #[error("Could not subscribe to finalized blocks: {0}")] + CouldNotSubscribeToFinalizedBlocks(BackendError), + #[error("Error getting account nonce at block {block_hash}")] + AccountNonceError { block_hash: Hex, account_id: Hex, reason: AccountNonceError }, +} + +impl BlockError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + BlockError::CouldNotGetBlockHeader { reason: e, .. 
} | + BlockError::CouldNotGetLatestBlock(e) | + BlockError::CouldNotSubscribeToAllBlocks(e) | + BlockError::CouldNotSubscribeToBestBlocks(e) | + BlockError::CouldNotSubscribeToFinalizedBlocks(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum AccountNonceError { + #[error("Could not retrieve account nonce: {0}")] + CouldNotRetrieve(#[from] BackendError), + #[error("Could not decode account nonce: {0}")] + CouldNotDecode(#[from] codec::Error), + #[error("Wrong number of account nonce bytes returned: {0} (expected 2, 4 or 8)")] + WrongNumberOfBytes(usize), +} + +impl AccountNonceError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + AccountNonceError::CouldNotRetrieve(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum OnlineClientError { + #[error("Cannot construct OnlineClient: {0}")] + RpcError(#[from] pezkuwi_subxt_rpcs::Error), + #[error( + "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] + CannotGetGenesisHash(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] + CannotGetCurrentRuntimeVersion(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] + CannotFetchMetadata(BackendError), +} + +impl OnlineClientError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + OnlineClientError::CannotGetLatestFinalizedBlock(e) | + OnlineClientError::CannotGetGenesisHash(e) | + OnlineClientError::CannotGetCurrentRuntimeVersion(e) | + OnlineClientError::CannotFetchMetadata(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum RuntimeUpdaterError { 
+ #[error("Error subscribing to runtime updates: The update stream ended unexpectedly")] + UnexpectedEndOfUpdateStream, + #[error("Error subscribing to runtime updates: The finalized block stream ended unexpectedly")] + UnexpectedEndOfBlockStream, + #[error("Error subscribing to runtime updates: Can't stream runtime version: {0}")] + CannotStreamRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Can't get next runtime version in stream: {0}")] + CannotGetNextRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Cannot stream finalized blocks: {0}")] + CannotStreamFinalizedBlocks(BackendError), + #[error("Error subscribing to runtime updates: Cannot get next finalized block in stream: {0}")] + CannotGetNextFinalizedBlock(BackendError), + #[error("Cannot fetch new metadata for runtime update: {0}")] + CannotFetchNewMetadata(BackendError), + #[error( + "Error subscribing to runtime updates: Cannot find the System.LastRuntimeUpgrade storage entry" + )] + CantFindSystemLastRuntimeUpgrade, + #[error("Error subscribing to runtime updates: Cannot fetch last runtime upgrade: {0}")] + CantFetchLastRuntimeUpgrade(StorageError), + #[error("Error subscribing to runtime updates: Cannot decode last runtime upgrade: {0}")] + CannotDecodeLastRuntimeUpgrade(StorageValueError), +} + +impl RuntimeUpdaterError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeUpdaterError::CannotStreamRuntimeVersion(e) | + RuntimeUpdaterError::CannotGetNextRuntimeVersion(e) | + RuntimeUpdaterError::CannotStreamFinalizedBlocks(e) | + RuntimeUpdaterError::CannotGetNextFinalizedBlock(e) | + RuntimeUpdaterError::CannotFetchNewMetadata(e) => Some(e), + _ => None, + } + } +} + +/// Error that can occur during upgrade. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeUpdateeApplyError { + #[error("The proposed runtime update is the same as the current version")] + SameVersion, +} + +/// Error working with Runtime APIs +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeApiError { + #[error("Cannot access Runtime APIs at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("{0}")] + OfflineError(#[from] CoreRuntimeApiError), + #[error("Cannot call the Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl RuntimeApiError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeApiError::CannotGetLatestFinalizedBlock(e) | + RuntimeApiError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error working with events. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum EventsError { + #[error("{0}")] + OfflineError(#[from] CoreEventsError), + #[error("Cannot access events at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot fetch event bytes: {0}")] + CannotFetchEventBytes(BackendError), +} + +impl EventsError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + EventsError::CannotGetLatestFinalizedBlock(e) | + EventsError::CannotFetchEventBytes(e) => Some(e), + _ => None, + } + } +} + +/// Error working with extrinsics. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ExtrinsicError { + #[error("{0}")] + OfflineError(#[from] CoreExtrinsicError), + #[error("Could not download block body to extract extrinsics from: {0}")] + CannotGetBlockBody(BackendError), + #[error("Block not found: {0}")] + BlockNotFound(Hex), + #[error("{0}")] + CouldNotDecodeExtrinsics(#[from] ExtrinsicDecodeErrorAt), + #[error( + "Extrinsic submission error: Cannot get latest finalized block to grab account nonce at: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot find block header for block {block_hash}")] + CannotFindBlockHeader { block_hash: Hex }, + #[error("Error getting account nonce at block {block_hash}")] + AccountNonceError { block_hash: Hex, account_id: Hex, reason: AccountNonceError }, + #[error("Cannot submit extrinsic: {0}")] + ErrorSubmittingTransaction(BackendError), + #[error("A transaction status error was returned while submitting the extrinsic: {0}")] + TransactionStatusError(TransactionStatusError), + #[error( + "The transaction status stream encountered an error while submitting the extrinsic: {0}" + )] + TransactionStatusStreamError(BackendError), + #[error( + "The transaction status stream unexpectedly ended, so we don't know the status of the submitted extrinsic" + )] + UnexpectedEndOfTransactionStatusStream, + #[error("Cannot get fee info from Runtime API: {0}")] + CannotGetFeeInfo(BackendError), + #[error("Cannot get validation info from Runtime API: {0}")] + CannotGetValidationInfo(BackendError), + #[error("Cannot decode ValidationResult bytes: {0}")] + CannotDecodeValidationResult(codec::Error), + #[error("ValidationResult bytes could not be decoded")] + UnexpectedValidationResultBytes(Vec), +} + +impl ExtrinsicError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ExtrinsicError::CannotGetBlockBody(e) | + ExtrinsicError::CannotGetLatestFinalizedBlock(e) | + 
ExtrinsicError::ErrorSubmittingTransaction(e) | + ExtrinsicError::TransactionStatusStreamError(e) | + ExtrinsicError::CannotGetFeeInfo(e) | + ExtrinsicError::CannotGetValidationInfo(e) => Some(e), + ExtrinsicError::AccountNonceError { reason, .. } => reason.backend_error(), + _ => None, + } + } +} + +/// Error working with View Functions. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ViewFunctionError { + #[error("{0}")] + OfflineError(#[from] CoreViewFunctionError), + #[error( + "Cannot access View Functions at latest block: Cannot fetch latest finalized block: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot call the View Function Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl ViewFunctionError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ViewFunctionError::CannotGetLatestFinalizedBlock(e) | + ViewFunctionError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error during the transaction progress. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum TransactionProgressError { + #[error("Cannot get the next transaction progress update: {0}")] + CannotGetNextProgressUpdate(BackendError), + #[error("Error during transaction progress: {0}")] + TransactionStatusError(#[from] TransactionStatusError), + #[error( + "The transaction status stream unexpectedly ended, so we have no further transaction progress updates" + )] + UnexpectedEndOfTransactionStatusStream, +} + +impl TransactionProgressError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionProgressError::CannotGetNextProgressUpdate(e) => Some(e), + TransactionProgressError::TransactionStatusError(_) => None, + TransactionProgressError::UnexpectedEndOfTransactionStatusStream => None, + } + } +} + +/// An error emitted as the result of a transaction progress update. 
+#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum TransactionStatusError {
+    /// An error happened on the node that the transaction was submitted to.
+    #[error("Error handling transaction: {0}")]
+    Error(String),
+    /// The transaction was deemed invalid.
+    #[error("The transaction is not valid: {0}")]
+    Invalid(String),
+    /// The transaction was dropped.
+    #[error("The transaction was dropped: {0}")]
+    Dropped(String),
+}
+
+/// Error fetching events for a just-submitted transaction
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum TransactionEventsError {
+    #[error(
+        "The block containing the submitted transaction ({block_hash}) could not be downloaded: {error}"
+    )]
+    CannotFetchBlockBody { block_hash: Hex, error: BackendError },
+    #[error(
+        "Cannot find the submitted transaction (hash: {transaction_hash}) in the block (hash: {block_hash}) it is supposed to be in."
+    )]
+    CannotFindTransactionInBlock { block_hash: Hex, transaction_hash: Hex },
+    #[error("The block containing the submitted transaction ({block_hash}) could not be found")]
+    BlockNotFound { block_hash: Hex },
+    #[error(
+        "Could not decode event at index {event_index} for the submitted transaction at block {block_hash}: {error}"
+    )]
+    CannotDecodeEventInBlock { event_index: usize, block_hash: Hex, error: EventsError },
+    #[error("Could not fetch events for the submitted transaction: {error}")]
+    CannotFetchEventsForTransaction { block_hash: Hex, transaction_hash: Hex, error: EventsError },
+    #[error("The transaction led to a DispatchError, but we failed to decode it: {error}")]
+    CannotDecodeDispatchError { error: DispatchErrorDecodeError, bytes: Vec<u8> },
+    #[error("The transaction failed with the following dispatch error: {0}")]
+    ExtrinsicFailed(#[from] DispatchError),
+}
+
+impl TransactionEventsError {
+    fn backend_error(&self) -> Option<&BackendError> {
+        match self {
+            TransactionEventsError::CannotFetchBlockBody { error, .. } => Some(error),
+            TransactionEventsError::CannotDecodeEventInBlock { error, .. } |
+            TransactionEventsError::CannotFetchEventsForTransaction { error, .. } => error.backend_error(),
+            _ => None,
+        }
+    }
+}
+
+/// Error waiting for the transaction to be finalized and successful.
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs, clippy::large_enum_variant)]
+pub enum TransactionFinalizedSuccessError {
+    #[error("Could not finalize the transaction: {0}")]
+    FinalizationError(#[from] TransactionProgressError),
+    #[error("The transaction did not succeed: {0}")]
+    SuccessError(#[from] TransactionEventsError),
+}
+
+impl TransactionFinalizedSuccessError {
+    fn backend_error(&self) -> Option<&BackendError> {
+        match self {
+            TransactionFinalizedSuccessError::FinalizationError(e) => e.backend_error(),
+            TransactionFinalizedSuccessError::SuccessError(e) => e.backend_error(),
+        }
+    }
+}
+
+/// Error resolving the details of a [`ModuleError`]
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum ModuleErrorDetailsError {
+    #[error(
+        "Could not get details for the DispatchError: could not find pallet index {pallet_index}"
+    )]
+    PalletNotFound { pallet_index: u8 },
+    #[error(
+        "Could not get details for the DispatchError: could not find error index {error_index} in pallet {pallet_name}"
+    )]
+    ErrorVariantNotFound { pallet_name: String, error_index: u8 },
+}
+
+/// Error decoding the [`ModuleError`]
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+#[error("Could not decode the DispatchError::Module payload into the given type: {0}")]
+pub struct ModuleErrorDecodeError(scale_decode::Error);
+
+/// Error decoding the [`DispatchError`]
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum DispatchErrorDecodeError {
+    #[error(
+        "Could not decode the DispatchError: could not find the corresponding type ID in the metadata"
+    )]
+    DispatchErrorTypeIdNotFound,
+    #[error("Could not decode the DispatchError: {0}")]
+    CouldNotDecodeDispatchError(scale_decode::Error),
+    #[error("Could not decode the DispatchError::Module variant")]
+    CouldNotDecodeModuleError {
+        /// The bytes corresponding to the Module variant we were unable to decode:
+        bytes: Vec<u8>,
+    },
+}
+
+/// Error working with storage.
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum StorageError {
+    #[error("{0}")]
+    Offline(#[from] CoreStorageError),
+    #[error("Cannot access storage at latest block: Cannot fetch latest finalized block: {0}")]
+    CannotGetLatestFinalizedBlock(BackendError),
+    #[error(
+        "No storage value found at the given address, and no default value to fall back to using."
+    )]
+    NoValueFound,
+    #[error("Cannot fetch the storage value: {0}")]
+    CannotFetchValue(BackendError),
+    #[error("Cannot iterate storage values: {0}")]
+    CannotIterateValues(BackendError),
+    #[error("Encountered an error iterating over storage values: {0}")]
+    StreamFailure(BackendError),
+    #[error("Cannot decode the storage version for a given entry: {0}")]
+    CannotDecodeStorageVersion(codec::Error),
+}
+
+impl StorageError {
+    fn backend_error(&self) -> Option<&BackendError> {
+        match self {
+            StorageError::CannotGetLatestFinalizedBlock(e) |
+            StorageError::CannotFetchValue(e) |
+            StorageError::CannotIterateValues(e) |
+            StorageError::StreamFailure(e) => Some(e),
+            _ => None,
+        }
+    }
+}
diff --git a/vendor/pezkuwi-subxt/subxt/src/events/events_client.rs b/vendor/pezkuwi-subxt/subxt/src/events/events_client.rs
new file mode 100644
index 00000000..38dad3ca
--- /dev/null
+++ b/vendor/pezkuwi-subxt/subxt/src/events/events_client.rs
@@ -0,0 +1,101 @@
+// Copyright 2019-2025 Parity Technologies (UK) Ltd.
+// This file is dual-licensed as Apache-2.0 or GPL-3.0.
+// see LICENSE for license details.
+
+use crate::{
+    backend::{Backend, BackendExt, BlockRef},
+    client::OnlineClientT,
+    config::{Config, HashFor},
+    error::EventsError,
+    events::Events,
+};
+use derive_where::derive_where;
+use std::future::Future;
+
+/// A client for working with events.
+#[derive_where(Clone; Client)]
+pub struct EventsClient<T, Client> {
+    client: Client,
+    _marker: std::marker::PhantomData<T>,
+}
+
+impl<T, Client> EventsClient<T, Client> {
+    /// Create a new [`EventsClient`].
+    pub fn new(client: Client) -> Self {
+        Self { client, _marker: std::marker::PhantomData }
+    }
+}
+
+impl<T, Client> EventsClient<T, Client>
+where
+    T: Config,
+    Client: OnlineClientT<T>,
+{
+    /// Obtain events at some block hash.
+    ///
+    /// # Warning
+    ///
+    /// This call only supports blocks produced since the most recent
+    /// runtime upgrade. You can attempt to retrieve events from older blocks,
+    /// but may run into errors attempting to work with them.
+    pub fn at(
+        &self,
+        block_ref: impl Into<BlockRef<HashFor<T>>>,
+    ) -> impl Future<Output = Result<Events<T>, EventsError>> + Send + 'static {
+        self.at_or_latest(Some(block_ref.into()))
+    }
+
+    /// Obtain events for the latest finalized block.
+    pub fn at_latest(
+        &self,
+    ) -> impl Future<Output = Result<Events<T>, EventsError>> + Send + 'static {
+        self.at_or_latest(None)
+    }
+
+    /// Obtain events at some block hash.
+    fn at_or_latest(
+        &self,
+        block_ref: Option<BlockRef<HashFor<T>>>,
+    ) -> impl Future<Output = Result<Events<T>, EventsError>> + Send + 'static {
+        // Clone and pass the client in like this so that we can explicitly
+        // return a Future that's Send + 'static, rather than tied to &self.
+        let client = self.client.clone();
+        async move {
+            // If a block ref isn't provided, we'll get the latest finalized block to use.
+            let block_ref = match block_ref {
+                Some(r) => r,
+                None => client
+                    .backend()
+                    .latest_finalized_block_ref()
+                    .await
+                    .map_err(EventsError::CannotGetLatestFinalizedBlock)?,
+            };
+
+            let event_bytes = get_event_bytes(client.backend(), block_ref.hash()).await?;
+            Ok(Events::decode_from(event_bytes, client.metadata()))
+        }
+    }
+}
+
+// The storage key needed to access events.
+fn system_events_key() -> [u8; 32] {
+    let a = pezsp_crypto_hashing::twox_128(b"System");
+    let b = pezsp_crypto_hashing::twox_128(b"Events");
+    let mut res = [0; 32];
+    res[0..16].clone_from_slice(&a);
+    res[16..32].clone_from_slice(&b);
+    res
+}
+
+// Get the event bytes from the provided client, at the provided block hash.
+pub(crate) async fn get_event_bytes<T: Config>(
+    backend: &dyn Backend<T>,
+    block_hash: HashFor<T>,
+) -> Result<Vec<u8>, EventsError> {
+    let bytes = backend
+        .storage_fetch_value(system_events_key().to_vec(), block_hash)
+        .await
+        .map_err(EventsError::CannotFetchEventBytes)?
+        .unwrap_or_default();
+    Ok(bytes)
+}
diff --git a/vendor/pezkuwi-subxt/subxt/src/events/events_type.rs b/vendor/pezkuwi-subxt/subxt/src/events/events_type.rs
new file mode 100644
index 00000000..5c1372b7
--- /dev/null
+++ b/vendor/pezkuwi-subxt/subxt/src/events/events_type.rs
@@ -0,0 +1,166 @@
+use crate::{
+    Metadata,
+    config::{Config, HashFor},
+    error::EventsError,
+};
+use derive_where::derive_where;
+use pezkuwi_subxt_core::events::{EventDetails as CoreEventDetails, Events as CoreEvents};
+use scale_decode::{DecodeAsFields, DecodeAsType};
+
+pub use pezkuwi_subxt_core::events::{EventMetadataDetails, Phase, StaticEvent};
+
+/// A collection of events obtained from a block, bundled with the necessary
+/// information needed to decode and iterate over them.
+// Dev note: we are just wrapping the pezkuwi_subxt_core types here to avoid leaking them
+// in Subxt and map any errors into Subxt errors so that we don't have this part of the
+// API returning a different error type (ie the pezkuwi_subxt_core::Error).
+#[derive_where(Clone, Debug)]
+pub struct Events<T: Config> {
+    inner: CoreEvents<T>,
+}
+
+impl<T: Config> Events<T> {
+    /// Create a new [`Events`] instance from the given bytes.
+    pub fn decode_from(event_bytes: Vec<u8>, metadata: Metadata) -> Self {
+        Self { inner: CoreEvents::decode_from(event_bytes, metadata) }
+    }
+
+    /// The number of events.
+ pub fn len(&self) -> u32 { + self.inner.len() + } + + /// Are there no events in this block? + // Note: mainly here to satisfy clippy.. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Return the bytes representing all of the events. + pub fn bytes(&self) -> &[u8] { + self.inner.bytes() + } + + /// Iterate over all of the events, using metadata to dynamically + /// decode them as we go, and returning the raw bytes and other associated + /// details. If an error occurs, all subsequent iterations return `None`. + // Dev note: The returned iterator is 'static + Send so that we can box it up and make + // use of it with our `FilterEvents` stuff. + pub fn iter( + &self, + ) -> impl Iterator, EventsError>> + Send + Sync + 'static { + self.inner + .iter() + .map(|item| item.map(|e| EventDetails { inner: e }).map_err(Into::into)) + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return only those which should decode to the provided `Ev` type. + /// If an error occurs, all subsequent iterations return `None`. + pub fn find(&self) -> impl Iterator> { + self.inner.find::().map(|item| item.map_err(Into::into)) + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return the first event found which decodes to the provided `Ev` type. + pub fn find_first(&self) -> Result, EventsError> { + self.inner.find_first::().map_err(Into::into) + } + + /// Iterate through the events using metadata to dynamically decode and skip + /// them, and return the last event found which decodes to the provided `Ev` type. + pub fn find_last(&self) -> Result, EventsError> { + self.inner.find_last::().map_err(Into::into) + } + + /// Find an event that decodes to the type provided. Returns true if it was found. + pub fn has(&self) -> Result { + self.inner.has::().map_err(Into::into) + } +} + +/// The event details. 
+#[derive(Debug, Clone)] +pub struct EventDetails { + inner: CoreEventDetails, +} + +impl EventDetails { + /// When was the event produced? + pub fn phase(&self) -> Phase { + self.inner.phase() + } + + /// What index is this event in the stored events for this block. + pub fn index(&self) -> u32 { + self.inner.index() + } + + /// The index of the pallet that the event originated from. + pub fn pallet_index(&self) -> u8 { + self.inner.pallet_index() + } + + /// The index of the event variant that the event originated from. + pub fn variant_index(&self) -> u8 { + self.inner.variant_index() + } + + /// The name of the pallet from whence the Event originated. + pub fn pallet_name(&self) -> &str { + self.inner.pallet_name() + } + + /// Alias for pallet_name() - rebranded terminology (pezpallet) + pub fn pezpallet_name(&self) -> &str { + self.pallet_name() + } + + /// The name of the event (ie the name of the variant that it corresponds to). + pub fn variant_name(&self) -> &str { + self.inner.variant_name() + } + + /// Fetch details from the metadata for this event. + pub fn event_metadata(&self) -> EventMetadataDetails<'_> { + self.inner.event_metadata() + } + + /// Return _all_ of the bytes representing this event, which include, in order: + /// - The phase. + /// - Pallet and event index. + /// - Event fields. + /// - Event Topics. + pub fn bytes(&self) -> &[u8] { + self.inner.bytes() + } + + /// Return the bytes representing the fields stored in this event. + pub fn field_bytes(&self) -> &[u8] { + self.inner.field_bytes() + } + + /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] + /// type which represents the named or unnamed fields that were present in the event. + pub fn decode_as_fields(&self) -> Result { + self.inner.decode_as_fields().map_err(Into::into) + } + + /// Attempt to decode these [`EventDetails`] into a type representing the event fields. 
+ /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. + pub fn as_event(&self) -> Result, EventsError> { + self.inner.as_event::().map_err(Into::into) + } + + /// Attempt to decode these [`EventDetails`] into a root event type (which includes + /// the pallet and event enum variants as well as the event fields). A compatible + /// type for this is exposed via static codegen as a root level `Event` type. + pub fn as_root_event(&self) -> Result { + self.inner.as_root_event::().map_err(Into::into) + } + + /// Return the topics associated with this event. + pub fn topics(&self) -> &[HashFor] { + self.inner.topics() + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/events/mod.rs b/vendor/pezkuwi-subxt/subxt/src/events/mod.rs new file mode 100644 index 00000000..3c058099 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/events/mod.rs @@ -0,0 +1,34 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes the types and such necessary for working with events. +//! The two main entry points into events are [`crate::OnlineClient::events()`] +//! and calls like [crate::tx::TxProgress::wait_for_finalized_success()]. + +mod events_client; +mod events_type; + +use crate::{client::OnlineClientT, error::EventsError}; +use pezkuwi_subxt_core::{ + Metadata, + config::{Config, HashFor}, +}; + +pub use events_client::EventsClient; +pub use events_type::{EventDetails, EventMetadataDetails, Events, Phase, StaticEvent}; + +/// Creates a new [`Events`] instance by fetching the corresponding bytes at `block_hash` from the +/// client. 
+pub async fn new_events_from_client( + metadata: Metadata, + block_hash: HashFor, + client: C, +) -> Result, EventsError> +where + T: Config, + C: OnlineClientT, +{ + let event_bytes = events_client::get_event_bytes(client.backend(), block_hash).await?; + Ok(Events::::decode_from(event_bytes, metadata)) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/lib.rs b/vendor/pezkuwi-subxt/subxt/src/lib.rs new file mode 100644 index 00000000..8103cf06 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/lib.rs @@ -0,0 +1,383 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Subxt is a library for interacting with Bizinikiwi based nodes. Using it looks something like +//! this: +//! +//! ```rust,ignore +#![doc = include_str!("../examples/tx_basic.rs")] +//! ``` +//! +//! Take a look at [the Subxt guide](book) to learn more about how to use Subxt. + +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(any( + all(feature = "web", feature = "native"), + not(any(feature = "web", feature = "native")) +))] +compile_error!("subxt: exactly one of the 'web' and 'native' features should be used."); + +// Internal helper macros +#[macro_use] +mod macros; + +// The guide is here. +pub mod book; + +// Suppress an unused dependency warning because tokio is +// only used in example code snippets at the time of writing. +#[cfg(test)] +mod only_used_in_docs_or_tests { + use pezkuwi_subxt_signer as _; + use tokio as _; +} + +// Suppress an unused dependency warning because tracing_subscriber is +// only used in example code snippets at the time of writing. 
+#[cfg(test)] +use tracing_subscriber as _; + +pub mod backend; +pub mod blocks; +pub mod client; +pub mod constants; +pub mod custom_values; +pub mod error; +pub mod events; +pub mod runtime_api; +pub mod storage; +pub mod tx; +pub mod utils; +pub mod view_functions; + +/// This module provides a [`Config`] type, which is used to define various +/// types that are important in order to speak to a particular chain. +/// [`BizinikiwConfig`] provides a default set of these types suitable for the +/// default Bizinikiwi node implementation, and [`PezkuwiConfig`] for a +/// Pezkuwi node. +pub mod config { + pub use pezkuwi_subxt_core::{ + config::{ + Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, ExtrinsicParams, + ExtrinsicParamsEncoder, Hash, HashFor, Hasher, Header, PezkuwiConfig, + PezkuwiExtrinsicParams, BizinikiwConfig, BizinikiwiExtrinsicParams, + TransactionExtension, pezkuwi, bizinikiwi, transaction_extensions, + }, + error::ExtrinsicParamsError, + }; +} + +/// Types representing the metadata obtained from a node. +pub mod metadata { + pub use pezkuwi_subxt_metadata::*; +} + +/// Submit dynamic transactions. +pub mod dynamic { + pub use pezkuwi_subxt_core::dynamic::*; +} + +// Expose light client bits +cfg_unstable_light_client! { + pub use pezkuwi_subxt_lightclient as lightclient; +} + +// Expose a few of the most common types at root, +// but leave most types behind their respective modules. +pub use crate::{ + client::{OfflineClient, OnlineClient}, + config::{Config, PezkuwiConfig, BizinikiwConfig}, + error::Error, + metadata::Metadata, +}; + +/// Re-export external crates that are made use of in the subxt API. +pub mod ext { + pub use codec; + pub use frame_metadata; + pub use futures; + pub use pezkuwi_subxt_core; + pub use pezkuwi_subxt_rpcs; + pub use scale_bits; + pub use scale_decode; + pub use scale_encode; + pub use scale_value; + + cfg_jsonrpsee! 
{ + pub use jsonrpsee; + } +} + +/// Generate a strongly typed API for interacting with a Bizinikiwi runtime from its metadata of +/// WASM. +/// +/// # Metadata +/// +/// First, you'll need to get hold of some metadata for the node you'd like to interact with. +/// One way to do this is by using the `subxt` CLI tool: +/// +/// ```bash +/// # Install the CLI tool: +/// cargo install subxt-cli +/// # Use it to download metadata (in this case, from a node running locally) +/// subxt metadata > pezkuwi_metadata.scale +/// ``` +/// +/// Run `subxt metadata --help` for more options. +/// +/// # Basic usage +/// +/// We can generate an interface to a chain given either: +/// - A locally saved SCALE encoded metadata file (see above) for that chain, +/// - The Runtime WASM for that chain, or +/// - A URL pointing at the JSON-RPC interface for a node on that chain. +/// +/// In each case, the `subxt` macro will use this data to populate the annotated module with +/// all of the methods and types required for interacting with the chain that the +/// Runtime/metadata was loaded from. +/// +/// Let's look at each of these: +/// +/// ## Using a locally saved metadata file +/// +/// Annotate a Rust module with the `subxt` attribute referencing a metadata file like so: +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// You can use the `$OUT_DIR` placeholder in the path to reference metadata generated at build +/// time: +/// +/// ```rust,ignore +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "$OUT_DIR/metadata.scale", +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// ## Using a WASM runtime via `runtime_path = "..."` +/// +/// This requires the `runtime-wasm-path` feature flag. 
+/// +/// Annotate a Rust module with the `subxt` attribute referencing some runtime WASM like so: +/// +/// ```rust,ignore +/// #[pezkuwi_subxt::subxt( +/// runtime_path = "../artifacts/zagros_runtime.wasm", +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// You can also use the `$OUT_DIR` placeholder in the path to reference WASM files generated +/// at build time: +/// +/// ```rust,ignore +/// #[pezkuwi_subxt::subxt( +/// runtime_path = "$OUT_DIR/runtime.wasm", +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// ## Connecting to a node to download metadata via `runtime_metadata_insecure_url = "..."` +/// +/// This will, at compile time, connect to the JSON-RPC interface for some node at the URL +/// given, download the metadata from it, and use that. This can be useful in CI, but is **not +/// recommended** in production code, because: +/// +/// - The compilation time is increased since we have to download metadata from a URL each +/// time. If the node we connect to is unresponsive, this will be slow or could fail. +/// - The metadata may change from what is expected without notice, causing compilation to fail +/// if it leads to changes in the generated interfaces that are being used. +/// - The node that you connect to could be malicious and provide incorrect metadata for the +/// chain. 
+/// +/// ```rust,ignore +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_insecure_url = "wss://rpc.pezkuwi.io:443" +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// # Configuration +/// +/// This macro supports a number of attributes to configure what is generated: +/// +/// ## `crate = "..."` +/// +/// Use this attribute to specify a custom path to the `pezkuwi_subxt_core` crate: +/// +/// ```rust,standalone_crate +/// # pub extern crate pezkuwi_subxt_core; +/// # pub mod path { pub mod to { pub use pezkuwi_subxt_core; } } +/// # fn main() {} +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// crate = "crate::path::to::pezkuwi_subxt_core" +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// This is useful if you write a library which uses this macro, but don't want to force users +/// to depend on `subxt` at the top level too. By default the path `::subxt` is used. +/// +/// ## `substitute_type(path = "...", with = "...")` +/// +/// This attribute replaces any reference to the generated type at the path given by `path` +/// with a reference to the path given by `with`. 
+/// +/// ```rust,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// substitute_type(path = "sp_arithmetic::per_things::Perbill", with = "crate::Foo") +/// )] +/// mod pezkuwi {} +/// +/// # #[derive( +/// # scale_encode::EncodeAsType, +/// # scale_decode::DecodeAsType, +/// # codec::Encode, +/// # codec::Decode, +/// # Clone, +/// # Debug, +/// # )] +/// // In reality this needs some traits implementing on +/// // it to allow it to be used in place of Perbill: +/// pub struct Foo(u32); +/// # impl codec::CompactAs for Foo { +/// # type As = u32; +/// # fn encode_as(&self) -> &Self::As { +/// # &self.0 +/// # } +/// # fn decode_from(x: Self::As) -> Result { +/// # Ok(Foo(x)) +/// # } +/// # } +/// # impl From> for Foo { +/// # fn from(v: codec::Compact) -> Foo { +/// # v.0 +/// # } +/// # } +/// # fn main() {} +/// ``` +/// +/// If the type you're substituting contains generic parameters, you can "pattern match" on +/// those, and make use of them in the substituted type, like so: +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// substitute_type( +/// path = "sp_runtime::multiaddress::MultiAddress", +/// with = "::pezkuwi_subxt::utils::Static>" +/// ) +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// The above is also an example of using the [`crate::utils::Static`] type to wrap some type +/// which doesn't on it's own implement [`scale_encode::EncodeAsType`] or +/// [`scale_decode::DecodeAsType`], which are required traits for any substitute type to +/// implement by default. +/// +/// ## `derive_for_all_types = "..."` +/// +/// By default, all generated types derive a small set of traits. 
This attribute allows you to +/// derive additional traits on all generated types: +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// derive_for_all_types = "Eq, PartialEq" +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// Any substituted types (including the default substitutes) must also implement these traits +/// in order to avoid errors here. +/// +/// ## `derive_for_type(path = "...", derive = "...")` +/// +/// Unlike the above, which derives some trait on every generated type, this attribute allows +/// you to derive traits only for specific types. Note that any types which are used inside the +/// specified type may also need to derive the same traits. +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// derive_for_all_types = "Eq, PartialEq", +/// derive_for_type(path = "frame_support::PalletId", derive = "Ord, PartialOrd"), +/// derive_for_type(path = "sp_runtime::ModuleError", derive = "Hash"), +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// ## `generate_docs` +/// +/// By default, documentation is not generated via the macro, since IDEs do not typically make +/// use of it. This attribute forces documentation to be generated, too. +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// generate_docs +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// ## `runtime_types_only` +/// +/// By default, the macro will generate various interfaces to make using Subxt simpler in +/// addition with any types that need generating to make this possible. This attribute makes +/// the codegen only generate the types and not the Subxt interface. 
+/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// runtime_types_only +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// ## `no_default_derives` +/// +/// By default, the macro will add all derives necessary for the generated code to play nicely +/// with Subxt. Adding this attribute removes all default derives. +/// +/// ```rust,no_run,standalone_crate +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_path = "../artifacts/pezkuwi_metadata_full.scale", +/// runtime_types_only, +/// no_default_derives, +/// derive_for_all_types="codec::Encode, codec::Decode" +/// )] +/// mod pezkuwi {} +/// ``` +/// +/// **Note**: At the moment, you must derive at least one of `codec::Encode` or `codec::Decode` +/// or `scale_encode::EncodeAsType` or `scale_decode::DecodeAsType` (because we add +/// `#[codec(..)]` attributes on some fields/types during codegen), and you must use this +/// feature in conjunction with `runtime_types_only` (or manually specify a bunch of defaults +/// to make codegen work properly when generating the subxt interfaces). +/// +/// ## `unstable_metadata` +/// +/// This attribute works only in combination with `runtime_metadata_insecure_url`. By default, +/// the macro will fetch the latest stable version of the metadata from the target node. This +/// attribute makes the codegen attempt to fetch the unstable version of the metadata first. +/// This is **not recommended** in production code, since the unstable metadata a node is +/// providing is likely to be incompatible with Subxt. 
+/// +/// ```rust,ignore +/// #[pezkuwi_subxt::subxt( +/// runtime_metadata_insecure_url = "wss://rpc.pezkuwi.io:443", +/// unstable_metadata +/// )] +/// mod pezkuwi {} +/// ``` +pub use pezkuwi_subxt_macro::subxt; diff --git a/vendor/pezkuwi-subxt/subxt/src/macros.rs b/vendor/pezkuwi-subxt/subxt/src/macros.rs new file mode 100644 index 00000000..122937a7 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/macros.rs @@ -0,0 +1,61 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +macro_rules! cfg_feature { + ($feature:literal, $($item:item)*) => { + $( + #[cfg(feature = $feature)] + #[cfg_attr(docsrs, doc(cfg(feature = $feature)))] + $item + )* + } +} + +macro_rules! cfg_unstable_light_client { + ($($item:item)*) => { + crate::macros::cfg_feature!("unstable-light-client", $($item)*); + }; +} + +macro_rules! cfg_reconnecting_rpc_client { + ($($item:item)*) => { + crate::macros::cfg_feature!("reconnecting-rpc-client", $($item)*); + }; +} + +macro_rules! cfg_jsonrpsee { + ($($item:item)*) => { + crate::macros::cfg_feature!("jsonrpsee", $($item)*); + }; +} + +#[allow(unused)] +macro_rules! cfg_jsonrpsee_native { + ($($item:item)*) => { + $( + #[cfg(all(feature = "jsonrpsee", feature = "native"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "jsonrpsee", feature = "native"))))] + $item + )* + } +} + +#[allow(unused)] +macro_rules! cfg_jsonrpsee_web { + ($($item:item)*) => { + $( + #[cfg(all(feature = "jsonrpsee", feature = "web"))] + #[cfg_attr(docsrs, doc(cfg(all(feature = "jsonrpsee", feature = "web"))))] + $item + )* + } +} + +pub(crate) use cfg_feature; +pub(crate) use cfg_jsonrpsee; +pub(crate) use cfg_unstable_light_client; + +// Only used by light-client. 
+#[allow(unused)] +pub(crate) use {cfg_jsonrpsee_native, cfg_jsonrpsee_web, cfg_reconnecting_rpc_client}; diff --git a/vendor/pezkuwi-subxt/subxt/src/runtime_api/mod.rs b/vendor/pezkuwi-subxt/subxt/src/runtime_api/mod.rs new file mode 100644 index 00000000..6be9388b --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/runtime_api/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types associated with executing runtime API calls. + +mod runtime_client; +mod runtime_types; + +pub use pezkuwi_subxt_core::runtime_api::payload::{ + DynamicPayload, Payload, StaticPayload, dynamic, +}; +pub use runtime_client::RuntimeApiClient; +pub use runtime_types::RuntimeApi; diff --git a/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_client.rs b/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_client.rs new file mode 100644 index 00000000..7129c84f --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_client.rs @@ -0,0 +1,58 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::runtime_types::RuntimeApi; + +use crate::{ + backend::BlockRef, + client::OnlineClientT, + config::{Config, HashFor}, + error::RuntimeApiError, +}; +use derive_where::derive_where; +use std::{future::Future, marker::PhantomData}; + +/// Execute runtime API calls. +#[derive_where(Clone; Client)] +pub struct RuntimeApiClient { + client: Client, + _marker: PhantomData, +} + +impl RuntimeApiClient { + /// Create a new [`RuntimeApiClient`] + pub fn new(client: Client) -> Self { + Self { client, _marker: PhantomData } + } +} + +impl RuntimeApiClient +where + T: Config, + Client: OnlineClientT, +{ + /// Obtain a runtime API interface at some block hash. 
+ pub fn at(&self, block_ref: impl Into>>) -> RuntimeApi { + RuntimeApi::new(self.client.clone(), block_ref.into()) + } + + /// Obtain a runtime API interface at the latest finalized block. + pub fn at_latest( + &self, + ) -> impl Future, RuntimeApiError>> + Send + 'static { + // Clone and pass the client in like this so that we can explicitly + // return a Future that's Send + 'static, rather than tied to &self. + let client = self.client.clone(); + async move { + // get the ref for the latest finalized block and use that. + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(RuntimeApiError::CannotGetLatestFinalizedBlock)?; + + Ok(RuntimeApi::new(client, block_ref)) + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_types.rs b/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_types.rs new file mode 100644 index 00000000..30b485ca --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/runtime_api/runtime_types.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::Payload; +use crate::{ + backend::BlockRef, + client::OnlineClientT, + config::{Config, HashFor}, + error::RuntimeApiError, +}; +use derive_where::derive_where; +use std::{future::Future, marker::PhantomData}; + +/// Execute runtime API calls. +#[derive_where(Clone; Client)] +pub struct RuntimeApi { + client: Client, + block_ref: BlockRef>, + _marker: PhantomData, +} + +impl RuntimeApi { + /// Create a new [`RuntimeApi`] + pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { + Self { client, block_ref, _marker: PhantomData } + } +} + +impl RuntimeApi +where + T: Config, + Client: OnlineClientT, +{ + /// Run the validation logic against some runtime API payload you'd like to use. Returns + /// `Ok(())` if the payload is valid (or if it's not possible to check since the payload has no + /// validation hash). 
Return an error if the payload was not valid or something went wrong + /// trying to validate it (ie the runtime API in question do not exist at all) + pub fn validate(&self, payload: Call) -> Result<(), RuntimeApiError> { + pezkuwi_subxt_core::runtime_api::validate(payload, &self.client.metadata()) + .map_err(Into::into) + } + + /// Execute a raw runtime API call. This returns the raw bytes representing the result + /// of this call. The caller is responsible for decoding the result. + pub fn call_raw<'a>( + &self, + function: &'a str, + call_parameters: Option<&'a [u8]>, + ) -> impl Future, RuntimeApiError>> + use<'a, Client, T> { + let client = self.client.clone(); + let block_hash = self.block_ref.hash(); + // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), + // which is a temporary thing we'll be throwing away quickly: + async move { + let data = client + .backend() + .call(function, call_parameters, block_hash) + .await + .map_err(RuntimeApiError::CannotCallApi)?; + Ok(data) + } + } + + /// Execute a runtime API call. + pub fn call( + &self, + payload: Call, + ) -> impl Future> + use { + let client = self.client.clone(); + let block_hash = self.block_ref.hash(); + // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), + // which is a temporary thing we'll be throwing away quickly: + async move { + let metadata = client.metadata(); + + // Validate the runtime API payload hash against the compile hash from codegen. + pezkuwi_subxt_core::runtime_api::validate(&payload, &metadata)?; + + // Encode the arguments of the runtime call. + let call_name = pezkuwi_subxt_core::runtime_api::call_name(&payload); + let call_args = pezkuwi_subxt_core::runtime_api::call_args(&payload, &metadata)?; + + // Make the call. + let bytes = client + .backend() + .call(&call_name, Some(call_args.as_slice()), block_hash) + .await + .map_err(RuntimeApiError::CannotCallApi)?; + + // Decode the response. 
+ let value = + pezkuwi_subxt_core::runtime_api::decode_value(&mut &*bytes, &payload, &metadata)?; + Ok(value) + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/storage/mod.rs b/vendor/pezkuwi-subxt/subxt/src/storage/mod.rs new file mode 100644 index 00000000..a155a099 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/storage/mod.rs @@ -0,0 +1,12 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types associated with accessing and working with storage items. + +mod storage_client; +mod storage_client_at; + +pub use pezkuwi_subxt_core::storage::address::{Address, DynamicAddress, StaticAddress, dynamic}; +pub use storage_client::StorageClient; +pub use storage_client_at::{StorageClientAt, StorageEntryClient, StorageKeyValue, StorageValue}; diff --git a/vendor/pezkuwi-subxt/subxt/src/storage/storage_client.rs b/vendor/pezkuwi-subxt/subxt/src/storage/storage_client.rs new file mode 100644 index 00000000..a7504d89 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/storage/storage_client.rs @@ -0,0 +1,72 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::storage_client_at::StorageClientAt; +use crate::{ + backend::BlockRef, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor}, + error::StorageError, +}; +use derive_where::derive_where; +use pezkuwi_subxt_core::storage::address::Address; +use std::{future::Future, marker::PhantomData}; + +/// Query the runtime storage. 
+#[derive_where(Clone; Client)] +pub struct StorageClient { + client: Client, + _marker: PhantomData, +} + +impl StorageClient { + /// Create a new [`StorageClient`] + pub fn new(client: Client) -> Self { + Self { client, _marker: PhantomData } + } +} + +impl StorageClient +where + T: Config, + Client: OfflineClientT, +{ + /// Run the validation logic against some storage address you'd like to access. Returns `Ok(())` + /// if the address is valid (or if it's not possible to check since the address has no + /// validation hash). Return an error if the address was not valid or something went wrong + /// trying to validate it (ie the pallet or storage entry in question do not exist at all). + pub fn validate(&self, address: &Addr) -> Result<(), StorageError> { + pezkuwi_subxt_core::storage::validate(address, &self.client.metadata()).map_err(Into::into) + } +} + +impl StorageClient +where + T: Config, + Client: OnlineClientT, +{ + /// Obtain storage at some block hash. + pub fn at(&self, block_ref: impl Into>>) -> StorageClientAt { + StorageClientAt::new(self.client.clone(), block_ref.into()) + } + + /// Obtain storage at the latest finalized block. + pub fn at_latest( + &self, + ) -> impl Future, StorageError>> + Send + 'static { + // Clone and pass the client in like this so that we can explicitly + // return a Future that's Send + 'static, rather than tied to &self. + let client = self.client.clone(); + async move { + // get the ref for the latest finalized block and use that. 
+ let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(StorageError::CannotGetLatestFinalizedBlock)?; + + Ok(StorageClientAt::new(client, block_ref)) + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/storage/storage_client_at.rs b/vendor/pezkuwi-subxt/subxt/src/storage/storage_client_at.rs new file mode 100644 index 00000000..725dc0e7 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/storage/storage_client_at.rs @@ -0,0 +1,379 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + backend::{BackendExt, BlockRef}, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor}, + error::StorageError, +}; +use derive_where::derive_where; +use futures::StreamExt; +use pezkuwi_subxt_core::{ + Metadata, + storage::{PrefixOf, address::Address}, + utils::{Maybe, Yes}, +}; +use std::marker::PhantomData; + +pub use pezkuwi_subxt_core::storage::{StorageKeyValue, StorageValue}; + +/// Query the runtime storage. +#[derive_where(Clone; Client)] +pub struct StorageClientAt { + client: Client, + metadata: Metadata, + block_ref: BlockRef>, + _marker: PhantomData, +} + +impl StorageClientAt +where + T: Config, + Client: OfflineClientT, +{ + /// Create a new [`StorageClientAt`]. + pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { + // Retrieve and store metadata here so that we can borrow it in + // subsequent structs, and thus also borrow storage info and + // things that borrow from metadata. + let metadata = client.metadata(); + + Self { client, metadata, block_ref, _marker: PhantomData } + } +} + +impl StorageClientAt +where + T: Config, + Client: OfflineClientT, +{ + /// This returns a [`StorageEntryClient`], which allows working with the storage entry at the + /// provided address. 
+ pub fn entry( + &self, + address: Addr, + ) -> Result, StorageError> { + let inner = pezkuwi_subxt_core::storage::entry(address, &self.metadata)?; + Ok(StorageEntryClient { + inner, + client: self.client.clone(), + block_ref: self.block_ref.clone(), + _marker: core::marker::PhantomData, + }) + } +} + +impl StorageClientAt +where + T: Config, + Client: OnlineClientT, +{ + /// This is essentially a shorthand for `client.entry(addr)?.fetch(key_parts)`. See + /// [`StorageEntryClient::fetch()`]. + pub async fn fetch( + &self, + addr: Addr, + key_parts: Addr::KeyParts, + ) -> Result, StorageError> { + let entry = pezkuwi_subxt_core::storage::entry(addr, &self.metadata)?; + fetch(&entry, &self.client, self.block_ref.hash(), key_parts).await + } + + /// This is essentially a shorthand for `client.entry(addr)?.try_fetch(key_parts)`. See + /// [`StorageEntryClient::try_fetch()`]. + pub async fn try_fetch( + &self, + addr: Addr, + key_parts: Addr::KeyParts, + ) -> Result>, StorageError> { + let entry = pezkuwi_subxt_core::storage::entry(addr, &self.metadata)?; + try_fetch(&entry, &self.client, self.block_ref.hash(), key_parts).await + } + + /// This is essentially a shorthand for `client.entry(addr)?.iter(key_parts)`. See + /// [`StorageEntryClient::iter()`]. + pub async fn iter>( + &'_ self, + addr: Addr, + key_parts: KeyParts, + ) -> Result< + impl futures::Stream, StorageError>> + + use<'_, Addr, Client, T, KeyParts>, + StorageError, + > { + let entry = pezkuwi_subxt_core::storage::entry(addr, &self.metadata)?; + iter(entry, &self.client, self.block_ref.hash(), key_parts).await + } + + /// In rare cases, you may wish to fetch a storage value that does not live at a typical + /// address. This method is a fallback for those cases, and allows you to provide the raw + /// storage key bytes corresponding to the entry you wish to obtain. The response will either + /// be the bytes for the value found at that location, or otherwise an error. 
+ /// [`StorageError::NoValueFound`] will be returned in the event that the request was valid but + /// no value lives at the given location). + pub async fn fetch_raw(&self, key_bytes: Vec) -> Result, StorageError> { + let block_hash = self.block_ref.hash(); + let value = self + .client + .backend() + .storage_fetch_value(key_bytes, block_hash) + .await + .map_err(StorageError::CannotFetchValue)? + .ok_or(StorageError::NoValueFound)?; + + Ok(value) + } + + /// The storage version of a pallet. + /// The storage version refers to the `frame_support::traits::Metadata::StorageVersion` type. + pub async fn storage_version(&self, pallet_name: impl AsRef) -> Result { + // construct the storage key. This is done similarly in + // `frame_support::traits::metadata::StorageVersion::storage_key()`: + let mut key_bytes: Vec = vec![]; + key_bytes.extend(&pezsp_crypto_hashing::twox_128(pallet_name.as_ref().as_bytes())); + key_bytes.extend(&pezsp_crypto_hashing::twox_128(b":__STORAGE_VERSION__:")); + + // fetch the raw bytes and decode them into the StorageVersion struct: + let storage_version_bytes = self.fetch_raw(key_bytes).await?; + + ::decode(&mut &storage_version_bytes[..]) + .map_err(StorageError::CannotDecodeStorageVersion) + } + + /// Fetch the runtime WASM code. + pub async fn runtime_wasm_code(&self) -> Result, StorageError> { + // note: this should match the `CODE` constant in `sp_core::storage::well_known_keys` + self.fetch_raw(b":code".to_vec()).await + } +} + +/// This represents a single storage entry (be it a plain value or map) +/// and the operations that can be performed on it. 
+pub struct StorageEntryClient<'atblock, T: Config, Client, Addr, IsPlain> { + inner: pezkuwi_subxt_core::storage::StorageEntry<'atblock, Addr>, + client: Client, + block_ref: BlockRef>, + _marker: PhantomData<(T, IsPlain)>, +} + +impl<'atblock, T, Client, Addr, IsPlain> StorageEntryClient<'atblock, T, Client, Addr, IsPlain> +where + T: Config, + Addr: Address, +{ + /// Name of the pallet containing this storage entry. + pub fn pallet_name(&self) -> &str { + self.inner.pallet_name() + } + + /// Name of the storage entry. + pub fn entry_name(&self) -> &str { + self.inner.entry_name() + } + + /// Is the storage entry a plain value? + pub fn is_plain(&self) -> bool { + self.inner.is_plain() + } + + /// Is the storage entry a map? + pub fn is_map(&self) -> bool { + self.inner.is_map() + } + + /// Return the default value for this storage entry, if there is one. Returns `None` if there + /// is no default value. + pub fn default_value(&self) -> Option> { + self.inner.default_value() + } +} + +// Plain values get a fetch method with no extra arguments. +impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Yes> +where + T: Config, + Addr: Address, + Client: OnlineClientT, +{ + /// Fetch the storage value at this location. If no value is found, the default value will be + /// returned for this entry if one exists. If no value is found and no default value exists, an + /// error will be returned. + pub async fn fetch(&self) -> Result, StorageError> { + let value = self + .try_fetch() + .await? + .map_or_else(|| self.inner.default_value().ok_or(StorageError::NoValueFound), Ok)?; + + Ok(value) + } + + /// Fetch the storage value at this location. If no value is found, `None` will be returned. + pub async fn try_fetch( + &self, + ) -> Result>, StorageError> { + let value = self + .client + .backend() + .storage_fetch_value(self.key_prefix().to_vec(), self.block_ref.hash()) + .await + .map_err(StorageError::CannotFetchValue)? 
+ .map(|bytes| self.inner.value(bytes)); + + Ok(value) + } + + /// This is identical to [`StorageEntryClient::key_prefix()`] and is the full + /// key for this storage entry. + pub fn key(&self) -> [u8; 32] { + self.inner.key_prefix() + } + + /// The keys for plain storage values are always 32 byte hashes. + pub fn key_prefix(&self) -> [u8; 32] { + self.inner.key_prefix() + } +} + +// When HasDefaultValue = Yes, we expect there to exist a valid default value and will use that +// if we fetch an entry and get nothing back. +impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Maybe> +where + T: Config, + Addr: Address, + Client: OnlineClientT, +{ + /// Fetch a storage value within this storage entry. + /// + /// This entry may be a map, and so you must provide the relevant values for each part of the + /// storage key that is required in order to point to a single value. + /// + /// If no value is found, the default value will be returned for this entry if one exists. If no + /// value is found and no default value exists, an error will be returned. + pub async fn fetch( + &self, + key_parts: Addr::KeyParts, + ) -> Result, StorageError> { + fetch(&self.inner, &self.client, self.block_ref.hash(), key_parts).await + } + + /// Fetch a storage value within this storage entry. + /// + /// This entry may be a map, and so you must provide the relevant values for each part of the + /// storage key that is required in order to point to a single value. + /// + /// If no value is found, `None` will be returned. + pub async fn try_fetch( + &self, + key_parts: Addr::KeyParts, + ) -> Result>, StorageError> { + try_fetch(&self.inner, &self.client, self.block_ref.hash(), key_parts).await + } + + /// Iterate over storage values within this storage entry. + /// + /// You may provide any prefix of the values needed to point to a single value. 
Normally you + /// will provide `()` to iterate over _everything_, or `(first_key,)` to iterate over + /// everything underneath `first_key` in the map, or `(first_key, second_key)` to iterate over + /// everything underneath `first_key` and `second_key` in the map, and so on, up to the actual + /// depth of the map - 1. + pub async fn iter>( + &self, + key_parts: KeyParts, + ) -> Result< + impl futures::Stream, StorageError>> + + use<'atblock, Addr, Client, T, KeyParts>, + StorageError, + > { + iter(self.inner.clone(), &self.client, self.block_ref.hash(), key_parts).await + } + + /// This returns a full key to a single value in this storage entry. + pub fn key(&self, key_parts: Addr::KeyParts) -> Result, StorageError> { + let key = self.inner.fetch_key(key_parts)?; + Ok(key) + } + + /// This returns valid keys to iterate over the storage entry at the available levels. + pub fn iter_key>( + &self, + key_parts: KeyParts, + ) -> Result, StorageError> { + let key = self.inner.iter_key(key_parts)?; + Ok(key) + } + + /// The first 32 bytes of the storage entry key, which points to the entry but not necessarily + /// a single storage value (unless the entry is a plain value). + pub fn key_prefix(&self) -> [u8; 32] { + self.inner.key_prefix() + } +} + +async fn fetch<'atblock, T: Config, Client: OnlineClientT, Addr: Address>( + entry: &pezkuwi_subxt_core::storage::StorageEntry<'atblock, Addr>, + client: &Client, + block_hash: HashFor, + key_parts: Addr::KeyParts, +) -> Result, StorageError> { + let value = try_fetch(entry, client, block_hash, key_parts) + .await? 
+ .or_else(|| entry.default_value()) + .unwrap(); + + Ok(value) +} + +async fn try_fetch<'atblock, T: Config, Client: OnlineClientT, Addr: Address>( + entry: &pezkuwi_subxt_core::storage::StorageEntry<'atblock, Addr>, + client: &Client, + block_hash: HashFor, + key_parts: Addr::KeyParts, +) -> Result>, StorageError> { + let key = entry.fetch_key(key_parts)?; + + let value = client + .backend() + .storage_fetch_value(key, block_hash) + .await + .map_err(StorageError::CannotFetchValue)? + .map(|bytes| entry.value(bytes)) + .or_else(|| entry.default_value()); + + Ok(value) +} + +async fn iter< + 'atblock, + T: Config, + Client: OnlineClientT, + Addr: Address, + KeyParts: PrefixOf, +>( + entry: pezkuwi_subxt_core::storage::StorageEntry<'atblock, Addr>, + client: &Client, + block_hash: HashFor, + key_parts: KeyParts, +) -> Result< + impl futures::Stream, StorageError>> + + use<'atblock, Addr, Client, T, KeyParts>, + StorageError, +> { + let key_bytes = entry.iter_key(key_parts)?; + + let stream = client + .backend() + .storage_fetch_descendant_values(key_bytes, block_hash) + .await + .map_err(StorageError::CannotIterateValues)? + .map(move |kv| { + let kv = match kv { + Ok(kv) => kv, + Err(e) => return Err(StorageError::StreamFailure(e)), + }; + Ok(entry.key_value(kv.key, kv.value)) + }); + + Ok(Box::pin(stream)) +} diff --git a/vendor/pezkuwi-subxt/subxt/src/tx/mod.rs b/vendor/pezkuwi-subxt/subxt/src/tx/mod.rs new file mode 100644 index 00000000..8faed79c --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/tx/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Create and submit extrinsics. +//! +//! An extrinsic is submitted with an "signed extra" and "additional" parameters, which can be +//! different for each chain. The trait [`crate::config::ExtrinsicParams`] determines exactly which +//! 
additional and signed extra parameters are used when constructing an extrinsic, and is a part +//! of the chain configuration (see [`crate::config::Config`]). + +mod tx_client; +mod tx_progress; + +pub use pezkuwi_subxt_core::tx::{ + payload::{DefaultPayload, DynamicPayload, Payload, dynamic}, + signer::{self, Signer}, +}; +pub use tx_client::{ + DefaultParams, PartialTransaction, SubmittableTransaction, TransactionInvalid, + TransactionUnknown, TxClient, ValidationResult, +}; +pub use tx_progress::{TxInBlock, TxProgress, TxStatus}; diff --git a/vendor/pezkuwi-subxt/subxt/src/tx/tx_client.rs b/vendor/pezkuwi-subxt/subxt/src/tx/tx_client.rs new file mode 100644 index 00000000..4718f131 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/tx/tx_client.rs @@ -0,0 +1,950 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::{ + backend::{BackendExt, BlockRef, TransactionStatus}, + client::{OfflineClientT, OnlineClientT}, + config::{Config, ExtrinsicParams, HashFor, Header}, + error::{ExtrinsicError, TransactionStatusError}, + tx::{Payload, Signer as SignerT, TxProgress}, + utils::PhantomDataSendSync, +}; +use codec::{Compact, Decode, Encode}; +use derive_where::derive_where; +use futures::future::{TryFutureExt, try_join}; +use pezkuwi_subxt_core::tx::TransactionVersion; + +/// A client for working with transactions. +#[derive_where(Clone; Client)] +pub struct TxClient { + client: Client, + _marker: PhantomDataSendSync, +} + +impl TxClient { + /// Create a new [`TxClient`] + pub fn new(client: Client) -> Self { + Self { client, _marker: PhantomDataSendSync::new() } + } +} + +impl> TxClient { + /// Run the validation logic against some transaction you'd like to submit. Returns `Ok(())` + /// if the call is valid (or if it's not possible to check since the call has no validation + /// hash). 
Return an error if the call was not valid or something went wrong trying to validate + /// it (ie the pallet or call in question do not exist at all). + pub fn validate(&self, call: &Call) -> Result<(), ExtrinsicError> + where + Call: Payload, + { + pezkuwi_subxt_core::tx::validate(call, &self.client.metadata()).map_err(Into::into) + } + + /// Return the SCALE encoded bytes representing the call data of the transaction. + pub fn call_data(&self, call: &Call) -> Result, ExtrinsicError> + where + Call: Payload, + { + pezkuwi_subxt_core::tx::call_data(call, &self.client.metadata()).map_err(Into::into) + } + + /// Creates an unsigned transaction without submitting it. Depending on the metadata, we might + /// end up constructing either a v4 or v5 transaction. See [`Self::create_v4_unsigned`] or + /// [`Self::create_v5_bare`] if you'd like to explicitly create an unsigned transaction of a + /// certain version. + pub fn create_unsigned( + &self, + call: &Call, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let metadata = self.client.metadata(); + let tx = match pezkuwi_subxt_core::tx::suggested_version(&metadata)? { + TransactionVersion::V4 => pezkuwi_subxt_core::tx::create_v4_unsigned(call, &metadata), + TransactionVersion::V5 => pezkuwi_subxt_core::tx::create_v5_bare(call, &metadata), + }?; + + Ok(SubmittableTransaction { client: self.client.clone(), inner: tx }) + } + + /// Creates a v4 unsigned (no signature or transaction extensions) transaction without + /// submitting it. + /// + /// Prefer [`Self::create_unsigned()`] if you don't know which version to create; this will pick + /// the most suitable one for the given chain. 
+ pub fn create_v4_unsigned( + &self, + call: &Call, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let metadata = self.client.metadata(); + let tx = pezkuwi_subxt_core::tx::create_v4_unsigned(call, &metadata)?; + + Ok(SubmittableTransaction { client: self.client.clone(), inner: tx }) + } + + /// Creates a v5 "bare" (no signature or transaction extensions) transaction without submitting + /// it. + /// + /// Prefer [`Self::create_unsigned()`] if you don't know which version to create; this will pick + /// the most suitable one for the given chain. + pub fn create_v5_bare( + &self, + call: &Call, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let metadata = self.client.metadata(); + let tx = pezkuwi_subxt_core::tx::create_v5_bare(call, &metadata)?; + + Ok(SubmittableTransaction { client: self.client.clone(), inner: tx }) + } + + /// Create a partial transaction. Depending on the metadata, we might end up constructing either + /// a v4 or v5 transaction. See [`pezkuwi_subxt_core::tx`] if you'd like to manually pick the + /// version to construct + /// + /// Note: if not provided, the default account nonce will be set to 0 and the default mortality + /// will be _immortal_. This is because this method runs offline, and so is unable to fetch the + /// data needed for more appropriate values. + pub fn create_partial_offline( + &self, + call: &Call, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let metadata = self.client.metadata(); + let tx = match pezkuwi_subxt_core::tx::suggested_version(&metadata)? 
{ + TransactionVersion::V4 => + PartialTransactionInner::V4(pezkuwi_subxt_core::tx::create_v4_signed( + call, + &self.client.client_state(), + params, + )?), + TransactionVersion::V5 => + PartialTransactionInner::V5(pezkuwi_subxt_core::tx::create_v5_general( + call, + &self.client.client_state(), + params, + )?), + }; + + Ok(PartialTransaction { client: self.client.clone(), inner: tx }) + } + + /// Create a v4 partial transaction, ready to sign. + /// + /// Note: if not provided, the default account nonce will be set to 0 and the default mortality + /// will be _immortal_. This is because this method runs offline, and so is unable to fetch the + /// data needed for more appropriate values. + /// + /// Prefer [`Self::create_partial_offline()`] if you don't know which version to create; this + /// will pick the most suitable one for the given chain. + pub fn create_v4_partial_offline( + &self, + call: &Call, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let tx = PartialTransactionInner::V4(pezkuwi_subxt_core::tx::create_v4_signed( + call, + &self.client.client_state(), + params, + )?); + + Ok(PartialTransaction { client: self.client.clone(), inner: tx }) + } + + /// Create a v5 partial transaction, ready to sign. + /// + /// Note: if not provided, the default account nonce will be set to 0 and the default mortality + /// will be _immortal_. This is because this method runs offline, and so is unable to fetch the + /// data needed for more appropriate values. + /// + /// Prefer [`Self::create_partial_offline()`] if you don't know which version to create; this + /// will pick the most suitable one for the given chain. 
+ pub fn create_v5_partial_offline( + &self, + call: &Call, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + let tx = PartialTransactionInner::V5(pezkuwi_subxt_core::tx::create_v5_general( + call, + &self.client.client_state(), + params, + )?); + + Ok(PartialTransaction { client: self.client.clone(), inner: tx }) + } +} + +impl TxClient +where + T: Config, + C: OnlineClientT, +{ + /// Get the account nonce for a given account ID. + pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { + let block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; + + crate::blocks::get_account_nonce(&self.client, account_id, block_ref.hash()) + .await + .map_err(|e| ExtrinsicError::AccountNonceError { + block_hash: block_ref.hash().into(), + account_id: account_id.encode().into(), + reason: e, + }) + } + + /// Creates a partial transaction, without submitting it. This can then be signed and submitted. + pub async fn create_partial( + &self, + call: &Call, + account_id: &T::AccountId, + mut params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; + self.create_partial_offline(call, params) + } + + /// Creates a partial V4 transaction, without submitting it. This can then be signed and + /// submitted. + /// + /// Prefer [`Self::create_partial()`] if you don't know which version to create; this will pick + /// the most suitable one for the given chain. + pub async fn create_v4_partial( + &self, + call: &Call, + account_id: &T::AccountId, + mut params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; + self.create_v4_partial_offline(call, params) + } + + /// Creates a partial V5 transaction, without submitting it. 
This can then be signed and + /// submitted. + /// + /// Prefer [`Self::create_partial()`] if you don't know which version to create; this will pick + /// the most suitable one for the given chain. + pub async fn create_v5_partial( + &self, + call: &Call, + account_id: &T::AccountId, + mut params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + { + inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; + self.create_v5_partial_offline(call, params) + } + + /// Creates a signed transaction, without submitting it. + pub async fn create_signed( + &mut self, + call: &Call, + signer: &Signer, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + Signer: SignerT, + { + let mut partial = self.create_partial(call, &signer.account_id(), params).await?; + + Ok(partial.sign(signer)) + } + + /// Creates and signs an transaction and submits it to the chain. Passes default parameters + /// to construct the "signed extra" and "additional" payloads needed by the transaction. + /// + /// Returns a [`TxProgress`], which can be used to track the status of the transaction + /// and obtain details about it, once it has made it into a block. + pub async fn sign_and_submit_then_watch_default( + &mut self, + call: &Call, + signer: &Signer, + ) -> Result, ExtrinsicError> + where + Call: Payload, + Signer: SignerT, + >::Params: DefaultParams, + { + self.sign_and_submit_then_watch(call, signer, DefaultParams::default_params()) + .await + } + + /// Creates and signs an transaction and submits it to the chain. + /// + /// Returns a [`TxProgress`], which can be used to track the status of the transaction + /// and obtain details about it, once it has made it into a block. 
+ pub async fn sign_and_submit_then_watch( + &mut self, + call: &Call, + signer: &Signer, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + Signer: SignerT, + { + self.create_signed(call, signer, params).await?.submit_and_watch().await + } + + /// Creates and signs an transaction and submits to the chain for block inclusion. Passes + /// default parameters to construct the "signed extra" and "additional" payloads needed + /// by the transaction. + /// + /// Returns `Ok` with the transaction hash if it is valid transaction. + /// + /// # Note + /// + /// Success does not mean the transaction has been included in the block, just that it is valid + /// and has been included in the transaction pool. + pub async fn sign_and_submit_default( + &mut self, + call: &Call, + signer: &Signer, + ) -> Result, ExtrinsicError> + where + Call: Payload, + Signer: SignerT, + >::Params: DefaultParams, + { + self.sign_and_submit(call, signer, DefaultParams::default_params()).await + } + + /// Creates and signs an transaction and submits to the chain for block inclusion. + /// + /// Returns `Ok` with the transaction hash if it is valid transaction. + /// + /// # Note + /// + /// Success does not mean the transaction has been included in the block, just that it is valid + /// and has been included in the transaction pool. + pub async fn sign_and_submit( + &mut self, + call: &Call, + signer: &Signer, + params: >::Params, + ) -> Result, ExtrinsicError> + where + Call: Payload, + Signer: SignerT, + { + self.create_signed(call, signer, params).await?.submit().await + } +} + +/// This payload contains the information needed to produce an transaction. 
+pub struct PartialTransaction { + client: C, + inner: PartialTransactionInner, +} + +enum PartialTransactionInner { + V4(pezkuwi_subxt_core::tx::PartialTransactionV4), + V5(pezkuwi_subxt_core::tx::PartialTransactionV5), +} + +impl PartialTransaction +where + T: Config, + C: OfflineClientT, +{ + /// Return the signer payload for this transaction. These are the bytes that must + /// be signed in order to produce a valid signature for the transaction. + pub fn signer_payload(&self) -> Vec { + match &self.inner { + PartialTransactionInner::V4(tx) => tx.signer_payload(), + PartialTransactionInner::V5(tx) => tx.signer_payload().to_vec(), + } + } + + /// Return the bytes representing the call data for this partially constructed + /// transaction. + pub fn call_data(&self) -> &[u8] { + match &self.inner { + PartialTransactionInner::V4(tx) => tx.call_data(), + PartialTransactionInner::V5(tx) => tx.call_data(), + } + } + + /// Convert this [`PartialTransaction`] into a [`SubmittableTransaction`], ready to submit. + /// The provided `signer` is responsible for providing the "from" address for the transaction, + /// as well as providing a signature to attach to it. + pub fn sign(&mut self, signer: &Signer) -> SubmittableTransaction + where + Signer: SignerT, + { + let tx = match &mut self.inner { + PartialTransactionInner::V4(tx) => tx.sign(signer), + PartialTransactionInner::V5(tx) => tx.sign(signer), + }; + + SubmittableTransaction { client: self.client.clone(), inner: tx } + } + + /// Convert this [`PartialTransaction`] into a [`SubmittableTransaction`], ready to submit. + /// An address, and something representing a signature that can be SCALE encoded, are both + /// needed in order to construct it. If you have a `Signer` to hand, you can use + /// [`PartialTransaction::sign()`] instead. 
+ pub fn sign_with_account_and_signature( + &mut self, + account_id: &T::AccountId, + signature: &T::Signature, + ) -> SubmittableTransaction { + let tx = match &mut self.inner { + PartialTransactionInner::V4(tx) => + tx.sign_with_account_and_signature(account_id.clone(), signature), + PartialTransactionInner::V5(tx) => + tx.sign_with_account_and_signature(account_id, signature), + }; + + SubmittableTransaction { client: self.client.clone(), inner: tx } + } +} + +/// This represents an transaction that has been signed and is ready to submit. +pub struct SubmittableTransaction { + client: C, + inner: pezkuwi_subxt_core::tx::Transaction, +} + +impl SubmittableTransaction +where + T: Config, + C: OfflineClientT, +{ + /// Create a [`SubmittableTransaction`] from some already-signed and prepared + /// transaction bytes, and some client (anything implementing [`OfflineClientT`] + /// or [`OnlineClientT`]). + /// + /// Prefer to use [`TxClient`] to create and sign transactions. This is simply + /// exposed in case you want to skip this process and submit something you've + /// already created. + pub fn from_bytes(client: C, tx_bytes: Vec) -> Self { + Self { client, inner: pezkuwi_subxt_core::tx::Transaction::from_bytes(tx_bytes) } + } + + /// Calculate and return the hash of the transaction, based on the configured hasher. + pub fn hash(&self) -> HashFor { + self.inner.hash_with(self.client.hasher()) + } + + /// Returns the SCALE encoded transaction bytes. + pub fn encoded(&self) -> &[u8] { + self.inner.encoded() + } + + /// Consumes [`SubmittableTransaction`] and returns the SCALE encoded + /// transaction bytes. + pub fn into_encoded(self) -> Vec { + self.inner.into_encoded() + } +} + +impl SubmittableTransaction +where + T: Config, + C: OnlineClientT, +{ + /// Submits the transaction to the chain. + /// + /// Returns a [`TxProgress`], which can be used to track the status of the transaction + /// and obtain details about it, once it has made it into a block. 
+ pub async fn submit_and_watch(&self) -> Result, ExtrinsicError> { + // Get a hash of the transaction (we'll need this later). + let ext_hash = self.hash(); + + // Submit and watch for transaction progress. + let sub = self + .client + .backend() + .submit_transaction(self.encoded()) + .await + .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; + + Ok(TxProgress::new(sub, self.client.clone(), ext_hash)) + } + + /// Submits the transaction to the chain for block inclusion. + /// + /// It's usually better to call `submit_and_watch` to get an idea of the progress of the + /// submission and whether it's eventually successful or not. This call does not guarantee + /// success, and is just sending the transaction to the chain. + pub async fn submit(&self) -> Result, ExtrinsicError> { + let ext_hash = self.hash(); + let mut sub = self + .client + .backend() + .submit_transaction(self.encoded()) + .await + .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; + + // If we get a bad status or error back straight away then error, else return the hash. + match sub.next().await { + Some(Ok(status)) => match status { + TransactionStatus::Validated | + TransactionStatus::Broadcasted | + TransactionStatus::InBestBlock { .. } | + TransactionStatus::NoLongerInBestBlock | + TransactionStatus::InFinalizedBlock { .. } => Ok(ext_hash), + TransactionStatus::Error { message } => Err( + ExtrinsicError::TransactionStatusError(TransactionStatusError::Error(message)), + ), + TransactionStatus::Invalid { message } => + Err(ExtrinsicError::TransactionStatusError(TransactionStatusError::Invalid( + message, + ))), + TransactionStatus::Dropped { message } => + Err(ExtrinsicError::TransactionStatusError(TransactionStatusError::Dropped( + message, + ))), + }, + Some(Err(e)) => Err(ExtrinsicError::TransactionStatusStreamError(e)), + None => Err(ExtrinsicError::UnexpectedEndOfTransactionStatusStream), + } + } + + /// Validate a transaction by submitting it to the relevant Runtime API. 
A transaction that is + /// valid can be added to a block, but may still end up in an error state. + /// + /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the + /// transaction. + pub async fn validate(&self) -> Result { + let latest_block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; + self.validate_at(latest_block_ref).await + } + + /// Validate a transaction by submitting it to the relevant Runtime API. A transaction that is + /// valid can be added to a block, but may still end up in an error state. + /// + /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the + /// transaction. + pub async fn validate_at( + &self, + at: impl Into>>, + ) -> Result { + let block_hash = at.into().hash(); + + // Approach taken from https://github.com/pezkuwichain/json-rpc-interface-spec/issues/55. + let mut params = Vec::with_capacity(8 + self.encoded().len() + 8); + 2u8.encode_to(&mut params); + params.extend(self.encoded().iter()); + block_hash.encode_to(&mut params); + + let res: Vec = self + .client + .backend() + .call("TaggedTransactionQueue_validate_transaction", Some(¶ms), block_hash) + .await + .map_err(ExtrinsicError::CannotGetValidationInfo)?; + + ValidationResult::try_from_bytes(res) + } + + /// This returns an estimate for what the transaction is expected to cost to execute, less any + /// tips. The actual amount paid can vary from block to block based on node traffic and other + /// factors. 
+ pub async fn partial_fee_estimate(&self) -> Result { + let mut params = self.encoded().to_vec(); + (self.encoded().len() as u32).encode_to(&mut params); + let latest_block_ref = self + .client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; + + // destructuring RuntimeDispatchInfo, see type information + // data layout: {weight_ref_time: Compact, weight_proof_size: Compact, class: u8, + // partial_fee: u128} + let (_, _, _, partial_fee) = self + .client + .backend() + .call_decoding::<(Compact, Compact, u8, u128)>( + "TransactionPaymentApi_query_info", + Some(¶ms), + latest_block_ref.hash(), + ) + .await + .map_err(ExtrinsicError::CannotGetFeeInfo)?; + + Ok(partial_fee) + } +} + +/// Fetch the latest block header and account nonce from the backend and use them to refine +/// [`ExtrinsicParams::Params`]. +async fn inject_account_nonce_and_block>( + client: &Client, + account_id: &T::AccountId, + params: &mut >::Params, +) -> Result<(), ExtrinsicError> { + use pezkuwi_subxt_core::config::transaction_extensions::Params; + + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; + + let (block_header, account_nonce) = try_join( + client + .backend() + .block_header(block_ref.hash()) + .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock), + crate::blocks::get_account_nonce(client, account_id, block_ref.hash()).map_err(|e| { + ExtrinsicError::AccountNonceError { + block_hash: block_ref.hash().into(), + account_id: account_id.encode().into(), + reason: e, + } + }), + ) + .await?; + + let block_header = block_header.ok_or_else(|| ExtrinsicError::CannotFindBlockHeader { + block_hash: block_ref.hash().into(), + })?; + + params.inject_account_nonce(account_nonce); + params.inject_block(block_header.number().into(), block_ref.hash()); + + Ok(()) +} + +impl ValidationResult { + #[allow(clippy::get_first)] + fn 
try_from_bytes(bytes: Vec) -> Result { + // TaggedTransactionQueue_validate_transaction returns this: + // https://github.com/pezkuwichain/bizinikiwi/blob/0cdf7029017b70b7c83c21a4dc0aa1020e7914f6/primitives/runtime/src/transaction_validity.rs#L210 + // We copy some of the inner types and put the three states (valid, invalid, unknown) into + // one enum, because from our perspective, the call was successful regardless. + if bytes.get(0) == Some(&0) { + // ok: valid. Decode but, for now we discard most of the information + let res = TransactionValid::decode(&mut &bytes[1..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; + Ok(ValidationResult::Valid(res)) + } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&0) { + // error: invalid + let res = TransactionInvalid::decode(&mut &bytes[2..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; + Ok(ValidationResult::Invalid(res)) + } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&1) { + // error: unknown + let res = TransactionUnknown::decode(&mut &bytes[2..]) + .map_err(ExtrinsicError::CannotDecodeValidationResult)?; + Ok(ValidationResult::Unknown(res)) + } else { + // unable to decode the bytes; they aren't what we expect. + Err(ExtrinsicError::UnexpectedValidationResultBytes(bytes)) + } + } +} + +/// The result of performing [`SubmittableTransaction::validate()`]. +#[derive(Clone, Debug, PartialEq)] +pub enum ValidationResult { + /// The transaction is valid + Valid(TransactionValid), + /// The transaction is invalid + Invalid(TransactionInvalid), + /// Unable to validate the transaction + Unknown(TransactionUnknown), +} + +impl ValidationResult { + /// Is the transaction valid. + pub fn is_valid(&self) -> bool { + matches!(self, ValidationResult::Valid(_)) + } +} + +/// Transaction is valid; here is some more information about it. +#[derive(Decode, Clone, Debug, PartialEq)] +pub struct TransactionValid { + /// Priority of the transaction. 
+ /// + /// Priority determines the ordering of two transactions that have all + /// their dependencies (required tags) satisfied. + pub priority: u64, + /// Transaction dependencies + /// + /// A non-empty list signifies that some other transactions which provide + /// given tags are required to be included before that one. + pub requires: Vec>, + /// Provided tags + /// + /// A list of tags this transaction provides. Successfully importing the transaction + /// will enable other transactions that depend on (require) those tags to be included as well. + /// Provided and required tags allow Bizinikiwi to build a dependency graph of transactions + /// and import them in the right (linear) order. + pub provides: Vec>, + /// Transaction longevity + /// + /// Longevity describes minimum number of blocks the validity is correct. + /// After this period transaction should be removed from the pool or revalidated. + pub longevity: u64, + /// A flag indicating if the transaction should be propagated to other peers. + /// + /// By setting `false` here the transaction will still be considered for + /// including in blocks that are authored on the current node, but will + /// never be sent to other peers. + pub propagate: bool, +} + +/// The runtime was unable to validate the transaction. +#[derive(Decode, Clone, Debug, PartialEq)] +pub enum TransactionUnknown { + /// Could not lookup some information that is required to validate the transaction. + CannotLookup, + /// No validator found for the given unsigned transaction. + NoUnsignedValidator, + /// Any other custom unknown validity that is not covered by this enum. + Custom(u8), +} + +/// The transaction is invalid. +#[derive(Decode, Clone, Debug, PartialEq)] +pub enum TransactionInvalid { + /// The call of the transaction is not expected. + Call, + /// General error to do with the inability to pay some fees (e.g. account balance too low). + Payment, + /// General error to do with the transaction not yet being valid (e.g. 
nonce too high). + Future, + /// General error to do with the transaction being outdated (e.g. nonce too low). + Stale, + /// General error to do with the transaction's proofs (e.g. signature). + /// + /// # Possible causes + /// + /// When using a signed extension that provides additional data for signing, it is required + /// that the signing and the verifying side use the same additional data. Additional + /// data will only be used to generate the signature, but will not be part of the transaction + /// itself. As the verifying side does not know which additional data was used while signing + /// it will only be able to assume a bad signature and cannot express a more meaningful error. + BadProof, + /// The transaction birth block is ancient. + /// + /// # Possible causes + /// + /// For `FRAME`-based runtimes this would be caused by `current block number` + /// - Era::birth block number > BlockHashCount`. (e.g. in Pezkuwi `BlockHashCount` = 2400, so a + /// transaction with birth block number 1337 would be valid up until block number 1337 + 2400, + /// after which point the transaction would be considered to have an ancient birth block.) + AncientBirthBlock, + /// The transaction would exhaust the resources of current block. + /// + /// The transaction might be valid, but there are not enough resources + /// left in the current block. + ExhaustsResources, + /// Any other custom invalid validity that is not covered by this enum. + Custom(u8), + /// An transaction with a Mandatory dispatch resulted in Error. This is indicative of either a + /// malicious validator or a buggy `provide_inherent`. In any case, it can result in + /// dangerously overweight blocks and therefore if found, invalidates the block. + BadMandatory, + /// An transaction with a mandatory dispatch tried to be validated. + /// This is invalid; only inherent transactions are allowed to have mandatory dispatches. 
+ MandatoryValidation, + /// The sending address is disabled or known to be invalid. + BadSigner, +} + +/// This trait is used to create default values for extrinsic params. We use this instead of +/// [`Default`] because we want to be able to support params which are tuples of more than 12 +/// entries (which is the maximum tuple size Rust currently implements [`Default`] for on tuples), +/// given that we aren't far off having more than 12 transaction extensions already. +/// +/// If you have params which are _not_ a tuple and which you'd like to be instantiated automatically +/// when calling [`TxClient::sign_and_submit_default()`] or +/// [`TxClient::sign_and_submit_then_watch_default()`], then you'll need to implement this trait for +/// them. +pub trait DefaultParams: Sized { + /// Instantiate a default instance of the parameters. + fn default_params() -> Self; +} + +impl DefaultParams for [P; N] { + fn default_params() -> Self { + core::array::from_fn(|_| P::default()) + } +} + +macro_rules! 
impl_default_params_for_tuple { + ($($ident:ident),+) => { + impl <$($ident : Default),+> DefaultParams for ($($ident,)+){ + fn default_params() -> Self { + ( + $($ident::default(),)+ + ) + } + } + } +} + +#[rustfmt::skip] +const _: () = { + impl_default_params_for_tuple!(A); + impl_default_params_for_tuple!(A, B); + impl_default_params_for_tuple!(A, B, C); + impl_default_params_for_tuple!(A, B, C, D); + impl_default_params_for_tuple!(A, B, C, D, E); + impl_default_params_for_tuple!(A, B, C, D, E, F); + impl_default_params_for_tuple!(A, B, C, D, E, F, G); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X); + impl_default_params_for_tuple!(A, B, C, 
D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y); + impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z); +}; + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn transaction_validity_decoding_empty_bytes() { + // No panic should occur decoding empty bytes. + let decoded = ValidationResult::try_from_bytes(vec![]); + assert!(decoded.is_err()) + } + + #[test] + fn transaction_validity_decoding_is_ok() { + use sp_runtime::{ + transaction_validity as sp, transaction_validity::TransactionValidity as T, + }; + + let pairs = vec![ + ( + T::Ok(sp::ValidTransaction { ..Default::default() }), + ValidationResult::Valid(TransactionValid { + // By default, tx is immortal + longevity: u64::MAX, + // Default is true + propagate: true, + priority: 0, + provides: vec![], + requires: vec![], + }), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::BadProof)), + ValidationResult::Invalid(TransactionInvalid::BadProof), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::Call)), + ValidationResult::Invalid(TransactionInvalid::Call), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::Payment)), + ValidationResult::Invalid(TransactionInvalid::Payment), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::Future)), + ValidationResult::Invalid(TransactionInvalid::Future), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::Stale)), + ValidationResult::Invalid(TransactionInvalid::Stale), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::AncientBirthBlock, + )), + ValidationResult::Invalid(TransactionInvalid::AncientBirthBlock), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::ExhaustsResources, + )), + ValidationResult::Invalid(TransactionInvalid::ExhaustsResources), + ), + ( + 
T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::BadMandatory)), + ValidationResult::Invalid(TransactionInvalid::BadMandatory), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::MandatoryValidation, + )), + ValidationResult::Invalid(TransactionInvalid::MandatoryValidation), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::BadSigner)), + ValidationResult::Invalid(TransactionInvalid::BadSigner), + ), + ( + T::Err(sp::TransactionValidityError::Invalid(sp::InvalidTransaction::Custom(123))), + ValidationResult::Invalid(TransactionInvalid::Custom(123)), + ), + ( + T::Err(sp::TransactionValidityError::Unknown(sp::UnknownTransaction::CannotLookup)), + ValidationResult::Unknown(TransactionUnknown::CannotLookup), + ), + ( + T::Err(sp::TransactionValidityError::Unknown( + sp::UnknownTransaction::NoUnsignedValidator, + )), + ValidationResult::Unknown(TransactionUnknown::NoUnsignedValidator), + ), + ( + T::Err(sp::TransactionValidityError::Unknown(sp::UnknownTransaction::Custom(123))), + ValidationResult::Unknown(TransactionUnknown::Custom(123)), + ), + ]; + + for (sp, validation_result) in pairs { + let encoded = sp.encode(); + let decoded = ValidationResult::try_from_bytes(encoded).expect("should decode OK"); + assert_eq!(decoded, validation_result); + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/tx/tx_progress.rs b/vendor/pezkuwi-subxt/subxt/src/tx/tx_progress.rs new file mode 100644 index 00000000..088ab6ec --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/tx/tx_progress.rs @@ -0,0 +1,445 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types representing extrinsics/transactions that have been submitted to a node. 
+ +use std::task::Poll; + +use crate::{ + backend::{BlockRef, StreamOfResults, TransactionStatus as BackendTxStatus}, + client::OnlineClientT, + config::{Config, HashFor}, + error::{ + DispatchError, TransactionEventsError, TransactionFinalizedSuccessError, + TransactionProgressError, TransactionStatusError, + }, + events::EventsClient, + utils::strip_compact_prefix, +}; +use derive_where::derive_where; +use futures::{Stream, StreamExt}; + +/// This struct represents a subscription to the progress of some transaction. +pub struct TxProgress { + sub: Option>>>, + ext_hash: HashFor, + client: C, +} + +impl std::fmt::Debug for TxProgress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TxProgress") + .field("sub", &"") + .field("ext_hash", &self.ext_hash) + .field("client", &"") + .finish() + } +} + +// The above type is not `Unpin` by default unless the generic param `T` is, +// so we manually make it clear that Unpin is actually fine regardless of `T` +// (we don't care if this moves around in memory while it's "pinned"). +impl Unpin for TxProgress {} + +impl TxProgress { + /// Instantiate a new [`TxProgress`] from a custom subscription. + pub fn new( + sub: StreamOfResults>>, + client: C, + ext_hash: HashFor, + ) -> Self { + Self { sub: Some(sub), client, ext_hash } + } + + /// Return the hash of the extrinsic. + pub fn extrinsic_hash(&self) -> HashFor { + self.ext_hash + } +} + +impl TxProgress +where + T: Config, + C: OnlineClientT, +{ + /// Return the next transaction status when it's emitted. This just delegates to the + /// [`futures::Stream`] implementation for [`TxProgress`], but allows you to + /// avoid importing that trait if you don't otherwise need it. 
+ pub async fn next(&mut self) -> Option, TransactionProgressError>> { + StreamExt::next(self).await + } + + /// Wait for the transaction to be finalized, and return a [`TxInBlock`] + /// instance when it is, or an error if there was a problem waiting for finalization. + /// + /// **Note:** consumes `self`. If you'd like to perform multiple actions as the state of the + /// transaction progresses, use [`TxProgress::next()`] instead. + /// + /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some + /// probability that the transaction will not make it into a block but there is no guarantee + /// that this is true. In those cases the stream is closed however, so you currently have no way + /// to find out if they finally made it into a block or not. + pub async fn wait_for_finalized(mut self) -> Result, TransactionProgressError> { + while let Some(status) = self.next().await { + match status? { + // Finalized! Return. + TxStatus::InFinalizedBlock(s) => return Ok(s), + // Error scenarios; return the error. + TxStatus::Error { message } => { + return Err(TransactionStatusError::Error(message).into()); + }, + TxStatus::Invalid { message } => { + return Err(TransactionStatusError::Invalid(message).into()); + }, + TxStatus::Dropped { message } => { + return Err(TransactionStatusError::Dropped(message).into()); + }, + // Ignore and wait for next status event: + _ => continue, + } + } + Err(TransactionProgressError::UnexpectedEndOfTransactionStatusStream) + } + + /// Wait for the transaction to be finalized, and for the transaction events to indicate + /// that the transaction was successful. Returns the events associated with the transaction, + /// as well as a couple of other details (block hash and extrinsic hash). + /// + /// **Note:** consumes self. If you'd like to perform multiple actions as progress is made, + /// use [`TxProgress::next()`] instead. 
+ /// + /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some + /// probability that the transaction will not make it into a block but there is no guarantee + /// that this is true. In those cases the stream is closed however, so you currently have no way + /// to find out if they finally made it into a block or not. + pub async fn wait_for_finalized_success( + self, + ) -> Result, TransactionFinalizedSuccessError> { + let evs = self.wait_for_finalized().await?.wait_for_success().await?; + Ok(evs) + } +} + +impl Stream for TxProgress { + type Item = Result, TransactionProgressError>; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let sub = match self.sub.as_mut() { + Some(sub) => sub, + None => return Poll::Ready(None), + }; + + sub.poll_next_unpin(cx) + .map_err(TransactionProgressError::CannotGetNextProgressUpdate) + .map_ok(|status| { + match status { + BackendTxStatus::Validated => TxStatus::Validated, + BackendTxStatus::Broadcasted => TxStatus::Broadcasted, + BackendTxStatus::NoLongerInBestBlock => TxStatus::NoLongerInBestBlock, + BackendTxStatus::InBestBlock { hash } => TxStatus::InBestBlock(TxInBlock::new( + hash, + self.ext_hash, + self.client.clone(), + )), + // These stream events mean that nothing further will be sent: + BackendTxStatus::InFinalizedBlock { hash } => { + self.sub = None; + TxStatus::InFinalizedBlock(TxInBlock::new( + hash, + self.ext_hash, + self.client.clone(), + )) + }, + BackendTxStatus::Error { message } => { + self.sub = None; + TxStatus::Error { message } + }, + BackendTxStatus::Invalid { message } => { + self.sub = None; + TxStatus::Invalid { message } + }, + BackendTxStatus::Dropped { message } => { + self.sub = None; + TxStatus::Dropped { message } + }, + } + }) + } +} + +/// Possible transaction statuses returned from our [`TxProgress::next()`] call. 
+#[derive_where(Debug; C)] +pub enum TxStatus { + /// Transaction is part of the future queue. + Validated, + /// The transaction has been broadcast to other nodes. + Broadcasted, + /// Transaction is no longer in a best block. + NoLongerInBestBlock, + /// Transaction has been included in block with given hash. + InBestBlock(TxInBlock), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + InFinalizedBlock(TxInBlock), + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + message: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + message: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + message: String, + }, +} + +impl TxStatus { + /// A convenience method to return the finalized details. Returns + /// [`None`] if the enum variant is not [`TxStatus::InFinalizedBlock`]. + pub fn as_finalized(&self) -> Option<&TxInBlock> { + match self { + Self::InFinalizedBlock(val) => Some(val), + _ => None, + } + } + + /// A convenience method to return the best block details. Returns + /// [`None`] if the enum variant is not [`TxStatus::InBestBlock`]. + pub fn as_in_block(&self) -> Option<&TxInBlock> { + match self { + Self::InBestBlock(val) => Some(val), + _ => None, + } + } +} + +/// This struct represents a transaction that has made it into a block. +#[derive_where(Debug; C)] +pub struct TxInBlock { + block_ref: BlockRef>, + ext_hash: HashFor, + client: C, +} + +impl TxInBlock { + pub(crate) fn new(block_ref: BlockRef>, ext_hash: HashFor, client: C) -> Self { + Self { block_ref, ext_hash, client } + } + + /// Return the hash of the block that the transaction has made it into. + pub fn block_hash(&self) -> HashFor { + self.block_ref.hash() + } + + /// Return the hash of the extrinsic that was submitted. 
+ pub fn extrinsic_hash(&self) -> HashFor { + self.ext_hash + } +} + +impl> TxInBlock { + /// Fetch the events associated with this transaction. If the transaction + /// was successful (ie no `ExtrinsicFailed`) events were found, then we return + /// the events associated with it. If the transaction was not successful, or + /// something else went wrong, we return an error. + /// + /// **Note:** If multiple `ExtrinsicFailed` errors are returned (for instance + /// because a pallet chooses to emit one as an event, which is considered + /// abnormal behaviour), it is not specified which of the errors is returned here. + /// You can use [`TxInBlock::fetch_events`] instead if you'd like to + /// work with multiple "error" events. + /// + /// **Note:** This has to download block details from the node and decode events + /// from them. + pub async fn wait_for_success( + &self, + ) -> Result, TransactionEventsError> { + let events = self.fetch_events().await?; + + // Try to find any errors; return the first one we encounter. + for (ev_idx, ev) in events.iter().enumerate() { + let ev = ev.map_err(|e| TransactionEventsError::CannotDecodeEventInBlock { + event_index: ev_idx, + block_hash: self.block_hash().into(), + error: e, + })?; + + if ev.pallet_name() == "System" && ev.variant_name() == "ExtrinsicFailed" { + let dispatch_error = + DispatchError::decode_from(ev.field_bytes(), self.client.metadata()).map_err( + |e| TransactionEventsError::CannotDecodeDispatchError { + error: e, + bytes: ev.field_bytes().to_vec(), + }, + )?; + return Err(dispatch_error.into()); + } + } + + Ok(events) + } + + /// Fetch all of the events associated with this transaction. This succeeds whether + /// the transaction was a success or not; it's up to you to handle the error and + /// success events however you prefer. + /// + /// **Note:** This has to download block details from the node and decode events + /// from them. 
+ pub async fn fetch_events( + &self, + ) -> Result, TransactionEventsError> { + let hasher = self.client.hasher(); + + let block_body = self + .client + .backend() + .block_body(self.block_ref.hash()) + .await + .map_err(|e| TransactionEventsError::CannotFetchBlockBody { + block_hash: self.block_hash().into(), + error: e, + })? + .ok_or_else(|| TransactionEventsError::BlockNotFound { + block_hash: self.block_hash().into(), + })?; + + let extrinsic_idx = block_body + .iter() + .position(|ext| { + use crate::config::Hasher; + let Ok((_, stripped)) = strip_compact_prefix(ext) else { + return false; + }; + let hash = hasher.hash_of(&stripped); + hash == self.ext_hash + }) + // If we successfully obtain the block hash we think contains our + // extrinsic, the extrinsic should be in there somewhere.. + .ok_or_else(|| TransactionEventsError::CannotFindTransactionInBlock { + block_hash: self.block_hash().into(), + transaction_hash: self.ext_hash.into(), + })?; + + let events = EventsClient::new(self.client.clone()) + .at(self.block_ref.clone()) + .await + .map_err(|e| TransactionEventsError::CannotFetchEventsForTransaction { + block_hash: self.block_hash().into(), + transaction_hash: self.ext_hash.into(), + error: e, + })?; + + Ok(crate::blocks::ExtrinsicEvents::new(self.ext_hash, extrinsic_idx as u32, events)) + } +} + +#[cfg(test)] +mod test { + use super::*; + use pezkuwi_subxt_core::client::RuntimeVersion; + + use crate::{ + BizinikiwConfig, + backend::{StreamOfResults, TransactionStatus}, + client::{OfflineClientT, OnlineClientT}, + config::{Config, HashFor}, + tx::TxProgress, + }; + + type MockTxProgress = TxProgress; + type MockHash = HashFor; + type MockBizinikiwiTxStatus = TransactionStatus; + + /// a mock client to satisfy trait bounds in tests + #[derive(Clone, Debug)] + struct MockClient; + + impl OfflineClientT for MockClient { + fn metadata(&self) -> crate::Metadata { + unimplemented!("just a mock impl to satisfy trait bounds") + } + + fn genesis_hash(&self) 
-> MockHash { + unimplemented!("just a mock impl to satisfy trait bounds") + } + + fn runtime_version(&self) -> RuntimeVersion { + unimplemented!("just a mock impl to satisfy trait bounds") + } + + fn hasher(&self) -> ::Hasher { + unimplemented!("just a mock impl to satisfy trait bounds") + } + + fn client_state(&self) -> pezkuwi_subxt_core::client::ClientState { + unimplemented!("just a mock impl to satisfy trait bounds") + } + } + + impl OnlineClientT for MockClient { + fn backend(&self) -> &dyn crate::backend::Backend { + unimplemented!("just a mock impl to satisfy trait bounds") + } + } + + #[tokio::test] + async fn wait_for_finalized_returns_err_when_error() { + let tx_progress = mock_tx_progress(vec![ + MockBizinikiwiTxStatus::Broadcasted, + MockBizinikiwiTxStatus::Error { message: "err".into() }, + ]); + let finalized_result = tx_progress.wait_for_finalized().await; + assert!(matches!( + finalized_result, + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Error(e))) if e == "err" + )); + } + + #[tokio::test] + async fn wait_for_finalized_returns_err_when_invalid() { + let tx_progress = mock_tx_progress(vec![ + MockBizinikiwiTxStatus::Broadcasted, + MockBizinikiwiTxStatus::Invalid { message: "err".into() }, + ]); + let finalized_result = tx_progress.wait_for_finalized().await; + assert!(matches!( + finalized_result, + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Invalid(e))) if e == "err" + )); + } + + #[tokio::test] + async fn wait_for_finalized_returns_err_when_dropped() { + let tx_progress = mock_tx_progress(vec![ + MockBizinikiwiTxStatus::Broadcasted, + MockBizinikiwiTxStatus::Dropped { message: "err".into() }, + ]); + let finalized_result = tx_progress.wait_for_finalized().await; + assert!(matches!( + finalized_result, + Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Dropped(e))) if e == "err" + )); + } + + fn mock_tx_progress(statuses: Vec) -> MockTxProgress { + 
let sub = create_bizinikiwi_tx_status_subscription(statuses); + TxProgress::new(sub, MockClient, Default::default()) + } + + fn create_bizinikiwi_tx_status_subscription( + elements: Vec, + ) -> StreamOfResults { + let results = elements.into_iter().map(Ok); + let stream = Box::pin(futures::stream::iter(results)); + let sub: StreamOfResults = StreamOfResults::new(stream); + sub + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/utils/fetch_chain_spec.rs b/vendor/pezkuwi-subxt/subxt/src/utils/fetch_chain_spec.rs new file mode 100644 index 00000000..45a5544e --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/utils/fetch_chain_spec.rs @@ -0,0 +1,112 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use serde_json::value::RawValue; + +/// Possible errors encountered trying to fetch a chain spec from an RPC node. +#[derive(thiserror::Error, Debug)] +#[allow(missing_docs)] +pub enum FetchChainspecError { + #[error("Cannot fetch chain spec: RPC error: {0}.")] + RpcError(String), + #[error("Cannot fetch chain spec: Invalid URL.")] + InvalidUrl, + #[error("Cannot fetch chain spec: Invalid URL scheme.")] + InvalidScheme, + #[error("Cannot fetch chain spec: Handshake error establishing WS connection.")] + HandshakeError, +} + +/// Fetch a chain spec from an RPC node at the given URL. +pub async fn fetch_chainspec_from_rpc_node( + url: impl AsRef, +) -> Result, FetchChainspecError> { + use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; + use jsonrpsee::rpc_params; + + let client = jsonrpsee_helpers::client(url.as_ref()).await?; + + let result = client + .request("sync_state_genSyncSpec", jsonrpsee::rpc_params![true]) + .await + .map_err(|err| FetchChainspecError::RpcError(err.to_string()))?; + + // Subscribe to the finalized heads of the chain. 
+ let mut subscription = SubscriptionClientT::subscribe::, _>( + &client, + "chain_subscribeFinalizedHeads", + rpc_params![], + "chain_unsubscribeFinalizedHeads", + ) + .await + .map_err(|err| FetchChainspecError::RpcError(err.to_string()))?; + + // We must ensure that the finalized block of the chain is not the block included + // in the chainSpec. + // This is a temporary workaround for: https://github.com/smol-dot/smoldot/issues/1562. + // The first finalized block that is received might be the finalized block + // included in the chainSpec. Decoding the chainSpec for this purpose is too complex. + let _ = subscription.next().await; + let _ = subscription.next().await; + + Ok(result) +} + +crate::macros::cfg_jsonrpsee_native! { + mod jsonrpsee_helpers { + use super::FetchChainspecError; + use tokio_util::compat::Compat; + + pub use jsonrpsee::{ + client_transport::ws::{self, EitherStream, Url, WsTransportClientBuilder}, + core::client::Client, + }; + + pub type Sender = ws::Sender>; + pub type Receiver = ws::Receiver>; + + /// Build WS RPC client from URL + pub async fn client(url: &str) -> Result { + let url = Url::parse(url).map_err(|_| FetchChainspecError::InvalidUrl)?; + + if url.scheme() != "ws" && url.scheme() != "wss" { + return Err(FetchChainspecError::InvalidScheme); + } + + let (sender, receiver) = ws_transport(url).await?; + + Ok(Client::builder() + .max_buffer_capacity_per_subscription(4096) + .build_with_tokio(sender, receiver)) + } + + async fn ws_transport(url: Url) -> Result<(Sender, Receiver), FetchChainspecError> { + WsTransportClientBuilder::default() + .build(url) + .await + .map_err(|_| FetchChainspecError::HandshakeError) + } + } +} + +crate::macros::cfg_jsonrpsee_web! 
{ + mod jsonrpsee_helpers { + use super::FetchChainspecError; + pub use jsonrpsee::{ + client_transport::web, + core::client::{Client, ClientBuilder}, + }; + + /// Build web RPC client from URL + pub async fn client(url: &str) -> Result { + let (sender, receiver) = web::connect(url) + .await + .map_err(|_| FetchChainspecError::HandshakeError)?; + + Ok(ClientBuilder::default() + .max_buffer_capacity_per_subscription(4096) + .build_with_wasm(sender, receiver)) + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/utils/mod.rs b/vendor/pezkuwi-subxt/subxt/src/utils/mod.rs new file mode 100644 index 00000000..b788a783 --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/utils/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Miscellaneous utility helpers. + +pub use pezkuwi_subxt_core::utils::{ + AccountId32, Encoded, Era, H160, H256, H512, KeyedVec, MultiAddress, MultiSignature, + PhantomDataSendSync, Static, UncheckedExtrinsic, WrapperKeepOpaque, Yes, bits, + strip_compact_prefix, to_hex, +}; + +pub use pezkuwi_subxt_rpcs::utils::url_is_secure; + +crate::macros::cfg_jsonrpsee! { + mod fetch_chain_spec; + pub use fetch_chain_spec::{fetch_chainspec_from_rpc_node, FetchChainspecError}; +} diff --git a/vendor/pezkuwi-subxt/subxt/src/view_functions/mod.rs b/vendor/pezkuwi-subxt/subxt/src/view_functions/mod.rs new file mode 100644 index 00000000..c14e100c --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/view_functions/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types associated with executing View Function calls. 
+ +mod view_function_types; +mod view_functions_client; + +pub use pezkuwi_subxt_core::view_functions::payload::{ + DynamicPayload, Payload, StaticPayload, dynamic, +}; +pub use view_function_types::ViewFunctionsApi; +pub use view_functions_client::ViewFunctionsClient; diff --git a/vendor/pezkuwi-subxt/subxt/src/view_functions/view_function_types.rs b/vendor/pezkuwi-subxt/subxt/src/view_functions/view_function_types.rs new file mode 100644 index 00000000..d055f4ed --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/view_functions/view_function_types.rs @@ -0,0 +1,80 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::Payload; +use crate::{ + backend::BlockRef, + client::OnlineClientT, + config::{Config, HashFor}, + error::ViewFunctionError, +}; +use derive_where::derive_where; +use std::{future::Future, marker::PhantomData}; + +/// Execute View Function calls. +#[derive_where(Clone; Client)] +pub struct ViewFunctionsApi { + client: Client, + block_ref: BlockRef>, + _marker: PhantomData, +} + +impl ViewFunctionsApi { + /// Create a new [`ViewFunctionsApi`] + pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { + Self { client, block_ref, _marker: PhantomData } + } +} + +impl ViewFunctionsApi +where + T: Config, + Client: OnlineClientT, +{ + /// Run the validation logic against some View Function payload you'd like to use. Returns + /// `Ok(())` if the payload is valid (or if it's not possible to check since the payload has no + /// validation hash). Return an error if the payload was not valid or something went wrong + /// trying to validate it (ie the View Function in question do not exist at all) + pub fn validate(&self, payload: Call) -> Result<(), ViewFunctionError> { + pezkuwi_subxt_core::view_functions::validate(payload, &self.client.metadata()) + .map_err(Into::into) + } + + /// Execute a View Function call. 
+ pub fn call( + &self, + payload: Call, + ) -> impl Future> + use { + let client = self.client.clone(); + let block_hash = self.block_ref.hash(); + // Ensure that the returned future doesn't have a lifetime tied to api.view_functions(), + // which is a temporary thing we'll be throwing away quickly: + async move { + let metadata = client.metadata(); + + // Validate the View Function payload hash against the compile hash from codegen. + pezkuwi_subxt_core::view_functions::validate(&payload, &metadata)?; + + // Assemble the data to call the "execute_view_function" runtime API, which + // then calls the relevant view function. + let call_name = pezkuwi_subxt_core::view_functions::CALL_NAME; + let call_args = pezkuwi_subxt_core::view_functions::call_args(&payload, &metadata)?; + + // Make the call. + let bytes = client + .backend() + .call(call_name, Some(call_args.as_slice()), block_hash) + .await + .map_err(ViewFunctionError::CannotCallApi)?; + + // Decode the response. + let value = pezkuwi_subxt_core::view_functions::decode_value( + &mut &*bytes, + &payload, + &metadata, + )?; + Ok(value) + } + } +} diff --git a/vendor/pezkuwi-subxt/subxt/src/view_functions/view_functions_client.rs b/vendor/pezkuwi-subxt/subxt/src/view_functions/view_functions_client.rs new file mode 100644 index 00000000..23046b5f --- /dev/null +++ b/vendor/pezkuwi-subxt/subxt/src/view_functions/view_functions_client.rs @@ -0,0 +1,59 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::view_function_types::ViewFunctionsApi; + +use crate::{ + backend::BlockRef, + client::OnlineClientT, + config::{Config, HashFor}, + error::ViewFunctionError, +}; +use derive_where::derive_where; +use std::{future::Future, marker::PhantomData}; + +/// Make View Function calls at some block. 
+#[derive_where(Clone; Client)] +pub struct ViewFunctionsClient { + client: Client, + _marker: PhantomData, +} + +impl ViewFunctionsClient { + /// Create a new [`ViewFunctionsClient`] + pub fn new(client: Client) -> Self { + Self { client, _marker: PhantomData } + } +} + +impl ViewFunctionsClient +where + T: Config, + Client: OnlineClientT, +{ + /// Obtain an interface to call View Functions at some block hash. + pub fn at(&self, block_ref: impl Into>>) -> ViewFunctionsApi { + ViewFunctionsApi::new(self.client.clone(), block_ref.into()) + } + + /// Obtain an interface to call View Functions at the latest finalized block. + pub fn at_latest( + &self, + ) -> impl Future, ViewFunctionError>> + Send + 'static + { + // Clone and pass the client in like this so that we can explicitly + // return a Future that's Send + 'static, rather than tied to &self. + let client = self.client.clone(); + async move { + // get the ref for the latest finalized block and use that. + let block_ref = client + .backend() + .latest_finalized_block_ref() + .await + .map_err(ViewFunctionError::CannotGetLatestFinalizedBlock)?; + + Ok(ViewFunctionsApi::new(client, block_ref)) + } + } +} diff --git a/vendor/pezkuwi-subxt/utils/fetch-metadata/Cargo.toml b/vendor/pezkuwi-subxt/utils/fetch-metadata/Cargo.toml new file mode 100644 index 00000000..5e9ebb0d --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/fetch-metadata/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "pezkuwi-subxt-utils-fetchmetadata" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +autotests = false + +license.workspace = true +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "subxt utility to fetch metadata" + +[features] +url = ["dep:jsonrpsee", "dep:tokio", "dep:url", "frame-metadata"] + +[dependencies] +codec = { package = "parity-scale-codec", workspace = true, features = ["derive", "std"] } +hex = { 
workspace = true, features = ["std"] } +thiserror = { workspace = true } + +# Optional dependencies for the `url` feature. +frame-metadata = { workspace = true, optional = true, features = ["std"] } +jsonrpsee = { workspace = true, features = ["http-client", "ws-client"], optional = true } +tokio = { workspace = true, features = ["rt-multi-thread"], optional = true } +url = { workspace = true, optional = true } + +[package.metadata.docs.rs] +features = ["url"] +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/utils/fetch-metadata/src/error.rs b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/error.rs new file mode 100644 index 00000000..db36a4e9 --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/error.rs @@ -0,0 +1,30 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +/// Error attempting to fetch metadata. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum Error { + /// Error decoding from a hex value. + #[error("Cannot decode hex value: {0}")] + DecodeError(#[from] hex::FromHexError), + /// Some SCALE codec error. + #[error("Cannot scale encode/decode value: {0}")] + CodecError(#[from] codec::Error), + /// JSON-RPC error fetching metadata. + #[cfg(feature = "url")] + #[error("Request error: {0}")] + RequestError(#[from] jsonrpsee::core::ClientError), + /// Failed IO when fetching from a file. + #[error( + "Failed IO for {0}, make sure that you are providing the correct file path for metadata: {1}" + )] + Io(String, std::io::Error), + /// URL scheme is not http, https, ws or wss. + #[error("'{0}' not supported, supported URI schemes are http, https, ws or wss.")] + InvalidScheme(String), + /// Some other error. 
+ #[error("Other error: {0}")] + Other(String), +} diff --git a/vendor/pezkuwi-subxt/utils/fetch-metadata/src/lib.rs b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/lib.rs new file mode 100644 index 00000000..2bfb0f02 --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Subxt utils fetch metadata. + +#![cfg_attr(docsrs, feature(doc_cfg))] + +// Internal helper macros +#[macro_use] +mod macros; +mod error; + +cfg_fetch_from_url! { + mod url; + pub use url::{from_url, from_url_blocking, MetadataVersion, Url}; +} + +pub use error::Error; + +/// Fetch metadata from a file in a blocking manner. +pub fn from_file_blocking(path: &std::path::Path) -> Result, error::Error> { + use std::io::Read; + + let to_err = |err| error::Error::Io(path.to_string_lossy().into(), err); + let mut file = std::fs::File::open(path).map_err(to_err)?; + let mut bytes = Vec::new(); + file.read_to_end(&mut bytes).map_err(to_err)?; + Ok(bytes) +} diff --git a/vendor/pezkuwi-subxt/utils/fetch-metadata/src/macros.rs b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/macros.rs new file mode 100644 index 00000000..4274331a --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/macros.rs @@ -0,0 +1,22 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +macro_rules! cfg_feature { + ($feature:literal, $($item:item)*) => { + $( + #[cfg(feature = $feature)] + #[cfg_attr(docsrs, doc(cfg(feature = $feature)))] + $item + )* + } +} + +macro_rules! 
cfg_fetch_from_url { + ($($item:item)*) => { + crate::macros::cfg_feature!("url", $($item)*); + }; +} + +#[allow(unused)] +pub(crate) use {cfg_feature, cfg_fetch_from_url}; diff --git a/vendor/pezkuwi-subxt/utils/fetch-metadata/src/url.rs b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/url.rs new file mode 100644 index 00000000..cb8c855a --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/fetch-metadata/src/url.rs @@ -0,0 +1,197 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Fetch metadata from a URL. + +use crate::Error; +use codec::{Decode, Encode}; +use jsonrpsee::{ + core::client::ClientT, http_client::HttpClientBuilder, rpc_params, ws_client::WsClientBuilder, +}; + +pub use url::Url; + +/// The metadata version that is fetched from the node. +#[derive(Default, Debug, Clone, Copy)] +pub enum MetadataVersion { + /// Latest stable version of the metadata. + #[default] + Latest, + /// Fetch a specified version of the metadata. + Version(u32), + /// Latest unstable version of the metadata. + Unstable, +} + +// Note: Implementation needed for the CLI tool. +impl std::str::FromStr for MetadataVersion { + type Err = String; + + fn from_str(input: &str) -> Result { + match input { + "unstable" => Ok(MetadataVersion::Unstable), + "latest" => Ok(MetadataVersion::Latest), + version => { + let num: u32 = version + .parse() + .map_err(|_| format!("Invalid metadata version specified {version:?}"))?; + + Ok(MetadataVersion::Version(num)) + } + } + } +} + +/// Returns the metadata bytes from the provided URL. 
+pub async fn from_url(url: Url, version: MetadataVersion, at_block_hash: Option<&str>) -> Result, Error> { + let bytes = match url.scheme() { + "http" | "https" => fetch_metadata_http(url, version, at_block_hash).await, + "ws" | "wss" => fetch_metadata_ws(url, version, at_block_hash).await, + invalid_scheme => Err(Error::InvalidScheme(invalid_scheme.to_owned())), + }?; + + Ok(bytes) +} + +/// Returns the metadata bytes from the provided URL, blocking the current thread. +pub fn from_url_blocking(url: Url, version: MetadataVersion, at_block_hash: Option<&str>) -> Result, Error> { + tokio_block_on(from_url(url, version, at_block_hash)) +} + +// Block on some tokio runtime for sync contexts +fn tokio_block_on>(fut: Fut) -> T { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap() + .block_on(fut) +} + +async fn fetch_metadata_ws(url: Url, version: MetadataVersion, at_block_hash: Option<&str>) -> Result, Error> { + let client = WsClientBuilder::default() + .request_timeout(std::time::Duration::from_secs(180)) + .max_buffer_capacity_per_subscription(4096) + .build(url) + .await?; + + fetch_metadata(client, version, at_block_hash).await +} + +async fn fetch_metadata_http(url: Url, version: MetadataVersion, at_block_hash: Option<&str>) -> Result, Error> { + let client = HttpClientBuilder::default() + .request_timeout(std::time::Duration::from_secs(180)) + .build(url)?; + + fetch_metadata(client, version, at_block_hash).await +} + +/// The innermost call to fetch metadata: +async fn fetch_metadata(client: impl ClientT, version: MetadataVersion, at_block_hash: Option<&str>) -> Result, Error> { + const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + + // Ensure always 0x prefix. + let at_block_hash = at_block_hash + .map(|hash| format!("0x{}", hash.strip_prefix("0x").unwrap_or(hash))); + let at_block_hash = at_block_hash.as_deref(); + + // Fetch available metadata versions. If error, revert to legacy metadata code. 
+ async fn fetch_available_versions( + client: &impl ClientT, + at_block_hash: Option<&str>, + ) -> Result, Error> { + let res: String = client + .request("state_call", rpc_params!["Metadata_metadata_versions", "0x", at_block_hash]) + .await?; + let raw_bytes = hex::decode(res.trim_start_matches("0x"))?; + Decode::decode(&mut &raw_bytes[..]).map_err(Into::into) + } + + // Fetch metadata using the "new" state_call interface + async fn fetch_inner( + client: &impl ClientT, + version: MetadataVersion, + supported_versions: Vec, + at_block_hash: Option<&str>, + ) -> Result, Error> { + // Return the version the user wants if it's supported: + let version = match version { + MetadataVersion::Latest => *supported_versions + .iter() + .filter(|&&v| v != UNSTABLE_METADATA_VERSION) + .max() + .ok_or_else(|| Error::Other("No valid metadata versions returned".to_string()))?, + MetadataVersion::Unstable => { + if supported_versions.contains(&UNSTABLE_METADATA_VERSION) { + UNSTABLE_METADATA_VERSION + } else { + return Err(Error::Other( + "The node does not have an unstable metadata version available".to_string(), + )); + } + } + MetadataVersion::Version(version) => { + if supported_versions.contains(&version) { + version + } else { + return Err(Error::Other(format!( + "The node does not have metadata version {version} available" + ))); + } + } + }; + + let bytes = version.encode(); + let version: String = format!("0x{}", hex::encode(&bytes)); + + // Fetch the metadata at that version: + let metadata_string: String = client + .request( + "state_call", + rpc_params!["Metadata_metadata_at_version", &version, at_block_hash], + ) + .await?; + // Decode the metadata. 
+ let metadata_bytes = hex::decode(metadata_string.trim_start_matches("0x"))?; + let metadata: Option = + Decode::decode(&mut &metadata_bytes[..])?; + let Some(metadata) = metadata else { + return Err(Error::Other(format!( + "The node does not have metadata version {version} available" + ))); + }; + Ok(metadata.0) + } + + // Fetch metadata using the "old" state_call interface + async fn fetch_inner_legacy( + client: &impl ClientT, + at_block_hash: Option<&str>, + ) -> Result, Error> { + // Fetch the metadata. + let metadata_string: String = client + .request("state_call", rpc_params!["Metadata_metadata", "0x", at_block_hash]) + .await?; + + // Decode the metadata. + let metadata_bytes = hex::decode(metadata_string.trim_start_matches("0x"))?; + let metadata: frame_metadata::OpaqueMetadata = Decode::decode(&mut &metadata_bytes[..])?; + Ok(metadata.0) + } + + match fetch_available_versions(&client, at_block_hash).await { + Ok(supported_versions) => { + fetch_inner(&client, version, supported_versions, at_block_hash).await + }, + Err(e) => { + // The "new" interface failed. if the user is asking for V14 or the "latest" + // metadata then try the legacy interface instead. Else, just return the + // reason for failure. 
+ if matches!(version, MetadataVersion::Version(14) | MetadataVersion::Latest) { + fetch_inner_legacy(&client, at_block_hash).await + } else { + Err(e) + } + } + } +} diff --git a/vendor/pezkuwi-subxt/utils/strip-metadata/Cargo.toml b/vendor/pezkuwi-subxt/utils/strip-metadata/Cargo.toml new file mode 100644 index 00000000..d7ef6ba9 --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/strip-metadata/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "pezkuwi-subxt-utils-stripmetadata" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +autotests = false + +license.workspace = true +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "subxt utility to strip metadata" + +[dependencies] +codec = { workspace = true } +either = { workspace = true } +frame-metadata = { workspace = true, features = ["current", "std"] } +scale-info = { workspace = true, features = ["std"] } + +[package.metadata.docs.rs] +features = ["url"] +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.playground] +default-features = true + +[lints] +workspace = true diff --git a/vendor/pezkuwi-subxt/utils/strip-metadata/src/lib.rs b/vendor/pezkuwi-subxt/utils/strip-metadata/src/lib.rs new file mode 100644 index 00000000..3949216c --- /dev/null +++ b/vendor/pezkuwi-subxt/utils/strip-metadata/src/lib.rs @@ -0,0 +1,875 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This utility crate provides a [`StripMetadata`] trait which exposes a +//! [`StripMetadata::strip_metadata`] method able to remove pallets and runtime APIs from the +//! metadata in question. 
+ +use either::Either; +use frame_metadata::{v14, v15, v16}; +use scale_info::PortableRegistry; +use std::collections::BTreeSet; + +/// This trait is implemented for metadata versions to enable us to strip pallets and runtime APIs +/// from them. +/// +/// To implement the [`StripMetadata::strip_metadata`] method for a new metadata version, you'll +/// probably: +/// - Remove any pallets and runtime APIs from the metadata based on the filter functions. +/// - Call `self.iter_type_ids_mut().collect()` to gather all of the type IDs to keep. +/// - This will require implementing `IterateTypeIds`, which is the thing that iterates over all of +/// the type IDs still present in the metadata such that we know what we need to keep. +/// - Call `self.types.retain(..)` to filter any types not matching the type IDs above out of the +/// registry. +/// - Iterate over the type IDs again, mapping those found in the metadata to the new IDs that +/// calling `self.types.retain(..)` handed back. +pub trait StripMetadata { + /// Strip out any pallets and runtime APIs for which the provided filter functions return false. 
+ fn strip_metadata( + &mut self, + keep_pallet: PalletFilter, + keep_runtime_api: RuntimeApiFilter, + ) where + PalletFilter: Fn(&str) -> bool, + RuntimeApiFilter: Fn(&str) -> bool; +} + +impl StripMetadata for v14::RuntimeMetadataV14 { + fn strip_metadata( + &mut self, + keep_pallet: PalletFilter, + _keep_runtime_api: RuntimeApiFilter, + ) where + PalletFilter: Fn(&str) -> bool, + RuntimeApiFilter: Fn(&str) -> bool, + { + // Throw away pallets we don't care about: + self.pallets.retain(|pallet| keep_pallet(&pallet.name)); + + // Now, only retain types we care about in the registry: + retain_types(self); + } +} + +impl StripMetadata for v15::RuntimeMetadataV15 { + fn strip_metadata( + &mut self, + keep_pallet: PalletFilter, + keep_runtime_api: RuntimeApiFilter, + ) where + PalletFilter: Fn(&str) -> bool, + RuntimeApiFilter: Fn(&str) -> bool, + { + // Throw away pallets and runtime APIs we don't care about: + self.pallets.retain(|pallet| keep_pallet(&pallet.name)); + self.apis.retain(|api| keep_runtime_api(&api.name)); + + // Now, only retain types we care about in the registry: + retain_types(self); + } +} + +impl StripMetadata for v16::RuntimeMetadataV16 { + fn strip_metadata( + &mut self, + keep_pallet: PalletFilter, + keep_runtime_api: RuntimeApiFilter, + ) where + PalletFilter: Fn(&str) -> bool, + RuntimeApiFilter: Fn(&str) -> bool, + { + // Throw away pallets and runtime APIs we don't care about. + // Keep the System pallet, because it has some associated types that we care about in Subxt. + self.pallets + .retain(|pallet| pallet.name == "System" || keep_pallet(&pallet.name)); + self.apis.retain(|api| keep_runtime_api(&api.name)); + + // If the user asked to strip the System pallet, we'll strip most things from it but keep + // the associated types, because Subxt makes use of them. 
+ if !keep_pallet("System") { + if let Some(system_pallet) = self.pallets.iter_mut().find(|p| p.name == "System") { + let index = system_pallet.index; + let associated_types = core::mem::take(&mut system_pallet.associated_types); + + *system_pallet = v16::PalletMetadata { + name: "System".to_string(), + index, + associated_types, + // Everything else is empty: + storage: None, + calls: None, + event: None, + constants: vec![], + error: None, + view_functions: vec![], + docs: vec![], + deprecation_info: v16::ItemDeprecationInfo::NotDeprecated, + }; + } + } + + // Now, only retain types we care about in the registry: + retain_types(self); + } +} + +fn retain_types(m: &mut M) { + // We want to preserve this type even if it's not used anywhere: + let dispatch_err_type_id = find_dispatch_error_type(m.get_types_mut()); + + // Iterate over the type IDs and retain any that we still need: + let keep_these_ids: BTreeSet = + m.iter_type_ids_mut().map(|id| *id).chain(Some(dispatch_err_type_id)).collect(); + + let new_ids = m.get_types_mut().retain(|id| keep_these_ids.contains(&id)); + + // Map IDs found in the metadata to new ones as needed after the retaining: + for id in m.iter_type_ids_mut() { + if let Some(new_id) = new_ids.get(id) { + *id = *new_id; + }; + } +} + +/// This trait is implemented for metadatas, and its purpose is to hand back iterators over +/// all of the type IDs (doesn't need to recurse into them) that are used in the metadata, +/// so that we know which ones we need to keep around in the type registry (and thus which +/// ones we can remove). +trait IterateTypeIds { + /// This should iterate over all type IDs found in the metadata. 
+ fn iter_type_ids_mut(&mut self) -> impl Iterator; +} + +impl IterateTypeIds for v14::RuntimeMetadataV14 { + fn iter_type_ids_mut(&mut self) -> impl Iterator { + // Gather pallet types: + let pallet_types = self.pallets.iter_mut().flat_map(|pallet| { + let pallet_call_types = pallet.calls.as_mut().into_iter().map(|calls| &mut calls.ty.id); + + let pallet_storage_types = + pallet.storage.as_mut().into_iter().flat_map(|s| &mut s.entries).flat_map( + |storage_entry| match &mut storage_entry.ty { + v14::StorageEntryType::Plain(ty) => + Either::Left(core::iter::once(&mut ty.id)), + v14::StorageEntryType::Map { key, value, .. } => + Either::Right([&mut key.id, &mut value.id].into_iter()), + }, + ); + + let pallet_constant_types = + pallet.constants.iter_mut().map(|constant| &mut constant.ty.id); + + let pallet_event_type = + pallet.event.as_mut().into_iter().map(|events| &mut events.ty.id); + + let pallet_error_type = pallet.error.as_mut().into_iter().map(|error| &mut error.ty.id); + + pallet_call_types + .chain(pallet_storage_types) + .chain(pallet_constant_types) + .chain(pallet_event_type) + .chain(pallet_error_type) + }); + + // Transaction Extension types: + let transaction_extension_types = self + .extrinsic + .signed_extensions + .iter_mut() + .flat_map(|ext| [&mut ext.ty.id, &mut ext.additional_signed.id].into_iter()); + + // The extrinsic type: + let extrinsic_type_id = &mut self.extrinsic.ty.id; + + // Return all IDs gathered: + pallet_types.chain(Some(extrinsic_type_id)).chain(transaction_extension_types) + } +} + +impl IterateTypeIds for v15::RuntimeMetadataV15 { + fn iter_type_ids_mut(&mut self) -> impl Iterator { + // Gather pallet types: + let pallet_types = self.pallets.iter_mut().flat_map(|pallet| { + let pallet_call_types = pallet.calls.as_mut().into_iter().map(|calls| &mut calls.ty.id); + + let pallet_storage_types = + pallet.storage.as_mut().into_iter().flat_map(|s| &mut s.entries).flat_map( + |storage_entry| match &mut storage_entry.ty { + 
v14::StorageEntryType::Plain(ty) => + Either::Left(core::iter::once(&mut ty.id)), + v14::StorageEntryType::Map { key, value, .. } => + Either::Right([&mut key.id, &mut value.id].into_iter()), + }, + ); + + let pallet_constant_types = + pallet.constants.iter_mut().map(|constant| &mut constant.ty.id); + + let pallet_event_type = + pallet.event.as_mut().into_iter().map(|events| &mut events.ty.id); + + let pallet_error_type = pallet.error.as_mut().into_iter().map(|error| &mut error.ty.id); + + pallet_call_types + .chain(pallet_storage_types) + .chain(pallet_constant_types) + .chain(pallet_event_type) + .chain(pallet_error_type) + }); + + // Runtime APIs: + let runtime_api_types = + self.apis.iter_mut().flat_map(|api| &mut api.methods).flat_map(|method| { + let method_inputs = method.inputs.iter_mut().map(|input| &mut input.ty.id); + let method_output = &mut method.output.id; + method_inputs.chain(core::iter::once(method_output)) + }); + + // The extrinsic type IDs: + let extrinsic_type_ids = [ + &mut self.extrinsic.call_ty.id, + &mut self.extrinsic.address_ty.id, + &mut self.extrinsic.extra_ty.id, + &mut self.extrinsic.signature_ty.id, + ]; + + // Outer enum type IDs: + let outer_enum_type_ids = [ + &mut self.outer_enums.call_enum_ty.id, + &mut self.outer_enums.event_enum_ty.id, + &mut self.outer_enums.error_enum_ty.id, + ]; + + // Transaction Extension types: + let transaction_extension_types = self + .extrinsic + .signed_extensions + .iter_mut() + .flat_map(|ext| [&mut ext.ty.id, &mut ext.additional_signed.id].into_iter()); + + // Custom types: + let custom_type_ids = self.custom.map.values_mut().map(|value| &mut value.ty.id); + + // Return all IDs gathered: + pallet_types + .chain(runtime_api_types) + .chain(extrinsic_type_ids) + .chain(outer_enum_type_ids) + .chain(transaction_extension_types) + .chain(custom_type_ids) + } +} + +impl IterateTypeIds for v16::RuntimeMetadataV16 { + fn iter_type_ids_mut(&mut self) -> impl Iterator { + // Gather pallet types: + let 
pallet_types = self.pallets.iter_mut().flat_map(|pallet| { + let pallet_call_types = pallet.calls.as_mut().into_iter().map(|calls| &mut calls.ty.id); + + let pallet_storage_types = + pallet.storage.as_mut().into_iter().flat_map(|s| &mut s.entries).flat_map( + |storage_entry| match &mut storage_entry.ty { + v16::StorageEntryType::Plain(ty) => + Either::Left(core::iter::once(&mut ty.id)), + v16::StorageEntryType::Map { key, value, .. } => + Either::Right([&mut key.id, &mut value.id].into_iter()), + }, + ); + + let pallet_constant_types = + pallet.constants.iter_mut().map(|constant| &mut constant.ty.id); + + let pallet_event_type = + pallet.event.as_mut().into_iter().map(|events| &mut events.ty.id); + + let pallet_error_type = pallet.error.as_mut().into_iter().map(|error| &mut error.ty.id); + + let pallet_view_fns = pallet.view_functions.iter_mut().flat_map(|vf| { + let inputs = vf.inputs.iter_mut().map(|input| &mut input.ty.id); + let output = &mut vf.output.id; + + inputs.chain(core::iter::once(output)) + }); + + let pallet_associated_types = pallet + .associated_types + .iter_mut() + .map(|associated_type| &mut associated_type.ty.id); + + pallet_call_types + .chain(pallet_storage_types) + .chain(pallet_constant_types) + .chain(pallet_event_type) + .chain(pallet_error_type) + .chain(pallet_view_fns) + .chain(pallet_associated_types) + }); + + // Runtime APIs: + let runtime_api_types = + self.apis.iter_mut().flat_map(|api| &mut api.methods).flat_map(|method| { + let method_inputs = method.inputs.iter_mut().map(|input| &mut input.ty.id); + let method_output = &mut method.output.id; + method_inputs.chain(core::iter::once(method_output)) + }); + + // The extrinsic type IDs: + let extrinsic_type_ids = + [&mut self.extrinsic.address_ty.id, &mut self.extrinsic.signature_ty.id]; + + // Outer enum type IDs: + let outer_enum_type_ids = [ + &mut self.outer_enums.call_enum_ty.id, + &mut self.outer_enums.event_enum_ty.id, + &mut self.outer_enums.error_enum_ty.id, + ]; + + // 
Transaction Extension types: + let transaction_extension_types = self + .extrinsic + .transaction_extensions + .iter_mut() + .flat_map(|ext| [&mut ext.ty.id, &mut ext.implicit.id].into_iter()); + + // Custom types: + let custom_type_ids = self.custom.map.values_mut().map(|value| &mut value.ty.id); + + // Return all IDs gathered: + pallet_types + .chain(runtime_api_types) + .chain(extrinsic_type_ids) + .chain(outer_enum_type_ids) + .chain(transaction_extension_types) + .chain(custom_type_ids) + } +} + +/// This trait defines how to get a type registry from the metadata +trait GetTypes { + fn get_types_mut(&mut self) -> &mut PortableRegistry; +} + +impl GetTypes for v14::RuntimeMetadataV14 { + fn get_types_mut(&mut self) -> &mut PortableRegistry { + &mut self.types + } +} + +impl GetTypes for v15::RuntimeMetadataV15 { + fn get_types_mut(&mut self) -> &mut PortableRegistry { + &mut self.types + } +} + +impl GetTypes for v16::RuntimeMetadataV16 { + fn get_types_mut(&mut self) -> &mut PortableRegistry { + &mut self.types + } +} + +/// Subxt needs this type so we always ensure to preserve it +/// even if it's not explicitly mentioned anywhere: +fn find_dispatch_error_type(types: &mut PortableRegistry) -> u32 { + types + .types + .iter() + .enumerate() + .find(|(_idx, ty)| ty.ty.path.segments == ["sp_runtime", "DispatchError"]) + .expect("Metadata must contain sp_runtime::DispatchError") + .0 as u32 +} + +#[cfg(test)] +mod test { + use std::collections::BTreeMap; + + use super::*; + use codec::Compact; + use scale_info::meta_type; + + /// Create dummy types that we can check the presence of with is_in_types. + macro_rules! 
make_types { + ($($name:ident)+) => { + $( + struct $name {} + impl scale_info::TypeInfo for $name { + type Identity = $name; + + fn type_info() -> scale_info::Type { + scale_info::Type { + path: scale_info::Path { + segments: vec!["dummy_type", stringify!($name)], + }, + type_params: vec![], + type_def: scale_info::TypeDef::Composite(scale_info::TypeDefComposite { fields: vec![] }), + docs: vec![], + } + } + } + + impl $name { + #[allow(dead_code)] + pub fn is_in_types(types: &scale_info::PortableRegistry) -> bool { + types.types.iter().any(|ty| ty.ty.path.segments == vec!["dummy_type", stringify!($name)]) + } + } + )+ + } + } + + /// Asserts that a set of the dummy types exist in a registry. + macro_rules! assert_is_in_types { + ($($name:ident)+ => $types:expr) => {{ + $( + if !$name::is_in_types(&$types) { + panic!("{} was not found in {}", stringify!($name), stringify!($types)); + } + )+ + }} + } + + /// Asserts that a set of the dummy types do not exist in a registry. + macro_rules! 
assert_not_in_types { + ($($name:ident)+ => $types:expr) => {{ + $( + if $name::is_in_types(&$types) { + panic!("{} was found in {}", stringify!($name), stringify!($types)); + } + )+ + }} + } + + #[allow(dead_code)] + enum DummyDispatchError { + A, + B, + C, + } + + impl scale_info::TypeInfo for DummyDispatchError { + type Identity = DummyDispatchError; + + fn type_info() -> scale_info::Type { + scale_info::Type { + path: scale_info::Path { segments: vec!["sp_runtime", "DispatchError"] }, + type_params: vec![], + type_def: scale_info::TypeDef::Variant(scale_info::TypeDefVariant { + variants: vec![], + }), + docs: vec![], + } + } + } + + #[test] + fn v14_stripping_works() { + make_types!(A B C D E); + + let pallets = vec![ + v14::PalletMetadata { + name: "First", + index: 0, + calls: None, + storage: Some(v14::PalletStorageMetadata { + prefix: "___", + entries: vec![v14::StorageEntryMetadata { + name: "Hello", + modifier: v14::StorageEntryModifier::Optional, + ty: frame_metadata::v14::StorageEntryType::Plain(meta_type::()), + default: vec![], + docs: vec![], + }], + }), + event: Some(v14::PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + }, + v14::PalletMetadata { + name: "Second", + index: 1, + calls: Some(v15::PalletCallMetadata { ty: meta_type::() }), + storage: None, + event: None, + constants: vec![v14::PalletConstantMetadata { + name: "SomeConstant", + ty: meta_type::(), + value: vec![], + docs: vec![], + }], + error: None, + }, + ]; + + let extrinsic = + v14::ExtrinsicMetadata { version: 0, signed_extensions: vec![], ty: meta_type::() }; + + let metadata = + v14::RuntimeMetadataV14::new(pallets, extrinsic, meta_type::()); + + assert_eq!(metadata.types.types.len(), 6); + assert_is_in_types!(A B C D E => metadata.types); + + let only_first_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "First", |_| true); + md + }; + + assert_eq!(only_first_pallet.types.types.len(), 4); + assert_is_in_types!(A B E 
=> only_first_pallet.types); + assert_not_in_types!(C D => only_first_pallet.types); + assert_eq!(only_first_pallet.pallets.len(), 1); + assert_eq!(&only_first_pallet.pallets[0].name, "First"); + + let only_second_pallet = { + let mut md = metadata.clone(); + md.strip_metadata(|name| name == "Second", |_| true); + md + }; + + assert_eq!(only_second_pallet.types.types.len(), 4); + assert_is_in_types!(C D E => only_second_pallet.types); + assert_not_in_types!(A B => only_second_pallet.types); + assert_eq!(only_second_pallet.pallets.len(), 1); + assert_eq!(&only_second_pallet.pallets[0].name, "Second"); + + let no_pallets = { + let mut md = metadata.clone(); + md.strip_metadata(|_| false, |_| true); + md + }; + + assert_eq!(no_pallets.types.types.len(), 2); + assert_is_in_types!(E => no_pallets.types); + assert_not_in_types!(A B C D => no_pallets.types); + assert_eq!(no_pallets.pallets.len(), 0); + } + + #[test] + fn v15_stripping_works() { + make_types!(A B C D E F G H I J K L M N O P); + + let pallets = vec![ + v15::PalletMetadata { + name: "First", + index: 0, + calls: None, + storage: Some(v15::PalletStorageMetadata { + prefix: "___", + entries: vec![v15::StorageEntryMetadata { + name: "Hello", + modifier: v15::StorageEntryModifier::Optional, + ty: frame_metadata::v15::StorageEntryType::Plain(meta_type::()), + default: vec![], + docs: vec![], + }], + }), + event: Some(v15::PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + docs: vec![], + }, + v15::PalletMetadata { + name: "Second", + index: 1, + calls: Some(v15::PalletCallMetadata { ty: meta_type::() }), + storage: None, + event: None, + constants: vec![v15::PalletConstantMetadata { + name: "SomeConstant", + ty: meta_type::(), + value: vec![], + docs: vec![], + }], + error: None, + docs: vec![], + }, + ]; + + let extrinsic = v15::ExtrinsicMetadata { + version: 0, + signed_extensions: vec![], + call_ty: meta_type::(), + address_ty: meta_type::(), + signature_ty: meta_type::(), + 
extra_ty: meta_type::(), + }; + + let runtime_apis = vec![ + v15::RuntimeApiMetadata { + name: "SomeApi", + docs: vec![], + methods: vec![v15::RuntimeApiMethodMetadata { + name: "some_method", + inputs: vec![v15::RuntimeApiMethodParamMetadata { + name: "input1", + ty: meta_type::(), + }], + output: meta_type::(), + docs: vec![], + }], + }, + v15::RuntimeApiMetadata { + name: "AnotherApi", + docs: vec![], + methods: vec![v15::RuntimeApiMethodMetadata { + name: "another_method", + inputs: vec![v15::RuntimeApiMethodParamMetadata { + name: "input1", + ty: meta_type::(), + }], + output: meta_type::(), + docs: vec![], + }], + }, + ]; + + let outer_enums = v15::OuterEnums { + call_enum_ty: meta_type::(), + error_enum_ty: meta_type::(), + event_enum_ty: meta_type::(), + }; + + let custom_values = v15::CustomMetadata { + map: BTreeMap::from_iter(vec![( + "Item", + v15::CustomValueMetadata { ty: meta_type::