Compare commits

..

1 Commits

Author SHA1 Message Date
Omar Abdulla 4ad3084fe1 Cache the chainspec 2025-12-03 19:37:01 +03:00
35 changed files with 708 additions and 2583 deletions
@@ -1,141 +0,0 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."
inputs:
  # Setup arguments & environment.
  # NOTE(review): composite-action inputs do not support a `type` key (that is
  # a `workflow_call` feature), and every input value reaches the action as a
  # string — defaults below are therefore written as strings.
  polkadot-sdk-path:
    description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
    required: false
    default: "."
  cargo-command:
    description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
    required: false
    default: "cargo"
  revive-differential-tests-ref:
    description: "The branch, tag or SHA to checkout for the revive-differential-tests."
    required: false
    default: "main"
  resolc-version:
    description: "The version of resolc to install and use in tests."
    required: false
    default: "0.5.0"
  use-compilation-caches:
    description: "Controls if the compilation caches will be used for the test run or not."
    required: false
    default: "true"
  # Test Execution Arguments
  platform:
    description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
    required: true
  polkadot-omnichain-node-chain-spec-path:
    description: "The path of the chain-spec of the chain we're spawning. This is only required if the polkadot-omni-node is one of the selected platforms."
    required: false
  polkadot-omnichain-node-parachain-id:
    description: "The id of the parachain to spawn with the polkadot-omni-node. This is only required if the polkadot-omni-node is one of the selected platforms."
    required: false
  expectations-file-path:
    description: "Path to the expectations file to use to compare against."
    required: false
runs:
  using: "composite"
  steps:
    - name: Checkout the Differential Tests Repository
      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        repository: paritytech/revive-differential-tests
        ref: ${{ inputs['revive-differential-tests-ref'] }}
        path: revive-differential-tests
        submodules: recursive
    - name: Installing the Latest Resolc
      shell: bash
      # Only a Linux x86_64 musl binary is published under this asset name.
      if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
      run: |
        VERSION="${{ inputs['resolc-version'] }}"
        ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
        echo "Downloading resolc v$VERSION from $ASSET_URL"
        curl -Lsf --show-error -o resolc "$ASSET_URL"
        chmod +x resolc
        ./resolc --version
    # Install both tools once, with --locked, so the committed Cargo.lock is
    # honored (the original installed retester a second time without --locked).
    - name: Installing Retester
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
    - name: Installing report-processor
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/report-processor
    - name: Creating a workdir for retester
      shell: bash
      run: mkdir workdir
    - name: Downloading & Initializing the compilation caches
      shell: bash
      # Inputs are always strings, so compare against the string 'true'.
      # Comparing against the boolean `true` never matches ('true' is coerced
      # to NaN, true to 1), which silently disabled the cache download.
      if: ${{ inputs['use-compilation-caches'] == 'true' }}
      run: |
        curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
        tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
    - name: Building the dependencies from the Polkadot SDK
      shell: bash
      run: |
        ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
        ${{ inputs['cargo-command'] }} build --locked --profile release --bin polkadot-omni-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
    - name: Running the Differential Tests
      shell: bash
      run: |
        # Forward the optional polkadot-omni-node arguments only when the
        # corresponding inputs are non-empty.
        OMNI_ARGS=()
        if [[ -n "${{ inputs['polkadot-omnichain-node-parachain-id'] }}" ]]; then
          OMNI_ARGS+=(
            --polkadot-omni-node.parachain-id
            "${{ inputs['polkadot-omnichain-node-parachain-id'] }}"
          )
        fi
        if [[ -n "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}" ]]; then
          OMNI_ARGS+=(
            --polkadot-omni-node.chain-spec-path
            "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}"
          )
        fi
        # `|| true`: individual test failures are expected; the pass/fail gate
        # is the "Check Expectations" comparison against the report below.
        retester test \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
          --platform ${{ inputs['platform'] }} \
          --report.file-name report.json \
          --concurrency.number-of-nodes 10 \
          --concurrency.number-of-threads 10 \
          --concurrency.number-of-concurrent-tasks 100 \
          --working-directory ./workdir \
          --revive-dev-node.consensus manual-seal-200 \
          --revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
          --eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
          --polkadot-omni-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/polkadot-omni-node \
          --resolc.path ./resolc \
          "${OMNI_ARGS[@]}" || true
    - name: Generate the expectation file
      shell: bash
      run: report-processor generate-expectations-file --report-path ./workdir/report.json --output-path ./workdir/expectations.json --remove-prefix ./revive-differential-tests/resolc-compiler-tests
    - name: Upload the Report to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      with:
        name: ${{ inputs['platform'] }}-report.json
        path: ./workdir/report.json
    - name: Upload the Expectations File to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      with:
        name: ${{ inputs['platform'] }}.json
        path: ./workdir/expectations.json
    - name: Check Expectations
      shell: bash
      if: ${{ inputs['expectations-file-path'] != '' }}
      run: report-processor compare-expectation-files --base-expectation-path ${{ inputs['expectations-file-path'] }} --other-expectation-path ./workdir/expectations.json
+1 -1
View File
@@ -10,7 +10,7 @@ node_modules
*.log *.log
profile.json.gz profile.json.gz
workdir* workdir
!/schema.json !/schema.json
!/dev-genesis.json !/dev-genesis.json
Generated
+90 -272
View File
@@ -67,16 +67,15 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]] [[package]]
name = "alloy" name = "alloy"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e7cdea683d79fab6c208182ba702ea37b9f7422f3de80044d79a210b884c8ca" checksum = "7f6cfe35f100bc496007c9a00f90b88bdf565f1421d4c707c9f07e0717e2aaad"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-contract", "alloy-contract",
"alloy-core", "alloy-core",
"alloy-eips", "alloy-eips",
"alloy-genesis", "alloy-genesis",
"alloy-json-rpc",
"alloy-network", "alloy-network",
"alloy-provider", "alloy-provider",
"alloy-pubsub", "alloy-pubsub",
@@ -105,9 +104,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-consensus" name = "alloy-consensus"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7ea09cffa9ad82f6404e6ab415ea0c41a7674c0f2e2e689cb8683f772b5940d" checksum = "59094911f05dbff1cf5b29046a00ef26452eccc8d47136d50a47c0cf22f00c85"
dependencies = [ dependencies = [
"alloy-eips", "alloy-eips",
"alloy-primitives", "alloy-primitives",
@@ -116,7 +115,6 @@ dependencies = [
"alloy-trie", "alloy-trie",
"alloy-tx-macros", "alloy-tx-macros",
"auto_impl", "auto_impl",
"borsh",
"c-kzg", "c-kzg",
"derive_more 2.0.1", "derive_more 2.0.1",
"either", "either",
@@ -132,9 +130,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-consensus-any" name = "alloy-consensus-any"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8aafa1f0ddb5cbb6cba6b10e8fa6e31f8c5d5c22e262b30a5d2fa9d336c3b637" checksum = "903cb8f728107ca27c816546f15be38c688df3c381d7bd1a4a9f215effc1ddb4"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips", "alloy-eips",
@@ -146,9 +144,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-contract" name = "alloy-contract"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "398c81368b864fdea950071a00b298c22b21506fed1ed8abc7f2902727f987f1" checksum = "03df5cb3b428ac96b386ad64c11d5c6e87a5505682cf1fbd6f8f773e9eda04f6"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-dyn-abi", "alloy-dyn-abi",
@@ -169,9 +167,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-core" name = "alloy-core"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d4087016b0896051dd3d03e0bedda2f4d4d1689af8addc8450288c63a9e5f68" checksum = "ad31216895d27d307369daa1393f5850b50bbbd372478a9fa951c095c210627e"
dependencies = [ dependencies = [
"alloy-dyn-abi", "alloy-dyn-abi",
"alloy-json-abi", "alloy-json-abi",
@@ -182,9 +180,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-dyn-abi" name = "alloy-dyn-abi"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "369f5707b958927176265e8a58627fc6195e5dfa5c55689396e68b241b3a72e6" checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652"
dependencies = [ dependencies = [
"alloy-json-abi", "alloy-json-abi",
"alloy-primitives", "alloy-primitives",
@@ -211,35 +209,32 @@ dependencies = [
[[package]] [[package]]
name = "alloy-eip2930" name = "alloy-eip2930"
version = "0.2.3" version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
"borsh",
"serde", "serde",
] ]
[[package]] [[package]]
name = "alloy-eip7702" name = "alloy-eip7702"
version = "0.6.3" version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
"borsh",
"k256",
"serde", "serde",
"thiserror 2.0.12", "thiserror 2.0.12",
] ]
[[package]] [[package]]
name = "alloy-eips" name = "alloy-eips"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "691fed81bbafefae0f5a6cedd837ebb3fade46e7d91c5b67a463af12ecf5b11a" checksum = "ac7f1c9a1ccc7f3e03c36976455751a6166a4f0d2d2c530c3f87dfe7d0cdc836"
dependencies = [ dependencies = [
"alloy-eip2124", "alloy-eip2124",
"alloy-eip2930", "alloy-eip2930",
@@ -248,7 +243,6 @@ dependencies = [
"alloy-rlp", "alloy-rlp",
"alloy-serde", "alloy-serde",
"auto_impl", "auto_impl",
"borsh",
"c-kzg", "c-kzg",
"derive_more 2.0.1", "derive_more 2.0.1",
"either", "either",
@@ -260,24 +254,23 @@ dependencies = [
[[package]] [[package]]
name = "alloy-genesis" name = "alloy-genesis"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf91e325928dfffe90c769c2c758cc6e9ba35331c6e984310fe8276548df4a9e" checksum = "1421f6c9d15e5b86afbfe5865ca84dea3b9f77173a0963c1a2ee4e626320ada9"
dependencies = [ dependencies = [
"alloy-eips", "alloy-eips",
"alloy-primitives", "alloy-primitives",
"alloy-serde", "alloy-serde",
"alloy-trie", "alloy-trie",
"borsh",
"serde", "serde",
"serde_with", "serde_with",
] ]
[[package]] [[package]]
name = "alloy-json-abi" name = "alloy-json-abi"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84e3cf01219c966f95a460c95f1d4c30e12f6c18150c21a30b768af2a2a29142" checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-sol-type-parser", "alloy-sol-type-parser",
@@ -287,9 +280,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-json-rpc" name = "alloy-json-rpc"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8618cd8431d82d21ed98c300b6072f73fe925dff73b548aa2d4573b5a8d3ca91" checksum = "65f763621707fa09cece30b73ecc607eb43fd7a72451fe3b46f645b905086926"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-sol-types", "alloy-sol-types",
@@ -302,9 +295,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-network" name = "alloy-network"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "390641d0e7e51d5d39b905be654ef391a89d62b9e6d3a74fd931b4df26daae20" checksum = "2f59a869fa4b4c3a7f08b1c8cb79aec61c29febe6e24a24fe0fcfded8a9b5703"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-consensus-any", "alloy-consensus-any",
@@ -328,9 +321,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-network-primitives" name = "alloy-network-primitives"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9badd9de9f310f0c17602c642c043eee40033c0651f45809189e411f6b166e0f" checksum = "46e9374c667c95c41177602ebe6f6a2edd455193844f011d973d374b65501b38"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips", "alloy-eips",
@@ -341,18 +334,18 @@ dependencies = [
[[package]] [[package]]
name = "alloy-primitives" name = "alloy-primitives"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6a0fb18dd5fb43ec5f0f6a20be1ce0287c79825827de5744afaa6c957737c33" checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55"
dependencies = [ dependencies = [
"alloy-rlp", "alloy-rlp",
"bytes", "bytes",
"cfg-if", "cfg-if",
"const-hex", "const-hex",
"derive_more 2.0.1", "derive_more 2.0.1",
"foldhash 0.2.0", "foldhash",
"getrandom 0.3.3", "getrandom 0.3.3",
"hashbrown 0.16.1", "hashbrown 0.15.3",
"indexmap 2.10.0", "indexmap 2.10.0",
"itoa", "itoa",
"k256", "k256",
@@ -360,7 +353,6 @@ dependencies = [
"paste", "paste",
"proptest", "proptest",
"rand 0.9.2", "rand 0.9.2",
"rapidhash",
"ruint", "ruint",
"rustc-hash", "rustc-hash",
"serde", "serde",
@@ -370,9 +362,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-provider" name = "alloy-provider"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b7dcf6452993e31ea728b9fc316ebe4e4e3a820c094f2aad55646041ee812a0" checksum = "77818b7348bd5486491a5297579dbfe5f706a81f8e1f5976393025f1e22a7c7d"
dependencies = [ dependencies = [
"alloy-chains", "alloy-chains",
"alloy-consensus", "alloy-consensus",
@@ -383,11 +375,9 @@ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-pubsub", "alloy-pubsub",
"alloy-rpc-client", "alloy-rpc-client",
"alloy-rpc-types-anvil",
"alloy-rpc-types-debug", "alloy-rpc-types-debug",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
"alloy-rpc-types-trace", "alloy-rpc-types-trace",
"alloy-rpc-types-txpool",
"alloy-signer", "alloy-signer",
"alloy-sol-types", "alloy-sol-types",
"alloy-transport", "alloy-transport",
@@ -401,7 +391,7 @@ dependencies = [
"either", "either",
"futures", "futures",
"futures-utils-wasm", "futures-utils-wasm",
"lru 0.16.3", "lru 0.13.0",
"parking_lot", "parking_lot",
"pin-project", "pin-project",
"reqwest", "reqwest",
@@ -416,9 +406,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-pubsub" name = "alloy-pubsub"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "040dabce173e246b9522cf189db8e383c811b89cf6bd07a6ab952ec3b822a1e6" checksum = "249b45103a66c9ad60ad8176b076106d03a2399a37f0ee7b0e03692e6b354cb9"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-primitives", "alloy-primitives",
@@ -460,9 +450,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-client" name = "alloy-rpc-client"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce4a28b1302733f565a2900a0d7cb3db94ffd1dd58ad7ebf5b0ec302e868ed1e" checksum = "2430d5623e428dd012c6c2156ae40b7fe638d6fca255e3244e0fba51fa698e93"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-primitives", "alloy-primitives",
@@ -486,38 +476,23 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-types" name = "alloy-rpc-types"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1408505e2a41c71f7b3f83ee52e5ecd0f2a6f2db98046d0a4defb9f85a007a9e" checksum = "e9e131624d08a25cfc40557041e7dc42e1182fa1153e7592d120f769a1edce56"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rpc-types-anvil",
"alloy-rpc-types-debug", "alloy-rpc-types-debug",
"alloy-rpc-types-engine",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
"alloy-rpc-types-trace", "alloy-rpc-types-trace",
"alloy-rpc-types-txpool",
"alloy-serde",
"serde",
]
[[package]]
name = "alloy-rpc-types-anvil"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "456a35438dc5631320a747466a0366bf21b03494fc2e33ac903c128504a68edf"
dependencies = [
"alloy-primitives",
"alloy-rpc-types-eth",
"alloy-serde", "alloy-serde",
"serde", "serde",
] ]
[[package]] [[package]]
name = "alloy-rpc-types-any" name = "alloy-rpc-types-any"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6792425a4a8e74be38e8785f90f497f8f325188f40f13c168a220310fd421d12" checksum = "07429a1099cd17227abcddb91b5e38c960aaeb02a6967467f5bb561fbe716ac6"
dependencies = [ dependencies = [
"alloy-consensus-any", "alloy-consensus-any",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
@@ -526,9 +501,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-types-debug" name = "alloy-rpc-types-debug"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f72b891c28aa7376f7e4468c40d2bdcc1013ab47ceae57a2696e78b0cd1e8341" checksum = "aeff305b7d10cc1c888456d023e7bb8a5ea82e9e42b951e37619b88cc1a1486d"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"derive_more 2.0.1", "derive_more 2.0.1",
@@ -536,28 +511,11 @@ dependencies = [
"serde_with", "serde_with",
] ]
[[package]]
name = "alloy-rpc-types-engine"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7bcd9ead89076095806364327a1b18c2215998b6fff5a45f82c658bfbabf2df"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-primitives",
"alloy-rlp",
"alloy-serde",
"derive_more 2.0.1",
"rand 0.8.5",
"serde",
"strum",
]
[[package]] [[package]]
name = "alloy-rpc-types-eth" name = "alloy-rpc-types-eth"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3b505d6223c88023fb1217ac24eab950e4368f6634405bea3977d34cae6935b" checksum = "db46b0901ee16bbb68d986003c66dcb74a12f9d9b3c44f8e85d51974f2458f0f"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-consensus-any", "alloy-consensus-any",
@@ -576,9 +534,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-types-trace" name = "alloy-rpc-types-trace"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ca8db59fa69da9da5bb6b75823c2b07c27b0f626a0f3af72bac32a7c361a418" checksum = "36f10620724bd45f80c79668a8cdbacb6974f860686998abce28f6196ae79444"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
@@ -588,23 +546,11 @@ dependencies = [
"thiserror 2.0.12", "thiserror 2.0.12",
] ]
[[package]]
name = "alloy-rpc-types-txpool"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e14194567368b8c8b7aeef470831bbe90cc8b12ef5f48b18acdda9cf20070ff1"
dependencies = [
"alloy-primitives",
"alloy-rpc-types-eth",
"alloy-serde",
"serde",
]
[[package]] [[package]]
name = "alloy-serde" name = "alloy-serde"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75a755a3cc0297683c2879bbfe2ff22778f35068f07444f0b52b5b87570142b6" checksum = "5413814be7a22fbc81e0f04a2401fcc3eb25e56fd53b04683e8acecc6e1fe01b"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"serde", "serde",
@@ -613,9 +559,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-signer" name = "alloy-signer"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d73afcd1fb2d851bf4ba67504a951b73231596f819cc814f50d11126db7ac1b" checksum = "53410a18a61916e2c073a6519499514e027b01e77eeaf96acd1df7cf96ef6bb2"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"async-trait", "async-trait",
@@ -628,9 +574,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-signer-local" name = "alloy-signer-local"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "807b043936012acc788c96cba06b8580609d124bb105dc470a1617051cc4aa63" checksum = "e6006c4cbfa5d08cadec1fcabea6cb56dc585a30a9fce40bcf81e307d6a71c8e"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-network", "alloy-network",
@@ -644,9 +590,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-sol-macro" name = "alloy-sol-macro"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09eb18ce0df92b4277291bbaa0ed70545d78b02948df756bbd3d6214bf39a218" checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849"
dependencies = [ dependencies = [
"alloy-sol-macro-expander", "alloy-sol-macro-expander",
"alloy-sol-macro-input", "alloy-sol-macro-input",
@@ -658,9 +604,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-sol-macro-expander" name = "alloy-sol-macro-expander"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95d9fa2daf21f59aa546d549943f10b5cce1ae59986774019fbedae834ffe01b" checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100"
dependencies = [ dependencies = [
"alloy-json-abi", "alloy-json-abi",
"alloy-sol-macro-input", "alloy-sol-macro-input",
@@ -677,9 +623,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-sol-macro-input" name = "alloy-sol-macro-input"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9396007fe69c26ee118a19f4dee1f5d1d6be186ea75b3881adf16d87f8444686" checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105"
dependencies = [ dependencies = [
"alloy-json-abi", "alloy-json-abi",
"const-hex", "const-hex",
@@ -695,9 +641,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-sol-type-parser" name = "alloy-sol-type-parser"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af67a0b0dcebe14244fc92002cd8d96ecbf65db4639d479f5fcd5805755a4c27" checksum = "10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2"
dependencies = [ dependencies = [
"serde", "serde",
"winnow", "winnow",
@@ -705,9 +651,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-sol-types" name = "alloy-sol-types"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09aeea64f09a7483bdcd4193634c7e5cf9fd7775ee767585270cd8ce2d69dc95" checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe"
dependencies = [ dependencies = [
"alloy-json-abi", "alloy-json-abi",
"alloy-primitives", "alloy-primitives",
@@ -717,11 +663,12 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport" name = "alloy-transport"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b84a605484a03959436e5bea194e6d62f77c3caef750196b4b4f1c8d23254df" checksum = "d94ee404368a3d9910dfe61b203e888c6b0e151a50e147f95da8baff9f9c7763"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-primitives",
"auto_impl", "auto_impl",
"base64 0.22.1", "base64 0.22.1",
"derive_more 2.0.1", "derive_more 2.0.1",
@@ -740,9 +687,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport-http" name = "alloy-transport-http"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a400ad5b73590a099111481d4a66a2ca1266ebc85972a844958caf42bfdd37d" checksum = "a2f8a6338d594f6c6481292215ee8f2fd7b986c80aba23f3f44e761a8658de78"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-transport", "alloy-transport",
@@ -755,9 +702,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport-ipc" name = "alloy-transport-ipc"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74adc2ef0cb8c2cad4de2044afec2d4028061bc016148a251704dc204f259477" checksum = "17a37a8ca18006fa0a58c7489645619ff58cfa073f2b29c4e052c9bd114b123a"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-pubsub", "alloy-pubsub",
@@ -775,14 +722,15 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport-ws" name = "alloy-transport-ws"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2c1672b97fef0057f3ca268507fb4f1bc59497531603f39ccaf47cc1e5b9cb4" checksum = "679b0122b7bca9d4dc5eb2c0549677a3c53153f6e232f23f4b3ba5575f74ebde"
dependencies = [ dependencies = [
"alloy-pubsub", "alloy-pubsub",
"alloy-transport", "alloy-transport",
"futures", "futures",
"http 1.3.1", "http 1.3.1",
"rustls 0.23.27",
"serde_json", "serde_json",
"tokio", "tokio",
"tokio-tungstenite 0.26.2", "tokio-tungstenite 0.26.2",
@@ -792,9 +740,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-trie" name = "alloy-trie"
version = "0.9.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "428aa0f0e0658ff091f8f667c406e034b431cb10abd39de4f507520968acc499" checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
@@ -808,10 +756,11 @@ dependencies = [
[[package]] [[package]]
name = "alloy-tx-macros" name = "alloy-tx-macros"
version = "1.4.1" version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f17272de4df6b8b59889b264f0306eba47a69f23f57f1c08f1366a4617b48c30" checksum = "e64c09ec565a90ed8390d82aa08cd3b22e492321b96cb4a3d4f58414683c9e2f"
dependencies = [ dependencies = [
"alloy-primitives",
"darling 0.21.3", "darling 0.21.3",
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -1777,29 +1726,6 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "borsh"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f"
dependencies = [
"borsh-derive",
"cfg_aliases",
]
[[package]]
name = "borsh-derive"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c"
dependencies = [
"once_cell",
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.101",
]
[[package]] [[package]]
name = "bounded-collections" name = "bounded-collections"
version = "0.2.4" version = "0.2.4"
@@ -2913,12 +2839,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
[[package]]
name = "foldhash"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
[[package]] [[package]]
name = "foreign-types" name = "foreign-types"
version = "0.3.2" version = "0.3.2"
@@ -3345,23 +3265,10 @@ checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3"
dependencies = [ dependencies = [
"allocator-api2", "allocator-api2",
"equivalent", "equivalent",
"foldhash 0.1.5", "foldhash",
"serde", "serde",
] ]
[[package]]
name = "hashbrown"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash 0.2.0",
"serde",
"serde_core",
]
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.5.0" version = "0.5.0"
@@ -3599,7 +3506,6 @@ dependencies = [
"tokio", "tokio",
"tokio-rustls 0.26.2", "tokio-rustls 0.26.2",
"tower-service", "tower-service",
"webpki-roots 1.0.2",
] ]
[[package]] [[package]]
@@ -4522,19 +4428,13 @@ dependencies = [
[[package]] [[package]]
name = "lru" name = "lru"
version = "0.16.3" version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465"
dependencies = [ dependencies = [
"hashbrown 0.16.1", "hashbrown 0.15.3",
] ]
[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]] [[package]]
name = "macro-string" name = "macro-string"
version = "0.1.4" version = "0.1.4"
@@ -5425,61 +5325,6 @@ dependencies = [
"byteorder", "byteorder",
] ]
[[package]]
name = "quinn"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
"cfg_aliases",
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustls 0.23.27",
"socket2 0.6.0",
"thiserror 2.0.12",
"tokio",
"tracing",
"web-time",
]
[[package]]
name = "quinn-proto"
version = "0.11.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
dependencies = [
"bytes",
"getrandom 0.3.3",
"lru-slab",
"rand 0.9.2",
"ring",
"rustc-hash",
"rustls 0.23.27",
"rustls-pki-types",
"slab",
"thiserror 2.0.12",
"tinyvec",
"tracing",
"web-time",
]
[[package]]
name = "quinn-udp"
version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.0",
"tracing",
"windows-sys 0.59.0",
]
[[package]] [[package]]
name = "quote" name = "quote"
version = "1.0.40" version = "1.0.40"
@@ -5572,16 +5417,6 @@ dependencies = [
"rand_core 0.6.4", "rand_core 0.6.4",
] ]
[[package]]
name = "rapidhash"
version = "4.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d8b5b858a440a0bc02625b62dd95131b9201aa9f69f411195dd4a7cfb1de3d7"
dependencies = [
"rand 0.9.2",
"rustversion",
]
[[package]] [[package]]
name = "rayon" name = "rayon"
version = "1.10.0" version = "1.10.0"
@@ -5719,8 +5554,6 @@ dependencies = [
"once_cell", "once_cell",
"percent-encoding", "percent-encoding",
"pin-project-lite", "pin-project-lite",
"quinn",
"rustls 0.23.27",
"rustls-pki-types", "rustls-pki-types",
"serde", "serde",
"serde_json", "serde_json",
@@ -5728,7 +5561,6 @@ dependencies = [
"sync_wrapper", "sync_wrapper",
"tokio", "tokio",
"tokio-native-tls", "tokio-native-tls",
"tokio-rustls 0.26.2",
"tower 0.5.2", "tower 0.5.2",
"tower-http 0.6.4", "tower-http 0.6.4",
"tower-service", "tower-service",
@@ -5736,7 +5568,6 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
"wasm-bindgen-futures", "wasm-bindgen-futures",
"web-sys", "web-sys",
"webpki-roots 1.0.2",
] ]
[[package]] [[package]]
@@ -5764,6 +5595,7 @@ dependencies = [
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"strum", "strum",
"tokio",
] ]
[[package]] [[package]]
@@ -5910,18 +5742,6 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "revive-dt-report-processor"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"revive-dt-common",
"revive-dt-report",
"serde",
"serde_json",
]
[[package]] [[package]]
name = "revive-dt-solc-binaries" name = "revive-dt-solc-binaries"
version = "0.1.0" version = "0.1.0"
@@ -5986,14 +5806,13 @@ dependencies = [
[[package]] [[package]]
name = "ruint" name = "ruint"
version = "1.17.2" version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c141e807189ad38a07276942c6623032d3753c8859c146104ac2e4d68865945a" checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4"
dependencies = [ dependencies = [
"alloy-rlp", "alloy-rlp",
"ark-ff 0.3.0", "ark-ff 0.3.0",
"ark-ff 0.4.2", "ark-ff 0.4.2",
"ark-ff 0.5.0",
"bytes", "bytes",
"fastrlp 0.3.1", "fastrlp 0.3.1",
"fastrlp 0.4.0", "fastrlp 0.4.0",
@@ -6007,7 +5826,7 @@ dependencies = [
"rand 0.9.2", "rand 0.9.2",
"rlp", "rlp",
"ruint-macro", "ruint-macro",
"serde_core", "serde",
"valuable", "valuable",
"zeroize", "zeroize",
] ]
@@ -6136,7 +5955,6 @@ version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
dependencies = [ dependencies = [
"web-time",
"zeroize", "zeroize",
] ]
@@ -7920,9 +7738,9 @@ dependencies = [
[[package]] [[package]]
name = "syn-solidity" name = "syn-solidity"
version = "1.5.2" version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f92d01b5de07eaf324f7fca61cc6bd3d82bbc1de5b6c963e6fe79e86f36580d" checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75"
dependencies = [ dependencies = [
"paste", "paste",
"proc-macro2", "proc-macro2",
+21 -7
View File
@@ -21,9 +21,7 @@ revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interactio
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" } revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" } revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" } revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
revive-dt-report-processor = { version = "0.1.0", path = "crates/report-processor" }
alloy = { version = "1.4.1", features = ["full", "genesis", "json-rpc"] }
ansi_term = "0.12.1" ansi_term = "0.12.1"
anyhow = "1.0" anyhow = "1.0"
bson = { version = "2.15.0" } bson = { version = "2.15.0" }
@@ -80,14 +78,30 @@ revive-differential = { git = "https://github.com/paritytech/revive", rev = "338
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" } zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
[workspace.dependencies.alloy]
version = "1.0.37"
default-features = false
features = [
"json-abi",
"providers",
"provider-ws",
"provider-ipc",
"provider-http",
"provider-debug-api",
"reqwest",
"rpc-types",
"signer-local",
"std",
"network",
"serde",
"rpc-types-eth",
"genesis",
"sol-types",
]
[profile.bench] [profile.bench]
inherits = "release" inherits = "release"
codegen-units = 1
lto = true lto = true
[profile.production]
inherits = "release"
codegen-units = 1 codegen-units = 1
lto = true
[workspace.lints.clippy] [workspace.lints.clippy]
+1
View File
@@ -19,6 +19,7 @@ semver = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
schemars = { workspace = true } schemars = { workspace = true }
strum = { workspace = true } strum = { workspace = true }
tokio = { workspace = true, default-features = false, features = ["time"] }
[lints] [lints]
workspace = true workspace = true
+3
View File
@@ -0,0 +1,3 @@
mod poll;
pub use poll::*;
+72
View File
@@ -0,0 +1,72 @@
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Context as _, Result, anyhow};
const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
/// A function that polls for a fallible future for some period of time and errors if it fails to
/// get a result after polling.
///
/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
/// returns an [`Err`] in which case the function stops polling and returns the error.
///
/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
/// the permitted polling duration then this function returns an [`Err`]
///
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
polling_duration: Duration,
polling_wait_behavior: PollingWaitBehavior,
mut future: impl FnMut() -> F,
) -> Result<O>
where
F: Future<Output = Result<ControlFlow<O, ()>>>,
{
let mut retries = 0;
let mut total_wait_duration = Duration::ZERO;
let max_allowed_wait_duration = polling_duration;
loop {
if total_wait_duration >= max_allowed_wait_duration {
break Err(anyhow!(
"Polling failed after {} retries and a total of {:?} of wait time",
retries,
total_wait_duration
));
}
match future()
.await
.context("Polled future returned an error during polling loop")?
{
ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration,
PollingWaitBehavior::ExponentialBackoff => {
Duration::from_secs(2u64.pow(retries))
.min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
}
};
let next_wait_duration =
next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
total_wait_duration += next_wait_duration;
retries += 1;
tokio::time::sleep(next_wait_duration).await;
}
ControlFlow::Break(output) => {
break Ok(output);
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
Constant(Duration),
#[default]
ExponentialBackoff,
}
+1
View File
@@ -3,6 +3,7 @@
pub mod cached_fs; pub mod cached_fs;
pub mod fs; pub mod fs;
pub mod futures;
pub mod iterators; pub mod iterators;
pub mod macros; pub mod macros;
pub mod types; pub mod types;
-8
View File
@@ -39,12 +39,6 @@ pub enum PlatformIdentifier {
ZombienetPolkavmResolc, ZombienetPolkavmResolc,
/// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler. /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
ZombienetRevmSolc, ZombienetRevmSolc,
/// A polkadot-omni-chain based node with a custom runtime with the PolkaVM backend and the
/// resolc compiler.
PolkadotOmniNodePolkavmResolc,
/// A polkadot-omni-chain based node with a custom runtime with the REVM backend and the solc
/// compiler.
PolkadotOmniNodeRevmSolc,
} }
/// An enum of the platform identifiers of all of the platforms supported by this framework. /// An enum of the platform identifiers of all of the platforms supported by this framework.
@@ -101,8 +95,6 @@ pub enum NodeIdentifier {
ReviveDevNode, ReviveDevNode,
/// A zombienet spawned nodes /// A zombienet spawned nodes
Zombienet, Zombienet,
/// The polkadot-omni-node.
PolkadotOmniNode,
} }
/// An enum representing the identifiers of the supported VMs. /// An enum representing the identifiers of the supported VMs.
-12
View File
@@ -23,18 +23,6 @@ pub struct Mode {
pub version: Option<semver::VersionReq>, pub version: Option<semver::VersionReq>,
} }
impl Ord for Mode {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.to_string().cmp(&other.to_string())
}
}
impl PartialOrd for Mode {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Display for Mode { impl Display for Mode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.pipeline.fmt(f)?; self.pipeline.fmt(f)?;
@@ -1,15 +1,10 @@
use std::{ use std::{fmt::Display, path::PathBuf, str::FromStr};
fmt::Display,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{Context as _, bail}; use anyhow::{Context as _, bail};
use serde::{Deserialize, Serialize};
use crate::types::Mode; use crate::types::Mode;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum ParsedTestSpecifier { pub enum ParsedTestSpecifier {
/// All of the test cases in the file should be ran across all of the specified modes /// All of the test cases in the file should be ran across all of the specified modes
FileOrDirectory { FileOrDirectory {
@@ -39,22 +34,6 @@ pub enum ParsedTestSpecifier {
}, },
} }
impl ParsedTestSpecifier {
pub fn metadata_path(&self) -> &Path {
match self {
ParsedTestSpecifier::FileOrDirectory {
metadata_or_directory_file_path: metadata_file_path,
}
| ParsedTestSpecifier::Case {
metadata_file_path, ..
}
| ParsedTestSpecifier::CaseWithMode {
metadata_file_path, ..
} => metadata_file_path,
}
}
}
impl Display for ParsedTestSpecifier { impl Display for ParsedTestSpecifier {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
@@ -152,22 +131,3 @@ impl TryFrom<&str> for ParsedTestSpecifier {
value.parse() value.parse()
} }
} }
impl Serialize for ParsedTestSpecifier {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.to_string().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for ParsedTestSpecifier {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let string = String::deserialize(deserializer)?;
string.parse().map_err(serde::de::Error::custom)
}
}
+8 -12
View File
@@ -208,18 +208,14 @@ impl SolidityCompiler for Resolc {
anyhow::bail!("Compilation failed with an error: {message}"); anyhow::bail!("Compilation failed with an error: {message}");
} }
let parsed: SolcStandardJsonOutput = { let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
let mut deserializer = serde_json::Deserializer::from_slice(&stdout); .map_err(|e| {
deserializer.disable_recursion_limit(); anyhow::anyhow!(
serde::de::Deserialize::deserialize(&mut deserializer) "failed to parse resolc JSON output: {e}\nstderr: {}",
.map_err(|e| { String::from_utf8_lossy(&stderr)
anyhow::anyhow!( )
"failed to parse resolc JSON output: {e}\nstderr: {}", })
String::from_utf8_lossy(&stderr) .context("Failed to parse resolc standard JSON output")?;
)
})
.context("Failed to parse resolc standard JSON output")?
};
tracing::debug!( tracing::debug!(
output = %serde_json::to_string(&parsed).unwrap(), output = %serde_json::to_string(&parsed).unwrap(),
-111
View File
@@ -143,17 +143,6 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
} }
} }
impl AsRef<PolkadotOmnichainNodeConfiguration> for Context {
fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
}
impl AsRef<EthRpcConfiguration> for Context { impl AsRef<EthRpcConfiguration> for Context {
fn as_ref(&self) -> &EthRpcConfiguration { fn as_ref(&self) -> &EthRpcConfiguration {
match self { match self {
@@ -239,7 +228,6 @@ pub struct TestExecutionContext {
#[arg( #[arg(
short = 'p', short = 'p',
long = "platform", long = "platform",
id = "platforms",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"] default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)] )]
pub platforms: Vec<PlatformIdentifier>, pub platforms: Vec<PlatformIdentifier>,
@@ -289,10 +277,6 @@ pub struct TestExecutionContext {
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration, pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Polkadot Omnichain Node.
#[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
/// Configuration parameters for the Eth Rpc. /// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")] #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration, pub eth_rpc_configuration: EthRpcConfiguration,
@@ -391,23 +375,6 @@ pub struct BenchmarkingContext {
#[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)] #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
pub default_repetition_count: usize, pub default_repetition_count: usize,
/// This transaction controls whether the benchmarking driver should await for transactions to
/// be included in a block before moving on to the next transaction in the sequence or not.
///
/// This behavior is useful in certain cases and not so useful in others. For example, in some
/// repetition block if there's some kind of relationship between txs n and n+1 (for example a
/// mint then a transfer) then you would want to wait for the minting to happen and then move on
/// to the transfers. On the other hand, if there's no relationship between the transactions n
/// and n+1 (e.g., mint and another mint of a different token) then awaiting the first mint to
/// be included in a block might not seem necessary.
///
/// By default, this behavior is set to false to allow the benchmarking framework to saturate
/// the node's mempool as quickly as possible. However, as explained above, there are cases
/// where it's needed and certain workloads where failure to provide this argument would lead to
/// inaccurate results.
#[arg(long)]
pub await_transaction_inclusion: bool,
/// Configuration parameters for the corpus files to use. /// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")] #[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration, pub corpus_configuration: CorpusConfiguration,
@@ -436,10 +403,6 @@ pub struct BenchmarkingContext {
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration, pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Polkadot Omnichain Node.
#[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
/// Configuration parameters for the Eth Rpc. /// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")] #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration, pub eth_rpc_configuration: EthRpcConfiguration,
@@ -518,10 +481,6 @@ pub struct ExportGenesisContext {
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration, pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Polkadot Omnichain Node.
#[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
/// Configuration parameters for the wallet. /// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")] #[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration, pub wallet_configuration: WalletConfiguration,
@@ -581,12 +540,6 @@ impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
} }
} }
impl AsRef<PolkadotOmnichainNodeConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
&self.polkadot_omnichain_node_configuration
}
}
impl AsRef<EthRpcConfiguration> for TestExecutionContext { impl AsRef<EthRpcConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &EthRpcConfiguration { fn as_ref(&self) -> &EthRpcConfiguration {
&self.eth_rpc_configuration &self.eth_rpc_configuration
@@ -683,12 +636,6 @@ impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
} }
} }
impl AsRef<PolkadotOmnichainNodeConfiguration> for BenchmarkingContext {
fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
&self.polkadot_omnichain_node_configuration
}
}
impl AsRef<EthRpcConfiguration> for BenchmarkingContext { impl AsRef<EthRpcConfiguration> for BenchmarkingContext {
fn as_ref(&self) -> &EthRpcConfiguration { fn as_ref(&self) -> &EthRpcConfiguration {
&self.eth_rpc_configuration &self.eth_rpc_configuration
@@ -749,12 +696,6 @@ impl AsRef<ReviveDevNodeConfiguration> for ExportGenesisContext {
} }
} }
impl AsRef<PolkadotOmnichainNodeConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
&self.polkadot_omnichain_node_configuration
}
}
impl AsRef<WalletConfiguration> for ExportGenesisContext { impl AsRef<WalletConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &WalletConfiguration { fn as_ref(&self) -> &WalletConfiguration {
&self.wallet_configuration &self.wallet_configuration
@@ -911,54 +852,6 @@ pub struct ReviveDevNodeConfiguration {
pub existing_rpc_url: Vec<String>, pub existing_rpc_url: Vec<String>,
} }
/// A set of configuration parameters for the polkadot-omni-node.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct PolkadotOmnichainNodeConfiguration {
/// Specifies the path of the polkadot-omni-node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the polkadot-omni-node
/// binary that's provided in the user's $PATH.
#[clap(
id = "polkadot-omni-node.path",
long = "polkadot-omni-node.path",
default_value = "polkadot-omni-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "polkadot-omni-node.start-timeout-ms",
long = "polkadot-omni-node.start-timeout-ms",
default_value = "90000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// Defines how often blocks will be sealed by the node in milliseconds.
#[clap(
id = "polkadot-omni-node.block-time-ms",
long = "polkadot-omni-node.block-time-ms",
default_value = "200",
value_parser = parse_duration
)]
pub block_time: Duration,
/// The path of the chainspec of the chain that we're spawning
#[clap(
id = "polkadot-omni-node.chain-spec-path",
long = "polkadot-omni-node.chain-spec-path"
)]
pub chain_spec_path: Option<PathBuf>,
/// The ID of the parachain that the polkadot-omni-node will spawn. This argument is required if
/// the polkadot-omni-node is one of the selected platforms for running the tests or benchmarks.
#[clap(
id = "polkadot-omni-node.parachain-id",
long = "polkadot-omni-node.parachain-id"
)]
pub parachain_id: Option<usize>,
}
/// A set of configuration parameters for the ETH RPC. /// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)] #[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct EthRpcConfiguration { pub struct EthRpcConfiguration {
@@ -1113,10 +1006,6 @@ pub struct ReportConfiguration {
/// Controls if the compiler output is included in the final report. /// Controls if the compiler output is included in the final report.
#[clap(long = "report.include-compiler-output")] #[clap(long = "report.include-compiler-output")]
pub include_compiler_output: bool, pub include_compiler_output: bool,
/// The filename to use for the report.
#[clap(long = "report.file-name")]
pub file_name: Option<String>,
} }
#[derive(Clone, Debug, Parser, Serialize, Deserialize)] #[derive(Clone, Debug, Parser, Serialize, Deserialize)]
@@ -1,5 +1,6 @@
use std::{ use std::{
collections::HashMap, collections::HashMap,
ops::ControlFlow,
sync::{ sync::{
Arc, Arc,
atomic::{AtomicUsize, Ordering}, atomic::{AtomicUsize, Ordering},
@@ -12,7 +13,6 @@ use alloy::{
json_abi::JsonAbi, json_abi::JsonAbi,
network::{Ethereum, TransactionBuilder}, network::{Ethereum, TransactionBuilder},
primitives::{Address, TxHash, U256}, primitives::{Address, TxHash, U256},
providers::Provider,
rpc::types::{ rpc::types::{
TransactionReceipt, TransactionRequest, TransactionReceipt, TransactionRequest,
trace::geth::{ trace::geth::{
@@ -22,9 +22,12 @@ use alloy::{
}, },
}; };
use anyhow::{Context as _, Result, bail}; use anyhow::{Context as _, Result, bail};
use futures::{FutureExt as _, TryFutureExt}; use futures::TryFutureExt;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_common::types::PrivateKeyAllocator; use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
types::PrivateKeyAllocator,
};
use revive_dt_format::{ use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent}, metadata::{ContractInstance, ContractPathAndIdent},
steps::{ steps::{
@@ -34,7 +37,7 @@ use revive_dt_format::{
traits::{ResolutionContext, ResolverApi}, traits::{ResolutionContext, ResolverApi},
}; };
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender}; use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Span, debug, error, field::display, info, instrument}; use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{ use crate::{
differential_benchmarks::{ExecutionState, WatcherEvent}, differential_benchmarks::{ExecutionState, WatcherEvent},
@@ -70,10 +73,6 @@ pub struct Driver<'a, I> {
/// The number of steps that were executed on the driver. /// The number of steps that were executed on the driver.
steps_executed: usize, steps_executed: usize,
/// This function controls if the driver should wait for transactions to be included in a block
/// or not before proceeding forward.
await_transaction_inclusion: bool,
/// This is the queue of steps that are to be executed by the driver for this test case. Each /// This is the queue of steps that are to be executed by the driver for this test case. Each
/// time `execute_step` is called one of the steps is executed. /// time `execute_step` is called one of the steps is executed.
steps_iterator: I, steps_iterator: I,
@@ -90,7 +89,6 @@ where
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>, private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
cached_compiler: &CachedCompiler<'a>, cached_compiler: &CachedCompiler<'a>,
watcher_tx: UnboundedSender<WatcherEvent>, watcher_tx: UnboundedSender<WatcherEvent>,
await_transaction_inclusion: bool,
steps: I, steps: I,
) -> Result<Self> { ) -> Result<Self> {
let mut this = Driver { let mut this = Driver {
@@ -106,7 +104,6 @@ where
execution_state: ExecutionState::empty(), execution_state: ExecutionState::empty(),
steps_executed: 0, steps_executed: 0,
steps_iterator: steps, steps_iterator: steps,
await_transaction_inclusion,
watcher_tx, watcher_tx,
}; };
this.init_execution_state(cached_compiler) this.init_execution_state(cached_compiler)
@@ -169,7 +166,7 @@ where
code, code,
); );
let receipt = self let receipt = self
.execute_transaction(tx, None, Duration::from_secs(5 * 60)) .execute_transaction(tx, None)
.and_then(|(_, receipt_fut)| receipt_fut) .and_then(|(_, receipt_fut)| receipt_fut)
.await .await
.inspect_err(|err| { .inspect_err(|err| {
@@ -368,30 +365,7 @@ where
let tx = step let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context()) .as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?; .await?;
Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
let (tx_hash, receipt_future) = self
.execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
.await?;
if self.await_transaction_inclusion {
let receipt = receipt_future
.await
.context("Failed while waiting for transaction inclusion in block")?;
if !receipt.status() {
error!(
?tx,
tx.hash = %receipt.transaction_hash,
?receipt,
"Encountered a failing benchmark transaction"
);
bail!(
"Encountered a failing transaction in benchmarks: {}",
receipt.transaction_hash
)
}
}
Ok(tx_hash)
} }
} }
} }
@@ -492,7 +466,6 @@ where
.collect::<Vec<_>>(); .collect::<Vec<_>>();
steps.into_iter() steps.into_iter()
}, },
await_transaction_inclusion: self.await_transaction_inclusion,
watcher_tx: self.watcher_tx.clone(), watcher_tx: self.watcher_tx.clone(),
}) })
.map(|driver| driver.execute_all()); .map(|driver| driver.execute_all());
@@ -659,7 +632,7 @@ where
}; };
let receipt = match self let receipt = match self
.execute_transaction(tx, step_path, Duration::from_secs(5 * 60)) .execute_transaction(tx, step_path)
.and_then(|(_, receipt_fut)| receipt_fut) .and_then(|(_, receipt_fut)| receipt_fut)
.await .await
{ {
@@ -704,33 +677,18 @@ where
#[instrument( #[instrument(
level = "info", level = "info",
skip_all, skip_all,
fields( fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
driver_id = self.driver_id,
transaction = ?transaction,
transaction_hash = tracing::field::Empty
),
err(Debug)
)] )]
async fn execute_transaction( async fn execute_transaction(
&self, &self,
transaction: TransactionRequest, transaction: TransactionRequest,
step_path: Option<&StepPath>, step_path: Option<&StepPath>,
receipt_wait_duration: Duration,
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> { ) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node; let node = self.platform_information.node;
let provider = node.provider().await.context("Creating provider failed")?; let transaction_hash = node
.submit_transaction(transaction)
let pending_transaction_builder = provider
.send_transaction(transaction)
.await .await
.context("Failed to submit transaction")?; .context("Failed to submit transaction")?;
let transaction_hash = *pending_transaction_builder.tx_hash();
let receipt_future = pending_transaction_builder
.with_timeout(Some(receipt_wait_duration))
.with_required_confirmations(2)
.get_receipt()
.map(|res| res.context("Failed to get the receipt of the transaction"));
Span::current().record("transaction_hash", display(transaction_hash)); Span::current().record("transaction_hash", display(transaction_hash));
info!("Submitted transaction"); info!("Submitted transaction");
@@ -743,7 +701,28 @@ where
.context("Failed to send the transaction hash to the watcher")?; .context("Failed to send the transaction hash to the watcher")?;
}; };
Ok((transaction_hash, receipt_future)) Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
} }
// endregion:Transaction Execution // endregion:Transaction Execution
} }
@@ -160,7 +160,6 @@ pub async fn handle_differential_benchmarks(
private_key_allocator, private_key_allocator,
cached_compiler.as_ref(), cached_compiler.as_ref(),
watcher_tx.clone(), watcher_tx.clone(),
context.await_transaction_inclusion,
test_definition test_definition
.case .case
.steps_iterator_for_benchmarks(context.default_repetition_count) .steps_iterator_for_benchmarks(context.default_repetition_count)
@@ -139,18 +139,23 @@ impl Watcher {
break; break;
} }
info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);
// Remove all of the transaction hashes observed in this block from the txs we // Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for. // are currently watching for.
let mut watch_for_transaction_hashes = let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await; watch_for_transaction_hashes.write().await;
let mut relevant_transactions_observed = 0;
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() { for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
let Some((step_path, submission_time)) = let Some((step_path, submission_time)) =
watch_for_transaction_hashes.remove(tx_hash) watch_for_transaction_hashes.remove(tx_hash)
else { else {
continue; continue;
}; };
relevant_transactions_observed += 1;
let transaction_information = TransactionInformation { let transaction_information = TransactionInformation {
transaction_hash: *tx_hash, transaction_hash: *tx_hash,
submission_timestamp: submission_time submission_timestamp: submission_time
@@ -167,14 +172,6 @@ impl Watcher {
) )
.expect("Can't fail") .expect("Can't fail")
} }
info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
relevant_transactions_observed,
remaining_transactions = watch_for_transaction_hashes.len(),
"Observed a block"
);
} }
info!("Watcher's Block Watching Task Finished"); info!("Watcher's Block Watching Task Finished");
@@ -409,6 +409,7 @@ where
.handle_function_call_execution(step, deployment_receipts) .handle_function_call_execution(step, deployment_receipts)
.await .await
.context("Failed to handle the function call execution")?; .context("Failed to handle the function call execution")?;
tracing::Span::current().record("block_number", execution_receipt.block_number);
let tracing_result = self let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await .await
+7 -152
View File
@@ -14,12 +14,9 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_node::{ use revive_dt_node::{
Node, Node, node_implementations::geth::GethNode,
node_implementations::{ node_implementations::lighthouse_geth::LighthouseGethNode,
geth::GethNode, lighthouse_geth::LighthouseGethNode, node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
polkadot_omni_node::PolkadotOmnichainNode, substrate::SubstrateNode,
zombienet::ZombienetNode,
},
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::info; use tracing::info;
@@ -94,8 +91,7 @@ impl Platform for GethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..)); let node = GethNode::new(context);
let node = GethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<GethNode>(node, genesis)?; let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -149,8 +145,7 @@ impl Platform for LighthouseGethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..)); let node = LighthouseGethNode::new(context);
let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?; let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -211,14 +206,12 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new( let node = SubstrateNode::new(
revive_dev_node_path, revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus), Some(revive_dev_node_consensus),
context, context,
&eth_rpc_connection_strings, &eth_rpc_connection_strings,
use_fallback_gas_filler,
); );
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
@@ -281,14 +274,12 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new( let node = SubstrateNode::new(
revive_dev_node_path, revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus), Some(revive_dev_node_consensus),
context, context,
&eth_rpc_connection_strings, &eth_rpc_connection_strings,
use_fallback_gas_filler,
); );
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
@@ -347,9 +338,7 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone(); .clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..)); let node = ZombienetNode::new(polkadot_parachain_path, context);
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -406,9 +395,7 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone(); .clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..)); let node = ZombienetNode::new(polkadot_parachain_path, context);
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?; let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
@@ -435,126 +422,6 @@ impl Platform for ZombienetRevmSolcPlatform {
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodePolkavmResolcPlatform;
impl Platform for PolkadotOmniNodePolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodePolkavmResolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodeRevmSolcPlatform;
impl Platform for PolkadotOmniNodeRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodeRevmSolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}
impl From<PlatformIdentifier> for Box<dyn Platform> { impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
@@ -572,12 +439,6 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_> Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
} }
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>, PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
Box::new(PolkadotOmniNodePolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
Box::new(PolkadotOmniNodeRevmSolcPlatform) as Box<_>
}
} }
} }
} }
@@ -599,12 +460,6 @@ impl From<PlatformIdentifier> for &dyn Platform {
&ZombienetPolkavmResolcPlatform as &dyn Platform &ZombienetPolkavmResolcPlatform as &dyn Platform
} }
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform, PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
&PolkadotOmniNodePolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
&PolkadotOmniNodeRevmSolcPlatform as &dyn Platform
}
} }
} }
} }
+6 -37
View File
@@ -2,9 +2,9 @@ mod differential_benchmarks;
mod differential_tests; mod differential_tests;
mod helpers; mod helpers;
use anyhow::{Context as _, bail}; use anyhow::Context as _;
use clap::Parser; use clap::Parser;
use revive_dt_report::{ReportAggregator, TestCaseStatus}; use revive_dt_report::ReportAggregator;
use schemars::schema_for; use schemars::schema_for;
use tracing::{info, level_filters::LevelFilter}; use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -57,22 +57,8 @@ fn main() -> anyhow::Result<()> {
let differential_tests_handling_task = let differential_tests_handling_task =
handle_differential_tests(*context, reporter); handle_differential_tests(*context, reporter);
let (_, report) = futures::future::try_join( futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
differential_tests_handling_task, .await?;
report_aggregator_task,
)
.await?;
let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
if contains_failure {
bail!("Some tests failed")
}
Ok(()) Ok(())
}), }),
@@ -85,23 +71,12 @@ fn main() -> anyhow::Result<()> {
let differential_benchmarks_handling_task = let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter); handle_differential_benchmarks(*context, reporter);
let (_, report) = futures::future::try_join( futures::future::try_join(
differential_benchmarks_handling_task, differential_benchmarks_handling_task,
report_aggregator_task, report_aggregator_task,
) )
.await?; .await?;
let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
if contains_failure {
bail!("Some benchmarks failed")
}
Ok(()) Ok(())
}), }),
Context::ExportGenesis(ref export_genesis_context) => { Context::ExportGenesis(ref export_genesis_context) => {
@@ -110,17 +85,11 @@ fn main() -> anyhow::Result<()> {
let genesis_json = serde_json::to_string_pretty(&genesis) let genesis_json = serde_json::to_string_pretty(&genesis)
.context("Failed to serialize the genesis to JSON")?; .context("Failed to serialize the genesis to JSON")?;
println!("{genesis_json}"); println!("{genesis_json}");
Ok(()) Ok(())
} }
Context::ExportJsonSchema => { Context::ExportJsonSchema => {
let schema = schema_for!(Metadata); let schema = schema_for!(Metadata);
println!( println!("{}", serde_json::to_string_pretty(&schema).unwrap());
"{}",
serde_json::to_string_pretty(&schema)
.context("Failed to export the JSON schema")?
);
Ok(()) Ok(())
} }
} }
+94 -19
View File
@@ -3,6 +3,7 @@
use std::{ use std::{
fs::{File, create_dir_all, remove_dir_all}, fs::{File, create_dir_all, remove_dir_all},
io::Read, io::Read,
ops::ControlFlow,
path::PathBuf, path::PathBuf,
pin::Pin, pin::Pin,
process::{Command, Stdio}, process::{Command, Stdio},
@@ -34,9 +35,12 @@ use anyhow::Context as _;
use futures::{FutureExt, Stream, StreamExt}; use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion; use revive_common::EVMVersion;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
use tracing::{error, instrument}; use tracing::{Instrument, error, instrument};
use revive_dt_common::fs::clear_directory; use revive_dt_common::{
fs::clear_directory,
futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
@@ -72,7 +76,6 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl GethNode { impl GethNode {
@@ -86,12 +89,17 @@ impl GethNode {
const READY_MARKER: &str = "IPC endpoint opened"; const READY_MARKER: &str = "IPC endpoint opened";
const ERROR_MARKER: &str = "Fatal:"; const ERROR_MARKER: &str = "Fatal:";
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
pub fn new( pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration> context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<WalletConfiguration> + AsRef<WalletConfiguration>
+ AsRef<GethConfiguration> + AsRef<GethConfiguration>
+ Clone, + Clone,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_configuration = let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -118,7 +126,6 @@ impl GethNode {
wallet: wallet.clone(), wallet: wallet.clone(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
use_fallback_gas_filler,
} }
} }
@@ -239,8 +246,7 @@ impl GethNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(), self.connection_string.as_str(),
FallbackGasFiller::default() FallbackGasFiller::default(),
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -331,15 +337,62 @@ impl EthereumNode for GethNode {
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = self
.provider()
.await .await
.context("Failed to create provider for transaction submission")? .context("Failed to create provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction) .send_transaction(transaction)
.await .await
.context("Encountered an error when submitting a transaction")? .inspect_err(
.get_receipt() |err| error!(%err, "Encountered an error when submitting the transaction"),
.await )
.context("Failed to get the receipt for the transaction") .context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used
// to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
// _might_ return the above error if the tx has not yet been indexed yet. So, we need to
// implement a retry mechanism for the receipt to keep retrying to get it until it
// eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially when
// a lot of transactions are being submitted to the node. Thus, while initially we only
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
// backoff each time we attempt to get the receipt and find that it's not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(transaction_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(None) => Ok(ControlFlow::Continue(())),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.instrument(tracing::info_span!(
"Awaiting transaction receipt",
?transaction_hash
))
.await
}) })
} }
@@ -350,12 +403,34 @@ impl EthereumNode for GethNode {
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = self
.provider()
.await .await
.context("Failed to create provider for tracing")? .context("Failed to create provider for tracing")?;
.debug_trace_transaction(tx_hash, trace_options) poll(
.await Self::TRACE_POLLING_DURATION,
.context("Failed to get the transaction trace") PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
let trace_options = trace_options.clone();
async move {
match provider
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.await
}) })
} }
@@ -667,7 +742,7 @@ mod tests {
fn new_node() -> (TestExecutionContext, GethNode) { fn new_node() -> (TestExecutionContext, GethNode) {
let context = test_config(); let context = test_config();
let mut node = GethNode::new(&context, true); let mut node = GethNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
@@ -12,6 +12,7 @@ use std::{
collections::{BTreeMap, HashSet}, collections::{BTreeMap, HashSet},
fs::{File, create_dir_all}, fs::{File, create_dir_all},
io::Read, io::Read,
ops::ControlFlow,
path::PathBuf, path::PathBuf,
pin::Pin, pin::Pin,
process::{Command, Stdio}, process::{Command, Stdio},
@@ -47,9 +48,12 @@ use revive_common::EVMVersion;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::serde_as; use serde_with::serde_as;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
use tracing::{info, instrument}; use tracing::{Instrument, info, instrument};
use revive_dt_common::fs::clear_directory; use revive_dt_common::{
fs::clear_directory,
futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
@@ -102,8 +106,6 @@ pub struct LighthouseGethNode {
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl LighthouseGethNode { impl LighthouseGethNode {
@@ -112,6 +114,12 @@ impl LighthouseGethNode {
const CONFIG_FILE_NAME: &str = "config.yaml"; const CONFIG_FILE_NAME: &str = "config.yaml";
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete"; const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";
pub fn new( pub fn new(
@@ -119,7 +127,6 @@ impl LighthouseGethNode {
+ AsRef<WalletConfiguration> + AsRef<WalletConfiguration>
+ AsRef<KurtosisConfiguration> + AsRef<KurtosisConfiguration>
+ Clone, + Clone,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_configuration = let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -169,7 +176,6 @@ impl LighthouseGethNode {
nonce_manager: Default::default(), nonce_manager: Default::default(),
persistent_http_provider: OnceCell::const_new(), persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(), persistent_ws_provider: OnceCell::const_new(),
use_fallback_gas_filler,
} }
} }
@@ -368,8 +374,7 @@ impl LighthouseGethNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.ws_connection_string.as_str(), self.ws_connection_string.as_str(),
FallbackGasFiller::default() FallbackGasFiller::default(),
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -471,6 +476,73 @@ impl LighthouseGethNode {
Ok(()) Ok(())
} }
fn internal_execute_transaction<'a>(
transaction: TransactionRequest,
provider: FillProvider<
impl TxFiller<Ethereum> + 'a,
impl Provider<Ethereum> + Clone + 'a,
Ethereum,
>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + 'a>> {
Box::pin(async move {
let pending_transaction = provider
.send_transaction(transaction)
.await
.inspect_err(|err| {
tracing::error!(
%err,
"Encountered an error when submitting the transaction"
)
})
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we
// used to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
// it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
// was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
// which _might_ return the above error if the tx has not yet been indexed yet. So, we
// need to implement a retry mechanism for the receipt to keep retrying to get it until
// it eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially
// when a lot of transactions are being submitted to the node. Thus, while initially we
// only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to
// allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting
// with exponential backoff each time we attempt to get the receipt and find that it's
// not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(500)),
move || {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(transaction_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(None) => Ok(ControlFlow::Continue(())),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.instrument(tracing::info_span!(
"Awaiting transaction receipt",
?transaction_hash
))
.await
})
}
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis { pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) { for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
genesis genesis
@@ -549,15 +621,11 @@ impl EthereumNode for LighthouseGethNode {
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = self
.http_provider()
.await .await
.context("Failed to create provider for transaction submission")? .context("Failed to create provider for transaction execution")?;
.send_transaction(transaction) Self::internal_execute_transaction(transaction, provider).await
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
}) })
} }
@@ -568,12 +636,35 @@ impl EthereumNode for LighthouseGethNode {
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = Arc::new(
.await self.http_provider()
.context("Failed to create provider for tracing")? .await
.debug_trace_transaction(tx_hash, trace_options) .context("Failed to create provider for tracing")?,
.await );
.context("Failed to get the transaction trace") poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
let trace_options = trace_options.clone();
async move {
match provider
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.await
}) })
} }
@@ -1061,7 +1152,7 @@ mod tests {
let _guard = NODE_START_MUTEX.lock().unwrap(); let _guard = NODE_START_MUTEX.lock().unwrap();
let context = test_config(); let context = test_config();
let mut node = LighthouseGethNode::new(&context, true); let mut node = LighthouseGethNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
@@ -1,5 +1,4 @@
pub mod geth; pub mod geth;
pub mod lighthouse_geth; pub mod lighthouse_geth;
pub mod polkadot_omni_node;
pub mod substrate; pub mod substrate;
pub mod zombienet; pub mod zombienet;
@@ -1,791 +0,0 @@
use std::{
fs::{File, create_dir_all, remove_dir_all},
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
},
time::Duration,
};
use alloy::{
eips::BlockNumberOrTag,
genesis::Genesis,
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde_json::json;
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
};
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::{instrument, trace};
use crate::{
Node,
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};
/// Monotonically increasing counter used to hand out a unique id to every node
/// spawned in this process; the id also offsets the per-node RPC ports.
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The number of blocks that should be cached by the polkadot-omni-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
/// A node implementation for the polkadot-omni-node.
///
/// Manages two child processes: the `polkadot-omni-node` itself and the
/// `eth-rpc` adapter that exposes an Ethereum JSON-RPC endpoint in front of it.
#[derive(Debug)]
pub struct PolkadotOmnichainNode {
    /// The id of the node.
    id: u32,
    /// The path of the polkadot-omni-chain node binary.
    polkadot_omnichain_node_binary_path: PathBuf,
    /// The path of the eth-rpc binary.
    eth_rpc_binary_path: PathBuf,
    /// The path of the chain-spec JSON file that this node will be spawned with.
    /// It is read and patched with funded accounts in `node_genesis`.
    chain_spec_path: Option<PathBuf>,
    /// The path of the base directory which contains all of the stored data for this node.
    base_directory_path: PathBuf,
    /// The path of the logs directory which contains all of the stored logs.
    logs_directory_path: PathBuf,
    /// Defines the amount of time to wait before considering that the node start has timed out.
    node_start_timeout: Duration,
    /// The id of the parachain that this node will be spawning.
    parachain_id: Option<usize>,
    /// The block time.
    block_time: Duration,
    /// The node's process.
    polkadot_omnichain_node_process: Option<Process>,
    /// The eth-rpc's process.
    eth_rpc_process: Option<Process>,
    /// The URL of the eth-rpc. Empty until `spawn_process` assigns it.
    rpc_url: String,
    /// The wallet object that's used to sign any transaction submitted through this node.
    wallet: Arc<EthereumWallet>,
    /// The nonce manager used to populate nonces for all transactions submitted through this node.
    nonce_manager: CachedNonceManager,
    /// The provider used for all RPC interactions with the RPC of this node.
    /// Lazily constructed exactly once via the `OnceCell`.
    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
    /// A boolean that controls if the fallback gas filler should be used or not.
    use_fallback_gas_filler: bool,
}
impl PolkadotOmnichainNode {
    /// Directory (under the working directory) that holds all per-node state.
    const BASE_DIRECTORY: &str = "polkadot-omni-node";
    /// Sub-directory of the base directory that holds the process logs.
    const LOGS_DIRECTORY: &str = "logs";
    /// Log line that signals that the polkadot-omni-node RPC server is up.
    const POLKADOT_OMNICHAIN_NODE_READY_MARKER: &str = "Running JSON-RPC server";
    /// Log line that signals that the eth-rpc server is up.
    const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
    /// File name of the chainspec written by `init` and consumed by `spawn_process`.
    const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
    /// Base node RPC port; each node listens on `base + id` to avoid collisions.
    const BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT: u16 = 9944;
    /// Base eth-rpc port; each node listens on `base + id` to avoid collisions.
    const BASE_ETH_RPC_PORT: u16 = 8545;
    /// `RUST_LOG` value for the polkadot-omni-node process.
    const POLKADOT_OMNICHAIN_NODE_LOG_ENV: &str =
        "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
    /// `RUST_LOG` value for the eth-rpc process.
    const RPC_LOG_ENV: &str = "info,eth-rpc=debug";

    /// Creates a new (not yet spawned) node.
    ///
    /// Derives a unique id from the global `NODE_COUNT` counter and builds the
    /// per-node base and logs directories from it. No processes are started
    /// here; call `init` and `spawn_process` (via `Node::spawn`) for that.
    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
        + AsRef<EthRpcConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<PolkadotOmnichainNodeConfiguration>,
        use_fallback_gas_filler: bool,
    ) -> Self {
        let polkadot_omnichain_node_configuration =
            AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
        let working_directory_path =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
        let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context)
            .path
            .as_path();
        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();

        // Each node gets a fresh id; this also offsets the two RPC ports.
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
        let base_directory = working_directory_path
            .join(Self::BASE_DIRECTORY)
            .join(id.to_string());
        let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);

        Self {
            id,
            polkadot_omnichain_node_binary_path: polkadot_omnichain_node_configuration
                .path
                .to_path_buf(),
            eth_rpc_binary_path: eth_rpc_path.to_path_buf(),
            chain_spec_path: polkadot_omnichain_node_configuration
                .chain_spec_path
                .clone(),
            base_directory_path: base_directory,
            logs_directory_path: logs_directory,
            parachain_id: polkadot_omnichain_node_configuration.parachain_id,
            block_time: polkadot_omnichain_node_configuration.block_time,
            polkadot_omnichain_node_process: Default::default(),
            eth_rpc_process: Default::default(),
            rpc_url: Default::default(),
            wallet,
            nonce_manager: Default::default(),
            provider: Default::default(),
            use_fallback_gas_filler,
            node_start_timeout: polkadot_omnichain_node_configuration.start_timeout_ms,
        }
    }

    /// Wipes and recreates the node's directories, then writes the patched
    /// chainspec (see `node_genesis`) to `CHAIN_SPEC_JSON_FILE`.
    ///
    /// The `Genesis` argument is ignored: the genesis state comes entirely
    /// from the chainspec file.
    fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
        trace!("Removing the various directories");
        // Removal failures are ignored: the directories may simply not exist yet.
        let _ = remove_dir_all(self.base_directory_path.as_path());
        let _ = clear_directory(&self.base_directory_path);
        let _ = clear_directory(&self.logs_directory_path);

        trace!("Creating the various directories");
        create_dir_all(&self.base_directory_path)
            .context("Failed to create base directory for polkadot-omni-node node")?;
        create_dir_all(&self.logs_directory_path)
            .context("Failed to create logs directory for polkadot-omni-node node")?;

        let template_chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
        let chainspec_json = Self::node_genesis(
            &self.wallet,
            self.chain_spec_path
                .as_ref()
                .context("No runtime path provided")?,
        )
        .context("Failed to prepare the chainspec command")?;
        serde_json::to_writer_pretty(
            std::fs::File::create(&template_chainspec_path)
                .context("Failed to create polkadot-omni-node template chainspec file")?,
            &chainspec_json,
        )
        .context("Failed to write polkadot-omni-node template chainspec JSON")?;

        Ok(self)
    }

    /// Spawns the polkadot-omni-node and eth-rpc child processes and waits for
    /// each to log its readiness marker.
    ///
    /// Errors if the chain-spec path or parachain id were not provided. On a
    /// failed start of either process, the node is shut down gracefully before
    /// the error is returned.
    fn spawn_process(&mut self) -> anyhow::Result<()> {
        // Error out if the runtime's path or the parachain id are not set which means that the
        // arguments we require were not provided.
        self.chain_spec_path
            .as_ref()
            .context("No WASM path provided for the runtime")?;
        self.parachain_id
            .as_ref()
            .context("No argument provided for the parachain-id")?;

        // Ports are offset by the node id so multiple nodes can coexist.
        let polkadot_omnichain_node_rpc_port =
            Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
        let eth_rpc_port = Self::BASE_ETH_RPC_PORT + self.id as u16;
        let chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
        self.rpc_url = format!("http://127.0.0.1:{eth_rpc_port}");

        let polkadot_omnichain_node_process = Process::new(
            "node",
            self.logs_directory_path.as_path(),
            self.polkadot_omnichain_node_binary_path.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--log")
                    .arg(Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
                    .arg("--dev-block-time")
                    .arg(self.block_time.as_millis().to_string())
                    .arg("--rpc-port")
                    .arg(polkadot_omnichain_node_rpc_port.to_string())
                    .arg("--base-path")
                    .arg(self.base_directory_path.as_path())
                    .arg("--no-prometheus")
                    .arg("--no-hardware-benchmarks")
                    .arg("--authoring")
                    .arg("slot-based")
                    .arg("--chain")
                    .arg(chainspec_path)
                    .arg("--name")
                    .arg(format!("polkadot-omni-node-{}", self.id))
                    .arg("--rpc-methods")
                    .arg("unsafe")
                    .arg("--rpc-cors")
                    .arg("all")
                    .arg("--rpc-max-connections")
                    .arg(u32::MAX.to_string())
                    .arg("--pool-limit")
                    .arg(u32::MAX.to_string())
                    .arg("--pool-kbytes")
                    .arg(u32::MAX.to_string())
                    .arg("--state-pruning")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .env("RUST_LOG", Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
                    .stdout(stdout_file)
                    .stderr(stderr_file);
            },
            // Readiness: scan stderr for the "Running JSON-RPC server" marker.
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: self.node_start_timeout,
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => Ok(line.contains(Self::POLKADOT_OMNICHAIN_NODE_READY_MARKER)),
                    None => Ok(false),
                }),
            },
        );
        match polkadot_omnichain_node_process {
            Ok(process) => self.polkadot_omnichain_node_process = Some(process),
            Err(err) => {
                tracing::error!(
                    ?err,
                    "Failed to start polkadot-omni-node, shutting down gracefully"
                );
                self.shutdown().context(
                    "Failed to gracefully shutdown after polkadot-omni-node start error",
                )?;
                return Err(err);
            }
        }

        let eth_rpc_process = Process::new(
            "eth-rpc",
            self.logs_directory_path.as_path(),
            self.eth_rpc_binary_path.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--dev")
                    .arg("--rpc-port")
                    .arg(eth_rpc_port.to_string())
                    .arg("--node-rpc-url")
                    .arg(format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}"))
                    .arg("--rpc-max-connections")
                    .arg(u32::MAX.to_string())
                    .arg("--index-last-n-blocks")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .arg("--cache-size")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .env("RUST_LOG", Self::RPC_LOG_ENV)
                    .stdout(stdout_file)
                    .stderr(stderr_file);
            },
            // NOTE(review): unlike the node above, this wait is a fixed 30s
            // rather than `node_start_timeout` — confirm this is intentional.
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: Duration::from_secs(30),
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
                    None => Ok(false),
                }),
            },
        );
        match eth_rpc_process {
            Ok(process) => self.eth_rpc_process = Some(process),
            Err(err) => {
                tracing::error!(?err, "Failed to start eth-rpc, shutting down gracefully");
                self.shutdown()
                    .context("Failed to gracefully shutdown after eth-rpc start error")?;
                return Err(err);
            }
        }

        Ok(())
    }

    /// Maps an Ethereum address to its SS58 Substrate address by padding the
    /// 20 address bytes to 32 with `0xEE` filler bytes.
    fn eth_to_substrate_address(address: &Address) -> String {
        let eth_bytes = address.0.0;
        let mut padded = [0xEEu8; 32];
        padded[..20].copy_from_slice(&eth_bytes);
        let account_id = AccountId32::from(padded);
        account_id.to_ss58check()
    }

    /// Returns the trimmed output of `eth-rpc --version`.
    pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.eth_rpc_binary_path)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()?
            .wait_with_output()?
            .stdout;
        Ok(String::from_utf8_lossy(&output).trim().to_string())
    }

    /// Lazily constructs (once) and returns the concurrency-limited provider
    /// pointed at this node's eth-rpc URL, with gas/chain-id/nonce fillers and
    /// the node's wallet attached.
    async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
        self.provider
            .get_or_try_init(|| async move {
                construct_concurrency_limited_provider::<Ethereum, _>(
                    self.rpc_url.as_str(),
                    FallbackGasFiller::default()
                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                    ChainIdFiller::default(),
                    NonceFiller::new(self.nonce_manager.clone()),
                    self.wallet.clone(),
                )
                .await
                .context("Failed to construct the provider")
            })
            .await
            .cloned()
    }

    /// Reads the chainspec JSON at `chain_spec_path` and appends an
    /// `INITIAL_BALANCE` endowment for every signer address in `wallet` to the
    /// genesis balances, returning the patched JSON.
    pub fn node_genesis(
        wallet: &EthereumWallet,
        chain_spec_path: &Path,
    ) -> anyhow::Result<serde_json::Value> {
        let unmodified_chainspec_file =
            File::open(chain_spec_path).context("Failed to open the unmodified chainspec file")?;
        let mut chainspec_json =
            serde_json::from_reader::<_, serde_json::Value>(&unmodified_chainspec_file)
                .context("Failed to read the unmodified chainspec JSON")?;
        // NOTE(review): assumes the chainspec always contains this balances
        // array — a chainspec without it panics here.
        let existing_chainspec_balances =
            chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
                .as_array_mut()
                .expect("Can't fail");
        for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
            let substrate_address = Self::eth_to_substrate_address(&address);
            let balance = INITIAL_BALANCE;
            existing_chainspec_balances.push(json!((substrate_address, balance)));
        }
        Ok(chainspec_json)
    }
}
impl EthereumNode for PolkadotOmnichainNode {
    /// No setup is required before transactions on this node.
    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
        Box::pin(async move { Ok(()) })
    }

    /// The node's unique id.
    fn id(&self) -> usize {
        self.id as _
    }

    /// The eth-rpc URL of this node (empty until the process is spawned).
    fn connection_string(&self) -> &str {
        &self.rpc_url
    }

    /// Submits a transaction without waiting for it to be mined and returns
    /// its hash.
    fn submit_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create the provider for transaction submission")?;
            let pending_transaction = provider
                .send_transaction(transaction)
                .await
                .context("Failed to submit the transaction through the provider")?;
            Ok(*pending_transaction.tx_hash())
        })
    }

    /// Fetches the receipt of a transaction by hash; errors if the node does
    /// not (yet) have a receipt for it.
    fn get_receipt(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for getting the receipt")?
                .get_transaction_receipt(tx_hash)
                .await
                .context("Failed to get the receipt of the transaction")?
                .context("Failed to get the receipt of the transaction")
        })
    }

    /// Submits a transaction and waits for its receipt.
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for transaction submission")?
                .send_transaction(transaction)
                .await
                .context("Encountered an error when submitting a transaction")?
                .get_receipt()
                .await
                .context("Failed to get the receipt for the transaction")
        })
    }

    /// Runs `debug_traceTransaction` for the given hash with the supplied
    /// tracing options.
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for debug tracing")?
                .debug_trace_transaction(tx_hash, trace_options)
                .await
                .context("Failed to obtain debug trace from eth-proxy")
        })
    }

    /// Computes the pre/post state diff of a transaction via the prestate
    /// tracer in diff mode.
    fn state_diff(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
        Box::pin(async move {
            let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
                diff_mode: Some(true),
                disable_code: None,
                disable_storage: None,
            });
            match self
                .trace_transaction(tx_hash, trace_options)
                .await?
                .try_into_pre_state_frame()?
            {
                PreStateFrame::Diff(diff) => Ok(diff),
                _ => anyhow::bail!("expected a diff mode trace"),
            }
        })
    }

    /// Returns the balance of the given address at the latest block.
    fn balance_of(
        &self,
        address: Address,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the eth-rpc provider")?
                .get_balance(address)
                .await
                .map_err(Into::into)
        })
    }

    /// Returns an EIP-1186 state proof for the address/keys at the latest block.
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the eth-rpc provider")?
                .get_proof(address, keys)
                .latest()
                .await
                .map_err(Into::into)
        })
    }

    /// Builds a [`PolkadotOmnichainNodeResolver`] backed by this node's provider.
    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Arc::new(PolkadotOmnichainNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    /// This node targets the Cancun EVM version.
    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }

    /// Subscribes to all Substrate blocks via subxt and joins each one with the
    /// corresponding Ethereum block from the eth-rpc, yielding combined mined
    /// block information (gas plus ref-time/proof-size weights).
    fn subscribe_to_full_blocks_information(
        &self,
    ) -> Pin<
        Box<
            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                + '_,
        >,
    > {
        // Statically generated subxt API from the bundled revive metadata.
        #[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
        pub mod revive {}

        Box::pin(async move {
            let polkadot_omnichain_node_rpc_port =
                Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
            let polkadot_omnichain_node_rpc_url =
                format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}");
            let api = OnlineClient::<SubstrateConfig>::from_url(polkadot_omnichain_node_rpc_url)
                .await
                .context("Failed to create subxt rpc client")?;
            let provider = self.provider().await.context("Failed to create provider")?;
            let block_stream = api
                .blocks()
                .subscribe_all()
                .await
                .context("Failed to subscribe to blocks")?;
            let mined_block_information_stream = block_stream.filter_map(move |block| {
                let api = api.clone();
                let provider = provider.clone();
                async move {
                    // Errored stream items are silently dropped by filter_map.
                    let substrate_block = block.ok()?;
                    // Fetch the Ethereum view of the same block number.
                    let revive_block = provider
                        .get_block_by_number(
                            BlockNumberOrTag::Number(substrate_block.number() as _),
                        )
                        .await
                        .expect("TODO: Remove")
                        .expect("TODO: Remove");
                    // Sum the consumed weight over all dispatch classes.
                    let used = api
                        .storage()
                        .at(substrate_block.reference())
                        .fetch_or_default(&revive::storage().system().block_weight())
                        .await
                        .expect("TODO: Remove");
                    let block_ref_time = (used.normal.ref_time as u128)
                        + (used.operational.ref_time as u128)
                        + (used.mandatory.ref_time as u128);
                    let block_proof_size = (used.normal.proof_size as u128)
                        + (used.operational.proof_size as u128)
                        + (used.mandatory.proof_size as u128);
                    let limits = api
                        .constants()
                        .at(&revive::constants().system().block_weights())
                        .expect("TODO: Remove");
                    let max_ref_time = limits.max_block.ref_time;
                    let max_proof_size = limits.max_block.proof_size;
                    Some(MinedBlockInformation {
                        ethereum_block_information: EthereumMinedBlockInformation {
                            block_number: revive_block.number(),
                            block_timestamp: revive_block.header.timestamp,
                            mined_gas: revive_block.header.gas_used as _,
                            block_gas_limit: revive_block.header.gas_limit as _,
                            transaction_hashes: revive_block
                                .transactions
                                .into_hashes()
                                .as_hashes()
                                .expect("Must be hashes")
                                .to_vec(),
                        },
                        substrate_block_information: Some(SubstrateMinedBlockInformation {
                            ref_time: block_ref_time,
                            max_ref_time,
                            proof_size: block_proof_size,
                            max_proof_size,
                        }),
                        tx_counts: Default::default(),
                    })
                }
            });
            Ok(Box::pin(mined_block_information_stream)
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }

    /// Type-erased form of the inherent `provider` method.
    fn provider(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
    {
        Box::pin(
            // Inherent methods take precedence, so this calls the inherent
            // async `provider`, not this trait method recursively.
            self.provider()
                .map(|provider| provider.map(|provider| provider.erased())),
        )
    }
}
/// Resolver for chain metadata (chain id, gas limits, block fields) queried
/// through a [`PolkadotOmnichainNode`]'s eth-rpc provider.
pub struct PolkadotOmnichainNodeResolver {
    // The id of the owning node; used in the tracing spans of this resolver.
    id: u32,
    // The provider used for all RPC queries issued by this resolver.
    provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
impl ResolverApi for PolkadotOmnichainNodeResolver {
    /// Returns the chain id reported by the eth-rpc.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn chain_id(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
    }

    /// Returns the effective gas price from the transaction's receipt.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_transaction_receipt(tx_hash)
                .await?
                .context("Failed to get the transaction receipt")
                .map(|receipt| receipt.effective_gas_price)
        })
    }

    /// Returns the gas limit of the given block.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.gas_limit as _)
        })
    }

    /// Returns the beneficiary (coinbase) address of the given block.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.beneficiary)
        })
    }

    /// Returns the block "difficulty", read from the header's mix-hash bytes.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
        })
    }

    /// Returns the base fee per gas of the given block; errors if the block
    /// has no base fee field.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .and_then(|block| {
                    block
                        .header
                        .base_fee_per_gas
                        .context("Failed to get the base fee per gas")
                })
        })
    }

    /// Returns the hash of the given block.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.hash)
        })
    }

    /// Returns the timestamp of the given block.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.timestamp)
        })
    }

    /// Returns the latest block number known to the eth-rpc.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
        Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
    }
}
impl Node for PolkadotOmnichainNode {
    /// Stops both child processes (dropping the `Process` handles) and removes
    /// the node's database directory so subsequent runs start from a clean
    /// state. Always returns `Ok`.
    fn shutdown(&mut self) -> anyhow::Result<()> {
        drop(self.polkadot_omnichain_node_process.take());
        drop(self.eth_rpc_process.take());
        // Remove the node's database so that subsequent runs do not run on the same database. We
        // ignore the error just in case the directory didn't exist in the first place and therefore
        // there's nothing to be deleted.
        let _ = remove_dir_all(self.base_directory_path.join("data"));
        Ok(())
    }

    /// Prepares the directories/chainspec and spawns the node processes.
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()
    }

    /// Returns the version string reported by `polkadot-omni-node --version`.
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.polkadot_omnichain_node_binary_path)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()
            // Fixed error contexts: they previously said "substrate --version"
            // although the binary invoked here is the polkadot-omni-node.
            .context("Failed to spawn polkadot-omni-node --version")?
            .wait_with_output()
            .context("Failed to wait for polkadot-omni-node --version")?
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }
}
impl Drop for PolkadotOmnichainNode {
fn drop(&mut self) {
self.shutdown().expect("Failed to shutdown")
}
}
@@ -47,9 +47,12 @@ use tracing::{instrument, trace};
use crate::{ use crate::{
Node, Node,
constants::INITIAL_BALANCE, constants::{CHAIN_ID, INITIAL_BALANCE},
helpers::{Process, ProcessReadinessWaitBehavior}, helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
}; };
static NODE_COUNT: AtomicU32 = AtomicU32::new(0); static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -76,7 +79,6 @@ pub struct SubstrateNode {
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
consensus: Option<String>, consensus: Option<String>,
use_fallback_gas_filler: bool,
} }
impl SubstrateNode { impl SubstrateNode {
@@ -103,7 +105,6 @@ impl SubstrateNode {
+ AsRef<EthRpcConfiguration> + AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>, + AsRef<WalletConfiguration>,
existing_connection_strings: &[String], existing_connection_strings: &[String],
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let working_directory_path = let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path(); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -136,7 +137,6 @@ impl SubstrateNode {
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
consensus, consensus,
use_fallback_gas_filler,
} }
} }
@@ -324,9 +324,8 @@ impl SubstrateNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(), self.rpc_url.as_str(),
FallbackGasFiller::default() FallbackGasFiller::new(u64::MAX, 50_000_000_000, 1_000_000_000),
.with_fallback_mechanism(self.use_fallback_gas_filler), ChainIdFiller::new(Some(CHAIN_ID)),
ChainIdFiller::default(),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
) )
@@ -431,15 +430,11 @@ impl EthereumNode for SubstrateNode {
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = self
.provider()
.await .await
.context("Failed to create provider for transaction submission")? .context("Failed to create the provider")?;
.send_transaction(transaction) execute_transaction(provider, transaction).await
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
}) })
} }
@@ -830,7 +825,6 @@ mod tests {
None, None,
&context, &context,
&[], &[],
true,
); );
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
@@ -902,7 +896,6 @@ mod tests {
None, None,
&context, &context,
&[], &[],
true,
); );
// Call `init()` // Call `init()`
@@ -76,7 +76,10 @@ use crate::{
Node, Node,
constants::INITIAL_BALANCE, constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior}, helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
}; };
static NODE_COUNT: AtomicU32 = AtomicU32::new(0); static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -111,8 +114,6 @@ pub struct ZombienetNode {
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
} }
impl ZombienetNode { impl ZombienetNode {
@@ -136,7 +137,6 @@ impl ZombienetNode {
context: impl AsRef<WorkingDirectoryConfiguration> context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration> + AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>, + AsRef<WalletConfiguration>,
use_fallback_gas_filler: bool,
) -> Self { ) -> Self {
let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context) let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context)
.path .path
@@ -164,7 +164,6 @@ impl ZombienetNode {
connection_string: String::new(), connection_string: String::new(),
node_rpc_port: None, node_rpc_port: None,
provider: Default::default(), provider: Default::default(),
use_fallback_gas_filler,
} }
} }
@@ -331,8 +330,7 @@ impl ZombienetNode {
.get_or_try_init(|| async move { .get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(), self.connection_string.as_str(),
FallbackGasFiller::default() FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::default(), // TODO: use CHAIN_ID constant ChainIdFiller::default(), // TODO: use CHAIN_ID constant
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
@@ -430,18 +428,14 @@ impl EthereumNode for ZombienetNode {
fn execute_transaction( fn execute_transaction(
&self, &self,
transaction: TransactionRequest, transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
self.provider() let provider = self
.provider()
.await .await
.context("Failed to create provider for transaction submission")? .context("Failed to create the provider")?;
.send_transaction(transaction) execute_transaction(provider, transaction).await
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
}) })
} }
@@ -829,7 +823,6 @@ mod tests {
let mut node = ZombienetNode::new( let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
let genesis = context.genesis_configuration.genesis().unwrap().clone(); let genesis = context.genesis_configuration.genesis().unwrap().clone();
node.init(genesis).unwrap(); node.init(genesis).unwrap();
@@ -943,7 +936,6 @@ mod tests {
let node = ZombienetNode::new( let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
// Act // Act
@@ -964,7 +956,6 @@ mod tests {
let node = ZombienetNode::new( let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(), context.polkadot_parachain_configuration.path.clone(),
&context, &context,
true,
); );
// Act // Act
@@ -1,69 +1,42 @@
use alloy::{ use alloy::{
eips::BlockNumberOrTag,
network::{Network, TransactionBuilder}, network::{Network, TransactionBuilder},
providers::{ providers::{
Provider, SendableTx, Provider, SendableTx,
ext::DebugApi, fillers::{GasFiller, TxFiller},
fillers::{GasFillable, GasFiller, TxFiller},
}, },
rpc::types::trace::geth::{ transports::TransportResult,
GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
GethDebugTracingOptions,
},
transports::{RpcError, TransportResult},
}; };
/// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions. // Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
/// const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
/// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
/// transaction's dry run fails due to it reverting allowing us to get gas estimates even for
/// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite revert. Since we're expected to run a number of
/// assertions on these reverting transactions we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If it fails, then we perform a debug trace of the transaction to find out how much gas the
/// transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// We then fill in these values into the transaction.
///
/// The previous implementation of this fallback gas filler relied on making use of default values
/// for the gas limit in order to be able to submit the reverting transactions to the network. But,
/// it introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller {
/// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
inner: GasFiller,
/// A [`bool`] that controls if the fallback mechanism is enabled or not. #[derive(Clone, Debug)]
enable_fallback_mechanism: bool, pub struct FallbackGasFiller {
inner: GasFiller,
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
} }
impl FallbackGasFiller { impl FallbackGasFiller {
pub fn new() -> Self { pub fn new(
default_gas_limit: u64,
default_max_fee_per_gas: u128,
default_priority_fee: u128,
) -> Self {
Self { Self {
inner: Default::default(), inner: GasFiller,
enable_fallback_mechanism: true, default_gas_limit,
default_max_fee_per_gas,
default_priority_fee,
} }
} }
}
pub fn with_fallback_mechanism(mut self, enable: bool) -> Self { impl Default for FallbackGasFiller {
self.enable_fallback_mechanism = enable; fn default() -> Self {
self FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
}
pub fn with_fallback_mechanism_enabled(self) -> Self {
self.with_fallback_mechanism(true)
}
pub fn with_fallback_mechanism_disabled(self) -> Self {
self.with_fallback_mechanism(false)
} }
} }
@@ -71,84 +44,27 @@ impl<N> TxFiller<N> for FallbackGasFiller
where where
N: Network, N: Network,
{ {
type Fillable = <GasFiller as TxFiller<N>>::Fillable; type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
fn status( fn status(
&self, &self,
tx: &<N as Network>::TransactionRequest, tx: &<N as Network>::TransactionRequest,
) -> alloy::providers::fillers::FillerControlFlow { ) -> alloy::providers::fillers::FillerControlFlow {
TxFiller::<N>::status(&self.inner, tx) <GasFiller as TxFiller<N>>::status(&self.inner, tx)
} }
fn fill_sync(&self, _: &mut SendableTx<N>) {} fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
async fn prepare<P: Provider<N>>( async fn prepare<P: Provider<N>>(
&self, &self,
provider: &P, provider: &P,
tx: &<N as Network>::TransactionRequest, tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> { ) -> TransportResult<Self::Fillable> {
match ( match self.inner.prepare(provider, tx).await {
self.inner.prepare(provider, tx).await, Ok(fill) => Ok(Some(fill)),
self.enable_fallback_mechanism, Err(err) => {
) { tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
// Return the same thing if either this calls succeeds, or if the call falls and the Ok(None)
// fallback mechanism is disabled.
(rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
(Err(..), true) => {
// Perform a trace of the transaction.
let trace = provider
.debug_trace_call(
tx.clone(),
BlockNumberOrTag::Latest.into(),
GethDebugTracingCallOptions {
tracing_options: GethDebugTracingOptions {
tracer: Some(GethDebugTracerType::BuiltInTracer(
GethDebugBuiltInTracerType::CallTracer,
)),
..Default::default()
},
state_overrides: Default::default(),
block_overrides: Default::default(),
tx_index: Default::default(),
},
)
.await?
.try_into_call_frame()
.map_err(|err| {
RpcError::local_usage_str(
format!("Expected a callframe trace, but got: {err:?}").as_str(),
)
})?;
let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
RpcError::local_usage_str(
"Transaction trace returned a value of gas used that exceeds u64",
)
})?;
let gas_limit = gas_used.saturating_mul(2);
if let Some(gas_price) = tx.gas_price() {
return Ok(GasFillable::Legacy {
gas_limit,
gas_price,
});
}
let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
(tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
{
alloy::eips::eip1559::Eip1559Estimation {
max_fee_per_gas,
max_priority_fee_per_gas,
}
} else {
provider.estimate_eip1559_fees().await?
};
Ok(GasFillable::Eip1559 {
gas_limit,
estimate,
})
} }
} }
} }
@@ -156,14 +72,27 @@ where
async fn fill( async fn fill(
&self, &self,
fillable: Self::Fillable, fillable: Self::Fillable,
tx: SendableTx<N>, mut tx: alloy::providers::SendableTx<N>,
) -> TransportResult<SendableTx<N>> { ) -> TransportResult<SendableTx<N>> {
self.inner.fill(fillable, tx).await if let Some(fill) = fillable {
} let mut tx = self.inner.fill(fill, tx).await?;
} if let Some(builder) = tx.as_mut_builder() {
if let Some(estimated) = builder.gas_limit() {
impl Default for FallbackGasFiller { let padded = estimated
fn default() -> Self { .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
Self::new() .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
.unwrap_or(u64::MAX);
builder.set_gas_limit(padded);
}
}
Ok(tx)
} else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
builder.set_max_priority_fee_per_gas(self.default_priority_fee);
}
Ok(tx)
}
} }
} }
-2
View File
@@ -1,9 +1,7 @@
mod concurrency_limiter; mod concurrency_limiter;
mod fallback_gas_filler; mod fallback_gas_filler;
mod provider; mod provider;
mod receipt_retry_layer;
pub use concurrency_limiter::*; pub use concurrency_limiter::*;
pub use fallback_gas_filler::*; pub use fallback_gas_filler::*;
pub use provider::*; pub use provider::*;
pub use receipt_retry_layer::*;
+73 -5
View File
@@ -1,16 +1,18 @@
use std::sync::LazyLock; use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
use alloy::{ use alloy::{
network::{Network, NetworkWallet, TransactionBuilder4844}, network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
providers::{ providers::{
Identity, ProviderBuilder, RootProvider, Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller}, fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
}, },
rpc::client::ClientBuilder, rpc::client::ClientBuilder,
}; };
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::{Instrument, debug, info, info_span};
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller, RetryLayer}; use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
pub type ConcreteProvider<N, W> = FillProvider< pub type ConcreteProvider<N, W> = FillProvider<
JoinFill< JoinFill<
@@ -46,7 +48,6 @@ where
let client = ClientBuilder::default() let client = ClientBuilder::default()
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone()) .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
.layer(RetryLayer::default())
.connect(rpc_url) .connect(rpc_url)
.await .await
.context("Failed to construct the RPC client")?; .context("Failed to construct the RPC client")?;
@@ -62,3 +63,70 @@ where
Ok(provider) Ok(provider)
} }
/// Fills, submits, and confirms a transaction, returning its receipt.
///
/// The submission tolerates a "Transaction Already Imported" error (the transaction is
/// then watched under its already-known hash), waits for inclusion, and then polls for
/// the receipt since some nodes may not serve the receipt immediately after inclusion.
///
/// # Errors
///
/// Returns an error if filling, envelope conversion, or submission fails, or if either
/// the inclusion watch or the receipt polling times out.
pub async fn execute_transaction<N, W>(
    provider: ConcreteProvider<N, W>,
    transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
    N: Network<
        TransactionRequest: TransactionBuilder4844,
        TxEnvelope = <Ethereum as Network>::TxEnvelope,
    >,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // How long to wait for the transaction to be included in a block.
    const INCLUSION_TIMEOUT: Duration = Duration::from_secs(120);
    // Total time and per-attempt spacing for the receipt polling loop.
    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(60);
    const RECEIPT_POLLING_INTERVAL: Duration = Duration::from_secs(3);

    let sendable_transaction = provider
        .fill(transaction)
        .await
        .context("Failed to fill transaction")?;
    let transaction_envelope = sendable_transaction
        .try_into_envelope()
        .context("Failed to convert transaction into an envelope")?;
    let tx_hash = *transaction_envelope.tx_hash();

    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
        Ok(pending_transaction) => pending_transaction,
        Err(error) => {
            // The node may report the transaction as already known (e.g. on a resubmission);
            // in that case watch the existing transaction instead of failing the execution.
            let error_string = error.to_string();
            if error_string.contains("Transaction Already Imported") {
                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
            } else {
                // `with_context` defers the format! allocation to the error path only.
                return Err(error)
                    .with_context(|| format!("Failed to submit transaction {tx_hash}"));
            }
        }
    };
    debug!(%tx_hash, "Submitted Transaction");

    pending_transaction.set_timeout(Some(INCLUSION_TIMEOUT));
    let tx_hash = pending_transaction
        .watch()
        .await
        .with_context(|| format!("Transaction inclusion watching timeout for {tx_hash}"))?;

    // The receipt may not be queryable right after inclusion (e.g. before the node has
    // indexed the block), so poll for it instead of issuing a single request.
    poll(
        RECEIPT_POLLING_DURATION,
        PollingWaitBehavior::Constant(RECEIPT_POLLING_INTERVAL),
        || {
            let provider = provider.clone();
            async move {
                match provider.get_transaction_receipt(tx_hash).await {
                    Ok(Some(receipt)) => {
                        info!("Found the transaction receipt");
                        Ok(ControlFlow::Break(receipt))
                    }
                    _ => Ok(ControlFlow::Continue(())),
                }
            }
        },
    )
    .instrument(info_span!("Polling for receipt", %tx_hash))
    .await
    .with_context(|| format!("Polling for receipt failed for {tx_hash}"))
}
@@ -1,158 +0,0 @@
use std::time::Duration;
use alloy::{
network::{AnyNetwork, Network},
rpc::json_rpc::{RequestPacket, ResponsePacket},
transports::{TransportError, TransportErrorKind, TransportFut},
};
use tokio::time::{interval, timeout};
use tower::{Layer, Service};
/// A layer that allows for automatic retries when getting a transaction receipt.
///
/// There are certain cases where getting the receipt of a committed transaction might fail. In Geth
/// this can happen if the transaction has been committed to the ledger but has not been indexed
/// yet; in the substrate and revive stack it can also happen for other reasons.
///
/// Therefore, just because the first attempt to get the receipt (after transaction confirmation)
/// has failed, it doesn't mean that it will continue to fail. This layer can be added to any alloy
/// provider to allow the provider to retry getting the receipt for some period of time before it
/// considers that a timeout. It attempts to poll for the receipt for the `polling_duration` with an
/// interval of `polling_interval` between each poll. If by the end of the `polling_duration` it was
/// not able to get the receipt successfully then this is considered to be a timeout.
///
/// Additionally, this layer allows for retries of other RPC methods such as all tracing methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryLayer {
    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,
    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}
impl RetryLayer {
    /// Creates a retry layer with an explicit total polling duration and poll interval.
    pub fn new(polling_duration: Duration, polling_interval: Duration) -> Self {
        Self {
            polling_duration,
            polling_interval,
        }
    }

    /// Returns a copy of this layer with the total polling duration replaced.
    pub fn with_polling_duration(self, polling_duration: Duration) -> Self {
        Self {
            polling_duration,
            ..self
        }
    }

    /// Returns a copy of this layer with the poll interval replaced.
    pub fn with_polling_interval(self, polling_interval: Duration) -> Self {
        Self {
            polling_interval,
            ..self
        }
    }
}
impl Default for RetryLayer {
    /// Defaults to polling for up to 90 seconds with 500 ms between attempts.
    fn default() -> Self {
        Self::new(Duration::from_secs(90), Duration::from_millis(500))
    }
}
impl<S> Layer<S> for RetryLayer {
    type Service = RetryService<S>;

    /// Wraps the inner transport service with the retry behavior configured on this layer.
    fn layer(&self, inner: S) -> Self::Service {
        // `RetryLayer` is `Copy`, so destructuring `*self` is a plain bit-copy.
        let Self {
            polling_duration,
            polling_interval,
        } = *self;
        RetryService {
            service: inner,
            polling_duration,
            polling_interval,
        }
    }
}
/// The service produced by [`RetryLayer`]: retries receipt and trace requests against the
/// wrapped transport until they succeed or the configured polling duration elapses.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryService<S> {
    /// The internal service.
    service: S,
    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,
    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}
impl<S> Service<RequestPacket> for RetryService<S>
where
    S: Service<RequestPacket, Future = TransportFut<'static>, Error = TransportError>
        + Send
        + 'static
        + Clone,
{
    type Response = ResponsePacket;
    type Error = TransportError;
    type Future = TransportFut<'static>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    /// Dispatches the request, retrying receipt lookups and debug-trace calls until they
    /// succeed or the polling duration elapses. All other methods pass straight through.
    fn call(&mut self, req: RequestPacket) -> Self::Future {
        type ReceiptOutput = <AnyNetwork as Network>::ReceiptResponse;

        let mut service = self.service.clone();
        let polling_interval = self.polling_interval;
        let polling_duration = self.polling_duration;

        Box::pin(async move {
            // Only single requests are supported by this layer.
            let request = req.as_single().ok_or_else(|| {
                TransportErrorKind::custom_str("Retry layer doesn't support batch requests")
            })?;
            let method = request.method();

            // Decide up-front whether this method participates in retries.
            let is_receipt_lookup = method == "eth_getTransactionReceipt";
            let is_debug_trace = method.contains("debug") && method.contains("trace");
            if !(is_receipt_lookup || is_debug_trace) {
                return service.call(req).await;
            }

            timeout(polling_duration, async {
                let mut ticker = interval(polling_interval);
                loop {
                    ticker.tick().await;

                    // Transport-level failures are treated as transient; try again.
                    let Ok(packet) = service.call(req.clone()).await else {
                        continue;
                    };
                    let response = packet.as_single().expect("Can't fail");
                    if response.is_error() {
                        continue;
                    }

                    // A successful-but-empty receipt response means the receipt isn't
                    // available yet, so keep polling. Trace methods return on any
                    // non-error response.
                    if is_receipt_lookup {
                        let receipt_available = response
                            .payload()
                            .clone()
                            .deserialize_success::<ReceiptOutput>()
                            .ok()
                            .and_then(|resp| resp.try_into_success().ok())
                            .is_some();
                        if !receipt_available {
                            continue;
                        }
                    }
                    return packet;
                }
            })
            .await
            .map_err(|_| TransportErrorKind::custom_str("Timeout when retrying request"))
        })
    }
}
-25
View File
@@ -1,25 +0,0 @@
# Manifest for the report-processor CLI, which post-processes retester reports.
[package]
name = "revive-dt-report-processor"
description = "revive differential testing report processor utility"
# Shared metadata is inherited from the workspace manifest.
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "report-processor"
path = "src/main.rs"

[dependencies]
revive-dt-report = { workspace = true }
revive-dt-common = { workspace = true }

anyhow = { workspace = true }
clap = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true
-328
View File
@@ -1,328 +0,0 @@
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet},
fmt::Display,
fs::{File, OpenOptions},
ops::{Deref, DerefMut},
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{Context as _, Error, Result, bail};
use clap::Parser;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use revive_dt_common::types::{Mode, ParsedTestSpecifier};
use revive_dt_report::{Report, TestCaseStatus};
/// Entry point of the report-processor CLI.
///
/// Dispatches on the parsed [`Cli`] subcommand: either distills a retester report into an
/// expectations file, or compares two expectations files and fails on any difference.
fn main() -> Result<()> {
    let cli = Cli::try_parse().context("Failed to parse the CLI arguments")?;
    match cli {
        Cli::GenerateExpectationsFile {
            report_path,
            output_path: output_file,
            remove_prefix,
        } => {
            // Canonicalize the prefixes up-front so that `strip_prefix` below compares
            // like-for-like paths.
            let remove_prefix = remove_prefix
                .into_iter()
                .map(|path| path.canonicalize().context("Failed to canonicalize path"))
                .collect::<Result<Vec<_>>>()?;
            // Flatten the report hierarchy (metadata file -> case -> mode) into one
            // (specifier, status) pair per executed mode, dropping entries that have no
            // recorded status.
            let expectations = report_path
                .execution_information
                .iter()
                .flat_map(|(metadata_file_path, metadata_file_report)| {
                    metadata_file_report
                        .case_reports
                        .iter()
                        .map(move |(case_idx, case_report)| {
                            (metadata_file_path, case_idx, case_report)
                        })
                })
                .flat_map(|(metadata_file_path, case_idx, case_report)| {
                    case_report.mode_execution_reports.iter().map(
                        move |(mode, execution_report)| {
                            (
                                metadata_file_path,
                                case_idx,
                                mode,
                                execution_report.status.as_ref(),
                            )
                        },
                    )
                })
                .filter_map(|(metadata_file_path, case_idx, mode, status)| {
                    status.map(|status| (metadata_file_path, case_idx, mode, status))
                })
                .map(|(metadata_file_path, case_idx, mode, status)| {
                    (
                        TestSpecifier {
                            // Strip the first matching prefix (if any) so the
                            // expectations file contains machine-independent paths.
                            metadata_file_path: Cow::Borrowed(
                                remove_prefix
                                    .iter()
                                    .filter_map(|prefix| {
                                        metadata_file_path.as_inner().strip_prefix(prefix).ok()
                                    })
                                    .next()
                                    .unwrap_or(metadata_file_path.as_inner()),
                            ),
                            case_idx: case_idx.into_inner(),
                            mode: Cow::Borrowed(mode),
                        },
                        Status::from(status),
                    )
                })
                .collect::<Expectations>();
            // Overwrite (or create) the output file and write the expectations as
            // pretty-printed JSON.
            let output_file = OpenOptions::new()
                .truncate(true)
                .create(true)
                .write(true)
                .open(output_file)
                .context("Failed to create the output file")?;
            serde_json::to_writer_pretty(output_file, &expectations)
                .context("Failed to write the expectations to file")?;
        }
        Cli::CompareExpectationFiles {
            base_expectation_path,
            other_expectation_path,
        } => {
            // Take the union of the keys of both files: a key missing from EITHER side
            // is an error, as is a key whose status differs between the two.
            let keys = base_expectation_path
                .keys()
                .chain(other_expectation_path.keys())
                .collect::<BTreeSet<_>>();
            for key in keys {
                let base_status = base_expectation_path.get(key).context(format!(
                    "Entry not found in the base expectations: \"{}\"",
                    key
                ))?;
                let other_status = other_expectation_path.get(key).context(format!(
                    "Entry not found in the other expectations: \"{}\"",
                    key
                ))?;
                if base_status != other_status {
                    bail!(
                        "Expectations for entry \"{}\" have changed. They were {:?} and now they are {:?}",
                        key,
                        base_status,
                        other_status
                    )
                }
            }
        }
    };
    Ok(())
}
/// The on-disk expectations format: a map from a test specifier to its expected status.
type Expectations<'a> = BTreeMap<TestSpecifier<'a>, Status>;

/// A tool that's used to process the reports generated by the retester binary in various ways.
#[derive(Clone, Debug, Parser)]
// The binary target is named `report-processor` (see Cargo.toml); using that name here
// keeps the generated help/usage text consistent with how the tool is invoked.
#[command(name = "report-processor", term_width = 100)]
pub enum Cli {
    /// Generates an expectation file out of a given report.
    GenerateExpectationsFile {
        /// The path of the report's JSON file to generate the expectation's file for.
        #[clap(long)]
        report_path: JsonFile<Report>,
        /// The path of the output file to generate.
        ///
        /// Note that we expect that:
        /// 1. The provided path points to a JSON file.
        /// 2. The ancestors of the provided path already exist such that no directory creations
        ///    are required.
        #[clap(long)]
        output_path: PathBuf,
        /// Prefix paths to remove from the paths in the final expectations file.
        #[clap(long)]
        remove_prefix: Vec<PathBuf>,
    },
    /// Compares two expectation files to ensure that they match each other.
    CompareExpectationFiles {
        /// The path of the base expectation file.
        #[clap(long)]
        base_expectation_path: JsonFile<Expectations<'static>>,
        /// The path of the other expectation file.
        #[clap(long)]
        other_expectation_path: JsonFile<Expectations<'static>>,
    },
}
/// The coarse outcome of a test case as recorded in an expectations file.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum Status {
    /// The test case ran and passed.
    Succeeded,
    /// The test case ran and failed.
    Failed,
    /// The test case was not executed.
    Ignored,
}
impl From<TestCaseStatus> for Status {
fn from(value: TestCaseStatus) -> Self {
match value {
TestCaseStatus::Succeeded { .. } => Self::Succeeded,
TestCaseStatus::Failed { .. } => Self::Failed,
TestCaseStatus::Ignored { .. } => Self::Ignored,
}
}
}
impl<'a> From<&'a TestCaseStatus> for Status {
    /// Collapses a borrowed [`TestCaseStatus`] into the coarse [`Status`] used in
    /// expectations files, discarding any per-variant payload.
    fn from(value: &'a TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}
/// A value of type `T` deserialized from a JSON file, retaining the path it was read from.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct JsonFile<T> {
    /// The path the file was read from; used by the `Display` impl.
    path: PathBuf,
    /// The deserialized content.
    content: Box<T>,
}
// Deref to the content so callers can use a `JsonFile<T>` wherever a `T` is expected.
impl<T> Deref for JsonFile<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.content
    }
}

impl<T> DerefMut for JsonFile<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.content
    }
}
impl<T> FromStr for JsonFile<T>
where
    T: DeserializeOwned,
{
    type Err = Error;

    /// Treats `s` as a filesystem path, opens it, and deserializes its JSON content as `T`.
    ///
    /// This is what lets clap accept a file path on the command line and hand the parsed
    /// content to the subcommand directly.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let path = PathBuf::from(s);
        // Include the offending path in the error so CLI failures are actionable.
        let file = File::open(&path)
            .with_context(|| format!("Failed to open the file: {}", path.display()))?;
        serde_json::from_reader(&file)
            .map(|content| Self { path, content })
            .with_context(|| {
                format!(
                    "Failed to deserialize file's content as {}",
                    std::any::type_name::<T>()
                )
            })
    }
}
impl<T> Display for JsonFile<T> {
    /// Displays the path the file was loaded from, not its content.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.path.display(), f)
    }
}

impl<T> From<JsonFile<T>> for String {
    /// Renders the file's path as a string via the [`Display`] impl.
    fn from(value: JsonFile<T>) -> Self {
        value.to_string()
    }
}
/// Identifies a single test execution: a metadata file, a case index within it, and the
/// mode it was executed under.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TestSpecifier<'a> {
    /// Path of the metadata file the case belongs to.
    pub metadata_file_path: Cow<'a, Path>,
    /// Index of the case within the metadata file.
    pub case_idx: usize,
    /// The mode the case was executed under.
    pub mode: Cow<'a, Mode>,
}
impl<'a> Display for TestSpecifier<'a> {
    /// Formats the specifier as `path::case_idx::mode`.
    ///
    /// NOTE(review): this `::`-separated format is parsed back by the `Deserialize` impl
    /// in this file — keep the two in sync.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}::{}::{}",
            self.metadata_file_path.display(),
            self.case_idx,
            self.mode
        )
    }
}
impl<'a> From<TestSpecifier<'a>> for ParsedTestSpecifier {
    /// Converts into the common [`ParsedTestSpecifier::CaseWithMode`] form, taking
    /// ownership of the borrowed path and mode.
    fn from(
        TestSpecifier {
            metadata_file_path,
            case_idx,
            mode,
        }: TestSpecifier,
    ) -> Self {
        Self::CaseWithMode {
            metadata_file_path: metadata_file_path.to_path_buf(),
            case_idx,
            mode: mode.into_owned(),
        }
    }
}
impl TryFrom<ParsedTestSpecifier> for TestSpecifier<'static> {
    type Error = Error;

    /// Converts a parsed specifier into an owned [`TestSpecifier`], failing unless the
    /// specifier names a concrete case and mode.
    fn try_from(value: ParsedTestSpecifier) -> Result<Self> {
        match value {
            ParsedTestSpecifier::CaseWithMode {
                metadata_file_path,
                case_idx,
                mode,
            } => Ok(Self {
                metadata_file_path: Cow::Owned(metadata_file_path),
                case_idx,
                mode: Cow::Owned(mode),
            }),
            _ => bail!("Expected a full test case specifier"),
        }
    }
}
impl<'a> Serialize for TestSpecifier<'a> {
    /// Serializes as the `path::case_idx::mode` display string, so specifiers can serve
    /// as keys of the JSON map produced from [`Expectations`].
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}
impl<'d, 'a> Deserialize<'d> for TestSpecifier<'a> {
    /// Parses the `path::case_idx::mode` string produced by the [`Display`] impl.
    ///
    /// The components are taken from the *end* of the string (`mode`, then `case_idx`),
    /// so a metadata file path that itself contains `::` still round-trips instead of
    /// being rejected for having "too many" components.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'d>,
    {
        let string = String::deserialize(deserializer)?;
        // rsplitn(3, ..) yields at most three chunks from the right: mode, case_idx, and
        // the remainder (the path, which may contain further `::` separators).
        let mut components = string.rsplitn(3, "::");
        let (Some(mode), Some(case_idx), Some(metadata_file_path)) = (
            components.next(),
            components.next(),
            components.next(),
        ) else {
            return Err(serde::de::Error::custom(
                "Test specifier doesn't contain the components required",
            ));
        };
        let metadata_file_path = PathBuf::from(metadata_file_path);
        let case_idx = usize::from_str(case_idx)
            .map_err(|_| serde::de::Error::custom("Case idx is not a usize"))?;
        let mode = Mode::from_str(mode).map_err(|_| serde::de::Error::custom("Invalid mode"))?;
        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}
+32 -40
View File
@@ -36,8 +36,6 @@ pub struct ReportAggregator {
runner_tx: Option<UnboundedSender<RunnerEvent>>, runner_tx: Option<UnboundedSender<RunnerEvent>>,
runner_rx: UnboundedReceiver<RunnerEvent>, runner_rx: UnboundedReceiver<RunnerEvent>,
listener_tx: Sender<ReporterEvent>, listener_tx: Sender<ReporterEvent>,
/* Context */
file_name: Option<String>,
} }
impl ReportAggregator { impl ReportAggregator {
@@ -45,11 +43,6 @@ impl ReportAggregator {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>(); let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF); let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
Self { Self {
file_name: match context {
Context::Test(ref context) => context.report_configuration.file_name.clone(),
Context::Benchmark(ref context) => context.report_configuration.file_name.clone(),
Context::ExportJsonSchema | Context::ExportGenesis(..) => None,
},
report: Report::new(context), report: Report::new(context),
remaining_cases: Default::default(), remaining_cases: Default::default(),
runner_tx: Some(runner_tx), runner_tx: Some(runner_tx),
@@ -58,7 +51,7 @@ impl ReportAggregator {
} }
} }
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) { pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
let reporter = self let reporter = self
.runner_tx .runner_tx
.take() .take()
@@ -67,7 +60,7 @@ impl ReportAggregator {
(reporter, async move { self.aggregate().await }) (reporter, async move { self.aggregate().await })
} }
async fn aggregate(mut self) -> Result<Report> { async fn aggregate(mut self) -> Result<()> {
debug!("Starting to aggregate report"); debug!("Starting to aggregate report");
while let Some(event) = self.runner_rx.recv().await { while let Some(event) = self.runner_rx.recv().await {
@@ -128,7 +121,7 @@ impl ReportAggregator {
self.handle_completion(CompletionEvent {}); self.handle_completion(CompletionEvent {});
debug!("Report aggregation completed"); debug!("Report aggregation completed");
let default_file_name = { let file_name = {
let current_timestamp = SystemTime::now() let current_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")? .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
@@ -137,7 +130,6 @@ impl ReportAggregator {
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_name = self.file_name.unwrap_or(default_file_name);
let file_path = self let file_path = self
.report .report
.context .context
@@ -160,7 +152,7 @@ impl ReportAggregator {
format!("Failed to serialize report JSON to {}", file_path.display()) format!("Failed to serialize report JSON to {}", file_path.display())
})?; })?;
Ok(self.report) Ok(())
} }
fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) { fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
@@ -570,7 +562,7 @@ pub struct Report {
/// The list of metadata files that were found by the tool. /// The list of metadata files that were found by the tool.
pub metadata_files: BTreeSet<MetadataFilePath>, pub metadata_files: BTreeSet<MetadataFilePath>,
/// Metrics from the execution. /// Metrics from the execution.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub metrics: Option<Metrics>, pub metrics: Option<Metrics>,
/// Information relating to each test case. /// Information relating to each test case.
pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>, pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
@@ -590,7 +582,7 @@ impl Report {
#[derive(Clone, Debug, Serialize, Deserialize, Default)] #[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct MetadataFileReport { pub struct MetadataFileReport {
/// Metrics from the execution. /// Metrics from the execution.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub metrics: Option<Metrics>, pub metrics: Option<Metrics>,
/// The report of each case keyed by the case idx. /// The report of each case keyed by the case idx.
pub case_reports: BTreeMap<CaseIdx, CaseReport>, pub case_reports: BTreeMap<CaseIdx, CaseReport>,
@@ -600,7 +592,7 @@ pub struct MetadataFileReport {
#[derive(Clone, Debug, Serialize, Deserialize, Default)] #[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct CaseReport { pub struct CaseReport {
/// Metrics from the execution. /// Metrics from the execution.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub metrics: Option<Metrics>, pub metrics: Option<Metrics>,
/// The [`ExecutionReport`] for each one of the [`Mode`]s. /// The [`ExecutionReport`] for each one of the [`Mode`]s.
#[serde_as(as = "HashMap<DisplayFromStr, _>")] #[serde_as(as = "HashMap<DisplayFromStr, _>")]
@@ -610,31 +602,31 @@ pub struct CaseReport {
#[derive(Clone, Debug, Serialize, Deserialize, Default)] #[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct ExecutionReport { pub struct ExecutionReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored. /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<TestCaseStatus>, pub status: Option<TestCaseStatus>,
/// Metrics from the execution. /// Metrics from the execution.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub metrics: Option<Metrics>, pub metrics: Option<Metrics>,
/// Information related to the execution on one of the platforms. /// Information related to the execution on one of the platforms.
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(skip_serializing_if = "BTreeMap::is_empty")]
pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>, pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
/// Information on the compiled contracts. /// Information on the compiled contracts.
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(skip_serializing_if = "BTreeMap::is_empty")]
pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>, pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
/// The addresses of the deployed contracts /// The addresses of the deployed contracts
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(skip_serializing_if = "BTreeMap::is_empty")]
pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>, pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
/// Information on the mined blocks as part of this execution. /// Information on the mined blocks as part of this execution.
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(skip_serializing_if = "BTreeMap::is_empty")]
pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>, pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
/// Information tracked for each step that was executed. /// Information tracked for each step that was executed.
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")] #[serde(skip_serializing_if = "BTreeMap::is_empty")]
pub steps: BTreeMap<StepPath, StepReport>, pub steps: BTreeMap<StepPath, StepReport>,
} }
/// Information related to the status of the test. Could be that the test succeeded, failed, or that /// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored. /// it was ignored.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")] #[serde(tag = "status")]
pub enum TestCaseStatus { pub enum TestCaseStatus {
/// The test case succeeded. /// The test case succeeded.
@@ -672,19 +664,19 @@ pub struct TestCaseNodeInformation {
#[derive(Clone, Debug, Default, Serialize, Deserialize)] #[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExecutionInformation { pub struct ExecutionInformation {
/// Information related to the node assigned to this test case. /// Information related to the node assigned to this test case.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub node: Option<TestCaseNodeInformation>, pub node: Option<TestCaseNodeInformation>,
/// Information on the pre-link compiled contracts. /// Information on the pre-link compiled contracts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub pre_link_compilation_status: Option<CompilationStatus>, pub pre_link_compilation_status: Option<CompilationStatus>,
/// Information on the post-link compiled contracts. /// Information on the post-link compiled contracts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub post_link_compilation_status: Option<CompilationStatus>, pub post_link_compilation_status: Option<CompilationStatus>,
/// Information on the deployed libraries. /// Information on the deployed libraries.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>, pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
/// Information on the deployed contracts. /// Information on the deployed contracts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>, pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
} }
@@ -703,11 +695,11 @@ pub enum CompilationStatus {
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked. /// the compiler was invoked.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the /// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI contexts. /// CLI contexts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>, compiler_output: Option<CompilerOutput>,
}, },
/// The compilation failed. /// The compilation failed.
@@ -715,15 +707,15 @@ pub enum CompilationStatus {
/// The failure reason. /// The failure reason.
reason: String, reason: String,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_version: Option<Version>, compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked. /// the compiler was invoked.
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
}, },
} }
@@ -751,24 +743,24 @@ pub struct Metrics {
pub gas_per_second: Metric<u64>, pub gas_per_second: Metric<u64>,
/* Block Fullness */ /* Block Fullness */
pub gas_block_fullness: Metric<u64>, pub gas_block_fullness: Metric<u64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ref_time_block_fullness: Option<Metric<u64>>, pub ref_time_block_fullness: Option<Metric<u64>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub proof_size_block_fullness: Option<Metric<u64>>, pub proof_size_block_fullness: Option<Metric<u64>>,
} }
/// The data that we store for a given metric (e.g., TPS). /// The data that we store for a given metric (e.g., TPS).
#[derive(Clone, Debug, Default, Serialize, Deserialize)] #[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Metric<T> { pub struct Metric<T> {
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub minimum: Option<PlatformKeyedInformation<T>>, pub minimum: Option<PlatformKeyedInformation<T>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub maximum: Option<PlatformKeyedInformation<T>>, pub maximum: Option<PlatformKeyedInformation<T>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub mean: Option<PlatformKeyedInformation<T>>, pub mean: Option<PlatformKeyedInformation<T>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub median: Option<PlatformKeyedInformation<T>>, pub median: Option<PlatformKeyedInformation<T>>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub raw: Option<PlatformKeyedInformation<Vec<T>>>, pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
} }
+29 -98
View File
@@ -28,7 +28,7 @@ from __future__ import annotations
import json import json
import sys import sys
import csv import csv
from typing import List, Mapping, TypedDict, no_type_check from typing import List, Mapping, TypedDict
class EthereumMinedBlockInformation(TypedDict): class EthereumMinedBlockInformation(TypedDict):
@@ -69,43 +69,7 @@ class MinedBlockInformation(TypedDict):
"""Block-level information for a mined block with both EVM and optional Substrate fields.""" """Block-level information for a mined block with both EVM and optional Substrate fields."""
ethereum_block_information: EthereumMinedBlockInformation ethereum_block_information: EthereumMinedBlockInformation
substrate_block_information: SubstrateMinedBlockInformation | None substrate_block_information: SubstrateMinedBlockInformation
def substrate_block_information_ref_time(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["ref_time"]
def substrate_block_information_max_ref_time(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["max_ref_time"]
def substrate_block_information_proof_size(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["proof_size"]
def substrate_block_information_max_proof_size(
block: SubstrateMinedBlockInformation | None,
) -> int | None:
if block is None:
return None
else:
return block["max_proof_size"]
class Metric(TypedDict): class Metric(TypedDict):
@@ -136,19 +100,8 @@ class Metrics(TypedDict):
transaction_per_second: Metric transaction_per_second: Metric
gas_per_second: Metric gas_per_second: Metric
gas_block_fullness: Metric gas_block_fullness: Metric
ref_time_block_fullness: Metric | None ref_time_block_fullness: Metric
proof_size_block_fullness: Metric | None proof_size_block_fullness: Metric
@no_type_check
def metrics_raw_item(
metrics: Metrics, name: str, target: str, index: int
) -> int | None:
l: list[int] = metrics.get(name, dict()).get("raw", dict()).get(target, dict())
try:
return l[index]
except:
return None
class ExecutionReport(TypedDict): class ExecutionReport(TypedDict):
@@ -191,15 +144,12 @@ BlockInformation = TypedDict(
"Transaction Count": int, "Transaction Count": int,
"TPS": int | None, "TPS": int | None,
"GPS": int | None, "GPS": int | None,
"Gas Mined": int, "Ref Time": int,
"Block Gas Limit": int, "Max Ref Time": int,
"Block Fullness Gas": float, "Block Fullness Ref Time": int,
"Ref Time": int | None, "Proof Size": int,
"Max Ref Time": int | None, "Max Proof Size": int,
"Block Fullness Ref Time": int | None, "Block Fullness Proof Size": int,
"Proof Size": int | None,
"Max Proof Size": int | None,
"Block Fullness Proof Size": int | None,
}, },
) )
"""A typed dictionary used to hold all of the block information""" """A typed dictionary used to hold all of the block information"""
@@ -225,7 +175,7 @@ def main() -> None:
report: ReportRoot = load_report(report_path) report: ReportRoot = load_report(report_path)
# TODO: Remove this in the future, but for now, the target is fixed. # TODO: Remove this in the future, but for now, the target is fixed.
target: str = sys.argv[2] target: str = "revive-dev-node-revm-solc"
csv_writer = csv.writer(sys.stdout) csv_writer = csv.writer(sys.stdout)
@@ -238,12 +188,6 @@ def main() -> None:
resolved_blocks: list[BlockInformation] = [] resolved_blocks: list[BlockInformation] = []
for i, block_information in enumerate(blocks_information): for i, block_information in enumerate(blocks_information):
mined_gas: int = block_information["ethereum_block_information"][
"mined_gas"
]
block_gas_limit: int = block_information[
"ethereum_block_information"
]["block_gas_limit"]
resolved_blocks.append( resolved_blocks.append(
{ {
"Block Number": block_information[ "Block Number": block_information[
@@ -272,37 +216,24 @@ def main() -> None:
"raw" "raw"
][target][i - 1] ][target][i - 1]
), ),
"Gas Mined": block_information[ "Ref Time": block_information[
"ethereum_block_information" "substrate_block_information"
]["mined_gas"], ]["ref_time"],
"Block Gas Limit": block_information[ "Max Ref Time": block_information[
"ethereum_block_information" "substrate_block_information"
]["block_gas_limit"], ]["max_ref_time"],
"Block Fullness Gas": mined_gas / block_gas_limit, "Block Fullness Ref Time": execution_report["metrics"][
"Ref Time": substrate_block_information_ref_time( "ref_time_block_fullness"
block_information["substrate_block_information"] ]["raw"][target][i],
), "Proof Size": block_information[
"Max Ref Time": substrate_block_information_max_ref_time( "substrate_block_information"
block_information["substrate_block_information"] ]["proof_size"],
), "Max Proof Size": block_information[
"Block Fullness Ref Time": metrics_raw_item( "substrate_block_information"
execution_report["metrics"], ]["max_proof_size"],
"ref_time_block_fullness", "Block Fullness Proof Size": execution_report["metrics"][
target, "proof_size_block_fullness"
i, ]["raw"][target][i],
),
"Proof Size": substrate_block_information_proof_size(
block_information["substrate_block_information"]
),
"Max Proof Size": substrate_block_information_max_proof_size(
block_information["substrate_block_information"]
),
"Block Fullness Proof Size": metrics_raw_item(
execution_report["metrics"],
"proof_size_block_fullness",
target,
i,
),
} }
) )
+36 -69
View File
@@ -5,54 +5,51 @@ CI. The full models used in the JSON report can be found in the revive different
the models used in this script are just a partial reproduction of the full report models. the models used in this script are just a partial reproduction of the full report models.
""" """
import json, typing, io, sys from typing import TypedDict, Literal, Union
import json, io
class Report(typing.TypedDict): class Report(TypedDict):
context: "Context" context: "Context"
execution_information: dict["MetadataFilePathString", "MetadataFileReport"] execution_information: dict[
"MetadataFilePathString",
dict["ModeString", dict["CaseIdxString", "CaseReport"]],
]
class MetadataFileReport(typing.TypedDict): class Context(TypedDict):
case_reports: dict["CaseIdxString", "CaseReport"]
class CaseReport(typing.TypedDict):
mode_execution_reports: dict["ModeString", "ExecutionReport"]
class ExecutionReport(typing.TypedDict):
status: "TestCaseStatus"
class Context(typing.TypedDict):
Test: "TestContext" Test: "TestContext"
class TestContext(typing.TypedDict): class TestContext(TypedDict):
corpus_configuration: "CorpusConfiguration" corpus_configuration: "CorpusConfiguration"
class CorpusConfiguration(typing.TypedDict): class CorpusConfiguration(TypedDict):
test_specifiers: list["TestSpecifier"] test_specifiers: list["TestSpecifier"]
class CaseStatusSuccess(typing.TypedDict): class CaseReport(TypedDict):
status: typing.Literal["Succeeded"] status: "CaseStatus"
class CaseStatusSuccess(TypedDict):
status: Literal["Succeeded"]
steps_executed: int steps_executed: int
class CaseStatusFailure(typing.TypedDict): class CaseStatusFailure(TypedDict):
status: typing.Literal["Failed"] status: Literal["Failed"]
reason: str reason: str
class CaseStatusIgnored(typing.TypedDict): class CaseStatusIgnored(TypedDict):
status: typing.Literal["Ignored"] status: Literal["Ignored"]
reason: str reason: str
TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored] CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case.""" """A union type of all of the possible statuses that could be reported for a case."""
TestSpecifier = str TestSpecifier = str
@@ -67,12 +64,6 @@ MetadataFilePathString = str
CaseIdxString = str CaseIdxString = str
"""The index of a case as a string. For example '0'""" """The index of a case as a string. For example '0'"""
PlatformString = typing.Union[
typing.Literal["revive-dev-node-revm-solc"],
typing.Literal["revive-dev-node-polkavm-resolc"],
]
"""A string of the platform on which the test was run"""
def path_relative_to_resolc_compiler_test_directory(path: str) -> str: def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
""" """
@@ -87,22 +78,12 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
def main() -> None: def main() -> None:
with open(sys.argv[1], "r") as file: with open("report.json", "r") as file:
report: Report = json.load(file) report: Report = json.load(file)
# Getting the platform string and resolving it into a simpler version of
# itself.
platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
if platform_identifier == "revive-dev-node-polkavm-resolc":
platform: str = "PolkaVM"
elif platform_identifier == "revive-dev-node-revm-solc":
platform: str = "REVM"
else:
platform: str = platform_identifier
# Starting the markdown document and adding information to it as we go. # Starting the markdown document and adding information to it as we go.
markdown_document: io.TextIOWrapper = open("report.md", "w") markdown_document: io.TextIOWrapper = open("report.md", "w")
print(f"# Differential Tests Results ({platform})", file=markdown_document) print("# Differential Tests Results", file=markdown_document)
# Getting all of the test specifiers from the report and making them relative to the tests dir. # Getting all of the test specifiers from the report and making them relative to the tests dir.
test_specifiers: list[str] = list( test_specifiers: list[str] = list(
@@ -113,7 +94,7 @@ def main() -> None:
) )
print("## Specified Tests", file=markdown_document) print("## Specified Tests", file=markdown_document)
for test_specifier in test_specifiers: for test_specifier in test_specifiers:
print(f"* ``{test_specifier}``", file=markdown_document) print(f"* `{test_specifier}`", file=markdown_document)
# Counting the total number of test cases, successes, failures, and ignored tests # Counting the total number of test cases, successes, failures, and ignored tests
total_number_of_cases: int = 0 total_number_of_cases: int = 0
@@ -121,13 +102,9 @@ def main() -> None:
total_number_of_failures: int = 0 total_number_of_failures: int = 0
total_number_of_ignores: int = 0 total_number_of_ignores: int = 0
for _, mode_to_case_mapping in report["execution_information"].items(): for _, mode_to_case_mapping in report["execution_information"].items():
for _, case_idx_to_report_mapping in mode_to_case_mapping[ for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for _, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for _, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
total_number_of_cases += 1 total_number_of_cases += 1
if status["status"] == "Succeeded": if status["status"] == "Succeeded":
@@ -167,13 +144,9 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[ for metadata_file_path, mode_to_case_mapping in report[
"execution_information" "execution_information"
].items(): ].items():
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[ for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for case_idx_string, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = ( metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path) path_relative_to_resolc_compiler_test_directory(metadata_file_path)
) )
@@ -210,13 +183,9 @@ def main() -> None:
for metadata_file_path, mode_to_case_mapping in report[ for metadata_file_path, mode_to_case_mapping in report[
"execution_information" "execution_information"
].items(): ].items():
for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[ for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
"case_reports" for case_idx_string, case_report in case_idx_to_report_mapping.items():
].items(): status: CaseStatus = case_report["status"]
for mode_string, execution_report in case_idx_to_report_mapping[
"mode_execution_reports"
].items():
status: TestCaseStatus = execution_report["status"]
metadata_file_path: str = ( metadata_file_path: str = (
path_relative_to_resolc_compiler_test_directory(metadata_file_path) path_relative_to_resolc_compiler_test_directory(metadata_file_path)
) )
@@ -225,9 +194,7 @@ def main() -> None:
if status["status"] != "Failed": if status["status"] != "Failed":
continue continue
failure_reason: str = ( failure_reason: str = status["reason"].replace("\n", " ")
status["reason"].replace("\n", " ").replace("|", " ")
)
note: str = "" note: str = ""
modes_where_this_case_succeeded: set[ModeString] = ( modes_where_this_case_succeeded: set[ModeString] = (
@@ -245,7 +212,7 @@ def main() -> None:
f"{metadata_file_path}::{case_idx_string}::{mode_string}" f"{metadata_file_path}::{case_idx_string}::{mode_string}"
) )
print( print(
f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |", f"| `{test_specifier}` | `{failure_reason}` | {note} |",
file=markdown_document, file=markdown_document,
) )
print("\n\n</details>", file=markdown_document) print("\n\n</details>", file=markdown_document)