mirror of
https://github.com/pezkuwichain/revive-differential-tests.git
synced 2026-04-22 21:57:58 +00:00
Compare commits
25 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b8a71b40e2 | |||
| 6df00f567c | |||
| 75159229df | |||
| 9b75a4f236 | |||
| 2af1a62319 | |||
| e09be4f3fa | |||
| 33b5faca45 | |||
| 172fb4700f | |||
| fefea17c8e | |||
| b71445b632 | |||
| f1a911545e | |||
| 48e7d69158 | |||
| 260ac5d98e | |||
| 94f116f843 | |||
| 0d7a87a728 | |||
| 29bf5304ec | |||
| 491c23efb3 | |||
| 3c86cbb7ef | |||
| fde07b7c0d | |||
| ebc24a588b | |||
| 21e25f09e6 | |||
| 8c412dc924 | |||
| 6da3172581 | |||
| c6eb04b04e | |||
| e5114d31dc |
@@ -15,6 +15,7 @@ concurrency:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
CARGO_TERM_COLOR: always
|
CARGO_TERM_COLOR: always
|
||||||
|
POLKADOT_VERSION: polkadot-stable2506-2
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
cache-polkadot:
|
cache-polkadot:
|
||||||
@@ -50,15 +51,15 @@ jobs:
|
|||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/.cargo/bin/substrate-node
|
~/.cargo/bin/revive-dev-node
|
||||||
~/.cargo/bin/eth-rpc
|
~/.cargo/bin/eth-rpc
|
||||||
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}-with-dev-node
|
||||||
|
|
||||||
- name: Build substrate-node
|
- name: Build revive-dev-node
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
run: |
|
run: |
|
||||||
cd polkadot-sdk
|
cd polkadot-sdk
|
||||||
cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
|
cargo install --locked --force --profile=production --path substrate/frame/revive/dev-node/node --bin revive-dev-node
|
||||||
|
|
||||||
- name: Build eth-rpc
|
- name: Build eth-rpc
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
@@ -66,6 +67,37 @@ jobs:
|
|||||||
cd polkadot-sdk
|
cd polkadot-sdk
|
||||||
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
|
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
|
||||||
|
|
||||||
|
- name: Cache downloaded Polkadot binaries
|
||||||
|
id: cache-polkadot
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/polkadot-cache/polkadot
|
||||||
|
~/polkadot-cache/polkadot-execute-worker
|
||||||
|
~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
~/polkadot-cache/polkadot-parachain
|
||||||
|
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
|
||||||
|
|
||||||
|
- name: Download Polkadot binaries on macOS
|
||||||
|
if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/polkadot-cache
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
|
||||||
|
chmod +x ~/polkadot-cache/*
|
||||||
|
|
||||||
|
- name: Download Polkadot binaries on Ubuntu
|
||||||
|
if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/polkadot-cache
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
|
||||||
|
chmod +x ~/polkadot-cache/*
|
||||||
|
|
||||||
ci:
|
ci:
|
||||||
name: CI on ${{ matrix.os }}
|
name: CI on ${{ matrix.os }}
|
||||||
needs: cache-polkadot
|
needs: cache-polkadot
|
||||||
@@ -77,14 +109,34 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Checkout repo
|
- name: Checkout repo
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
|
||||||
- name: Restore binaries from cache
|
- name: Restore binaries from cache
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/.cargo/bin/substrate-node
|
~/.cargo/bin/revive-dev-node
|
||||||
~/.cargo/bin/eth-rpc
|
~/.cargo/bin/eth-rpc
|
||||||
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}-with-dev-node
|
||||||
|
|
||||||
|
- name: Restore downloaded Polkadot binaries from cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/polkadot-cache/polkadot
|
||||||
|
~/polkadot-cache/polkadot-execute-worker
|
||||||
|
~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
~/polkadot-cache/polkadot-parachain
|
||||||
|
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
|
||||||
|
|
||||||
|
- name: Install Polkadot binaries
|
||||||
|
run: |
|
||||||
|
sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
|
||||||
|
sudo chmod +x /usr/local/bin/polkadot*
|
||||||
|
|
||||||
- name: Setup Rust toolchain
|
- name: Setup Rust toolchain
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
@@ -152,8 +204,13 @@ jobs:
|
|||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install kurtosis-cli
|
sudo apt install kurtosis-cli
|
||||||
|
|
||||||
|
- name: Install cargo-machete
|
||||||
|
uses: clechasseur/rs-cargo@v2
|
||||||
|
with:
|
||||||
|
command: install
|
||||||
|
args: cargo-machete@0.7.0
|
||||||
- name: Machete
|
- name: Machete
|
||||||
uses: bnjbvr/cargo-machete@v0.7.1
|
run: cargo machete crates
|
||||||
|
|
||||||
- name: Format
|
- name: Format
|
||||||
run: make format
|
run: make format
|
||||||
@@ -161,8 +218,8 @@ jobs:
|
|||||||
- name: Clippy
|
- name: Clippy
|
||||||
run: make clippy
|
run: make clippy
|
||||||
|
|
||||||
- name: Check substrate-node version
|
- name: Check revive-dev-node version
|
||||||
run: substrate-node --version
|
run: revive-dev-node --version
|
||||||
|
|
||||||
- name: Check eth-rpc version
|
- name: Check eth-rpc version
|
||||||
run: eth-rpc --version
|
run: eth-rpc --version
|
||||||
@@ -170,6 +227,18 @@ jobs:
|
|||||||
- name: Check resolc version
|
- name: Check resolc version
|
||||||
run: resolc --version
|
run: resolc --version
|
||||||
|
|
||||||
|
- name: Check polkadot version
|
||||||
|
run: polkadot --version
|
||||||
|
|
||||||
|
- name: Check polkadot-parachain version
|
||||||
|
run: polkadot-parachain --version
|
||||||
|
|
||||||
|
- name: Check polkadot-execute-worker version
|
||||||
|
run: polkadot-execute-worker --version
|
||||||
|
|
||||||
|
- name: Check polkadot-prepare-worker version
|
||||||
|
run: polkadot-prepare-worker --version
|
||||||
|
|
||||||
- name: Test Formatting
|
- name: Test Formatting
|
||||||
run: make format
|
run: make format
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ node_modules
|
|||||||
*.log
|
*.log
|
||||||
|
|
||||||
profile.json.gz
|
profile.json.gz
|
||||||
resolc-compiler-tests
|
|
||||||
workdir
|
workdir
|
||||||
|
|
||||||
!/schema.json
|
!/schema.json
|
||||||
|
|||||||
@@ -1,3 +1,6 @@
|
|||||||
[submodule "polkadot-sdk"]
|
[submodule "polkadot-sdk"]
|
||||||
path = polkadot-sdk
|
path = polkadot-sdk
|
||||||
url = https://github.com/paritytech/polkadot-sdk.git
|
url = https://github.com/paritytech/polkadot-sdk.git
|
||||||
|
[submodule "resolc-compiler-tests"]
|
||||||
|
path = resolc-compiler-tests
|
||||||
|
url = https://github.com/paritytech/resolc-compiler-tests
|
||||||
|
|||||||
Generated
+2338
-58
File diff suppressed because it is too large
Load Diff
+6
-1
@@ -22,10 +22,11 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
|
|||||||
revive-dt-report = { version = "0.1.0", path = "crates/report" }
|
revive-dt-report = { version = "0.1.0", path = "crates/report" }
|
||||||
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
|
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
|
||||||
|
|
||||||
|
ansi_term = "0.12.1"
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
bson = { version = "2.15.0" }
|
bson = { version = "2.15.0" }
|
||||||
cacache = { version = "13.1.0" }
|
cacache = { version = "13.1.0" }
|
||||||
clap = { version = "4", features = ["derive"] }
|
clap = { version = "4", features = ["derive", "wrap_help"] }
|
||||||
dashmap = { version = "6.1.0" }
|
dashmap = { version = "6.1.0" }
|
||||||
foundry-compilers-artifacts = { version = "0.18.0" }
|
foundry-compilers-artifacts = { version = "0.18.0" }
|
||||||
futures = { version = "0.3.31" }
|
futures = { version = "0.3.31" }
|
||||||
@@ -49,6 +50,7 @@ sha2 = { version = "0.10.9" }
|
|||||||
sp-core = "36.1.0"
|
sp-core = "36.1.0"
|
||||||
sp-runtime = "41.1.0"
|
sp-runtime = "41.1.0"
|
||||||
strum = { version = "0.27.2", features = ["derive"] }
|
strum = { version = "0.27.2", features = ["derive"] }
|
||||||
|
subxt = { version = "0.44.0" }
|
||||||
temp-dir = { version = "0.1.16" }
|
temp-dir = { version = "0.1.16" }
|
||||||
tempfile = "3.3"
|
tempfile = "3.3"
|
||||||
thiserror = "2"
|
thiserror = "2"
|
||||||
@@ -67,12 +69,15 @@ tracing-subscriber = { version = "0.3.19", default-features = false, features =
|
|||||||
"env-filter",
|
"env-filter",
|
||||||
] }
|
] }
|
||||||
indexmap = { version = "2.10.0", default-features = false }
|
indexmap = { version = "2.10.0", default-features = false }
|
||||||
|
itertools = { version = "0.14.0" }
|
||||||
|
|
||||||
# revive compiler
|
# revive compiler
|
||||||
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
||||||
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
||||||
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
||||||
|
|
||||||
|
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
|
||||||
|
|
||||||
[workspace.dependencies.alloy]
|
[workspace.dependencies.alloy]
|
||||||
version = "1.0.37"
|
version = "1.0.37"
|
||||||
default-features = false
|
default-features = false
|
||||||
|
|||||||
@@ -9,7 +9,7 @@
|
|||||||
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
|
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
|
||||||
|
|
||||||
- Geth (EVM reference implementation)
|
- Geth (EVM reference implementation)
|
||||||
- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
|
- Revive Dev Node (Substrate-based PolkaVM + `eth-rpc` proxy)
|
||||||
|
|
||||||
Use it to:
|
Use it to:
|
||||||
|
|
||||||
@@ -39,9 +39,9 @@ This repository contains none of the tests and only contains the testing framewo
|
|||||||
This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
|
This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
|
||||||
|
|
||||||
- Stable Rust
|
- Stable Rust
|
||||||
- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
|
- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
||||||
- Kitchensink - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
|
- Revive Dev Node - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
||||||
- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
|
- ETH-RPC - All communication with Revive Dev Node is done through the ETH RPC.
|
||||||
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
|
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
|
||||||
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
|
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
|
||||||
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
|
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
|
||||||
@@ -52,192 +52,69 @@ All of the above need to be installed and available in the path in order for the
|
|||||||
|
|
||||||
This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
|
This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
|
||||||
|
|
||||||
```bash
|
|
||||||
$ cargo run --release -- execute-tests --help
|
|
||||||
Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently
|
|
||||||
|
|
||||||
Usage: retester execute-tests [OPTIONS]
|
|
||||||
|
|
||||||
Options:
|
|
||||||
-w, --working-directory <WORKING_DIRECTORY>
|
|
||||||
The working directory that the program will use for all of the temporary artifacts needed at runtime.
|
|
||||||
|
|
||||||
If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.
|
|
||||||
|
|
||||||
[default: ]
|
|
||||||
|
|
||||||
-p, --platform <PLATFORMS>
|
|
||||||
The set of platforms that the differential tests should run on
|
|
||||||
|
|
||||||
[default: geth-evm-solc,revive-dev-node-polkavm-resolc]
|
|
||||||
|
|
||||||
Possible values:
|
|
||||||
- geth-evm-solc: The Go-ethereum reference full node EVM implementation with the solc compiler
|
|
||||||
- kitchensink-polkavm-resolc: The kitchensink node with the PolkaVM backend with the resolc compiler
|
|
||||||
- kitchensink-revm-solc: The kitchensink node with the REVM backend with the solc compiler
|
|
||||||
- revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
|
|
||||||
- revive-dev-node-revm-solc: The revive dev node with the REVM backend with the solc compiler
|
|
||||||
|
|
||||||
-c, --corpus <CORPUS>
|
|
||||||
A list of test corpus JSON files to be tested
|
|
||||||
|
|
||||||
-h, --help
|
|
||||||
Print help (see a summary with '-h')
|
|
||||||
|
|
||||||
Solc Configuration:
|
|
||||||
--solc.version <VERSION>
|
|
||||||
Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases
|
|
||||||
|
|
||||||
[default: 0.8.29]
|
|
||||||
|
|
||||||
Resolc Configuration:
|
|
||||||
--resolc.path <resolc.path>
|
|
||||||
Specifies the path of the resolc compiler to be used by the tool.
|
|
||||||
|
|
||||||
If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.
|
|
||||||
|
|
||||||
[default: resolc]
|
|
||||||
|
|
||||||
Geth Configuration:
|
|
||||||
--geth.path <geth.path>
|
|
||||||
Specifies the path of the geth node to be used by the tool.
|
|
||||||
|
|
||||||
If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.
|
|
||||||
|
|
||||||
[default: geth]
|
|
||||||
|
|
||||||
--geth.start-timeout-ms <geth.start-timeout-ms>
|
|
||||||
The amount of time to wait upon startup before considering that the node timed out
|
|
||||||
|
|
||||||
[default: 5000]
|
|
||||||
|
|
||||||
Kitchensink Configuration:
|
|
||||||
--kitchensink.path <kitchensink.path>
|
|
||||||
Specifies the path of the kitchensink node to be used by the tool.
|
|
||||||
|
|
||||||
If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.
|
|
||||||
|
|
||||||
[default: substrate-node]
|
|
||||||
|
|
||||||
--kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
|
|
||||||
The amount of time to wait upon startup before considering that the node timed out
|
|
||||||
|
|
||||||
[default: 5000]
|
|
||||||
|
|
||||||
--kitchensink.dont-use-dev-node
|
|
||||||
This configures the tool to use Kitchensink instead of using the revive-dev-node
|
|
||||||
|
|
||||||
Revive Dev Node Configuration:
|
|
||||||
--revive-dev-node.path <revive-dev-node.path>
|
|
||||||
Specifies the path of the revive dev node to be used by the tool.
|
|
||||||
|
|
||||||
If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.
|
|
||||||
|
|
||||||
[default: revive-dev-node]
|
|
||||||
|
|
||||||
--revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
|
|
||||||
The amount of time to wait upon startup before considering that the node timed out
|
|
||||||
|
|
||||||
[default: 5000]
|
|
||||||
|
|
||||||
Eth RPC Configuration:
|
|
||||||
--eth-rpc.path <eth-rpc.path>
|
|
||||||
Specifies the path of the ETH RPC to be used by the tool.
|
|
||||||
|
|
||||||
If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.
|
|
||||||
|
|
||||||
[default: eth-rpc]
|
|
||||||
|
|
||||||
--eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
|
|
||||||
The amount of time to wait upon startup before considering that the node timed out
|
|
||||||
|
|
||||||
[default: 5000]
|
|
||||||
|
|
||||||
Genesis Configuration:
|
|
||||||
--genesis.path <genesis.path>
|
|
||||||
Specifies the path of the genesis file to use for the nodes that are started.
|
|
||||||
|
|
||||||
This is expected to be the path of a JSON geth genesis file.
|
|
||||||
|
|
||||||
Wallet Configuration:
|
|
||||||
--wallet.default-private-key <DEFAULT_KEY>
|
|
||||||
The private key of the default signer
|
|
||||||
|
|
||||||
[default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
|
|
||||||
|
|
||||||
--wallet.additional-keys <ADDITIONAL_KEYS>
|
|
||||||
This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
|
|
||||||
|
|
||||||
[default: 100000]
|
|
||||||
|
|
||||||
Concurrency Configuration:
|
|
||||||
--concurrency.number-of-nodes <NUMBER_OF_NODES>
|
|
||||||
Determines the amount of nodes that will be spawned for each chain
|
|
||||||
|
|
||||||
[default: 5]
|
|
||||||
|
|
||||||
--concurrency.number-of-threads <NUMBER_OF_THREADS>
|
|
||||||
Determines the amount of tokio worker threads that will will be used
|
|
||||||
|
|
||||||
[default: 16]
|
|
||||||
|
|
||||||
--concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
|
|
||||||
Determines the amount of concurrent tasks that will be spawned to run tests.
|
|
||||||
|
|
||||||
Defaults to 10 x the number of nodes.
|
|
||||||
|
|
||||||
--concurrency.ignore-concurrency-limit
|
|
||||||
Determines if the concurrency limit should be ignored or not
|
|
||||||
|
|
||||||
Compilation Configuration:
|
|
||||||
--compilation.invalidate-cache
|
|
||||||
Controls if the compilation cache should be invalidated or not
|
|
||||||
|
|
||||||
Report Configuration:
|
|
||||||
--report.include-compiler-input
|
|
||||||
Controls if the compiler input is included in the final report
|
|
||||||
|
|
||||||
--report.include-compiler-output
|
|
||||||
Controls if the compiler output is included in the final report
|
|
||||||
```
|
|
||||||
|
|
||||||
To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
|
|
||||||
"path": "resolc-compiler-tests/fixtures/solidity"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
> [!NOTE]
|
> [!NOTE]
|
||||||
> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
|
> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
|
||||||
|
|
||||||
The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
|
|
||||||
|
|
||||||
The simplest command to run this tool is the following:
|
The simplest command to run this tool is the following:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
RUST_LOG="info" cargo run --release -- execute-tests \
|
RUST_LOG="info" cargo run --release -- test \
|
||||||
|
--test ./resolc-compiler-tests/fixtures/solidity \
|
||||||
--platform geth-evm-solc \
|
--platform geth-evm-solc \
|
||||||
--corpus corp.json \
|
|
||||||
--working-directory workdir \
|
--working-directory workdir \
|
||||||
--concurrency.number-of-nodes 5 \
|
|
||||||
--concurrency.ignore-concurrency-limit \
|
|
||||||
> logs.log \
|
> logs.log \
|
||||||
2> output.log
|
2> output.log
|
||||||
```
|
```
|
||||||
|
|
||||||
The above command will run the tool executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing the to the tool then the `logs.log` file will be very valuable.
|
The above command will run the tool executing every one of the tests discovered in the path provided to the tool. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing the to the tool then the `logs.log` file will be very valuable.
|
||||||
|
|
||||||
If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
|
<details>
|
||||||
|
<summary>User Managed Nodes</summary>
|
||||||
|
|
||||||
```json
|
This section describes how the user can make use of nodes that they manage rather than allowing the tool to spawn and manage the nodes on the user's behalf.
|
||||||
{
|
|
||||||
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
|
> ⚠️ This is an advanced feature of the tool and could lead test successes or failures to not be reproducible. Please use this feature with caution and only if you understand the implications of running your own node instead of having the framework manage your nodes. ⚠️
|
||||||
"paths": [
|
|
||||||
"path/to/a/single/metadata/file/I/want/to/run.json",
|
If you're an advanced user and you'd like to manage your own nodes instead of having the tool initialize, spawn, and manage them, then you can choose to run your own nodes and then provide them to the tool to make use of just like the following:
|
||||||
"path/to/a/directory/to/find/all/metadata/files/within"
|
|
||||||
]
|
```bash
|
||||||
}
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
PLATFORM="revive-dev-node-revm-solc"
|
||||||
|
|
||||||
|
retester export-genesis "$PLATFORM" > chainspec.json
|
||||||
|
|
||||||
|
# Start revive-dev-node in a detached tmux session
|
||||||
|
tmux new-session -d -s revive-dev-node \
|
||||||
|
'RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" revive-dev-node \
|
||||||
|
--dev \
|
||||||
|
--chain chainspec.json \
|
||||||
|
--force-authoring \
|
||||||
|
--rpc-methods Unsafe \
|
||||||
|
--rpc-cors all \
|
||||||
|
--rpc-max-connections 4294967295 \
|
||||||
|
--pool-limit 4294967295 \
|
||||||
|
--pool-kbytes 4294967295'
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
# Start eth-rpc in a detached tmux session
|
||||||
|
tmux new-session -d -s eth-rpc \
|
||||||
|
'RUST_LOG="info,eth-rpc=debug" eth-rpc \
|
||||||
|
--dev \
|
||||||
|
--node-rpc-url ws://127.0.0.1:9944 \
|
||||||
|
--rpc-max-connections 4294967295'
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
# Run the tests (logs to files as before)
|
||||||
|
RUST_LOG="info" retester test \
|
||||||
|
--platform "$PLATFORM" \
|
||||||
|
--corpus ./revive-differential-tests/fixtures/solidity \
|
||||||
|
--working-directory ./workdir \
|
||||||
|
--concurrency.number-of-nodes 1 \
|
||||||
|
--concurrency.number-of-concurrent-tasks 5 \
|
||||||
|
--revive-dev-node.existing-rpc-url "http://localhost:8545" \
|
||||||
|
> logs.log
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|||||||
Binary file not shown.
@@ -14,6 +14,7 @@ anyhow = { workspace = true }
|
|||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
moka = { workspace = true, features = ["sync"] }
|
moka = { workspace = true, features = ["sync"] }
|
||||||
once_cell = { workspace = true }
|
once_cell = { workspace = true }
|
||||||
|
regex = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
schemars = { workspace = true }
|
schemars = { workspace = true }
|
||||||
|
|||||||
@@ -31,14 +31,14 @@ pub enum PlatformIdentifier {
|
|||||||
GethEvmSolc,
|
GethEvmSolc,
|
||||||
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
|
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
|
||||||
LighthouseGethEvmSolc,
|
LighthouseGethEvmSolc,
|
||||||
/// The kitchensink node with the PolkaVM backend with the resolc compiler.
|
|
||||||
KitchensinkPolkavmResolc,
|
|
||||||
/// The kitchensink node with the REVM backend with the solc compiler.
|
|
||||||
KitchensinkRevmSolc,
|
|
||||||
/// The revive dev node with the PolkaVM backend with the resolc compiler.
|
/// The revive dev node with the PolkaVM backend with the resolc compiler.
|
||||||
ReviveDevNodePolkavmResolc,
|
ReviveDevNodePolkavmResolc,
|
||||||
/// The revive dev node with the REVM backend with the solc compiler.
|
/// The revive dev node with the REVM backend with the solc compiler.
|
||||||
ReviveDevNodeRevmSolc,
|
ReviveDevNodeRevmSolc,
|
||||||
|
/// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
|
||||||
|
ZombienetPolkavmResolc,
|
||||||
|
/// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
|
||||||
|
ZombienetRevmSolc,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An enum of the platform identifiers of all of the platforms supported by this framework.
|
/// An enum of the platform identifiers of all of the platforms supported by this framework.
|
||||||
@@ -91,10 +91,10 @@ pub enum NodeIdentifier {
|
|||||||
Geth,
|
Geth,
|
||||||
/// The go-ethereum node implementation.
|
/// The go-ethereum node implementation.
|
||||||
LighthouseGeth,
|
LighthouseGeth,
|
||||||
/// The Kitchensink node implementation.
|
|
||||||
Kitchensink,
|
|
||||||
/// The revive dev node implementation.
|
/// The revive dev node implementation.
|
||||||
ReviveDevNode,
|
ReviveDevNode,
|
||||||
|
/// A zombienet spawned nodes
|
||||||
|
Zombienet,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An enum representing the identifiers of the supported VMs.
|
/// An enum representing the identifiers of the supported VMs.
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
mod identifiers;
|
mod identifiers;
|
||||||
mod mode;
|
mod mode;
|
||||||
|
mod parsed_test_specifier;
|
||||||
mod private_key_allocator;
|
mod private_key_allocator;
|
||||||
mod round_robin_pool;
|
mod round_robin_pool;
|
||||||
mod version_or_requirement;
|
mod version_or_requirement;
|
||||||
|
|
||||||
pub use identifiers::*;
|
pub use identifiers::*;
|
||||||
pub use mode::*;
|
pub use mode::*;
|
||||||
|
pub use parsed_test_specifier::*;
|
||||||
pub use private_key_allocator::*;
|
pub use private_key_allocator::*;
|
||||||
pub use round_robin_pool::*;
|
pub use round_robin_pool::*;
|
||||||
pub use version_or_requirement::*;
|
pub use version_or_requirement::*;
|
||||||
|
|||||||
@@ -1,6 +1,11 @@
|
|||||||
|
use crate::iterators::EitherIter;
|
||||||
use crate::types::VersionOrRequirement;
|
use crate::types::VersionOrRequirement;
|
||||||
|
use anyhow::{Context as _, bail};
|
||||||
|
use regex::Regex;
|
||||||
|
use schemars::JsonSchema;
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashSet;
|
||||||
use std::fmt::Display;
|
use std::fmt::Display;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use std::sync::LazyLock;
|
use std::sync::LazyLock;
|
||||||
@@ -33,6 +38,19 @@ impl Display for Mode {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl FromStr for Mode {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let parsed_mode = ParsedMode::from_str(s)?;
|
||||||
|
let mut iter = parsed_mode.to_modes();
|
||||||
|
let (Some(mode), None) = (iter.next(), iter.next()) else {
|
||||||
|
bail!("Failed to parse the mode")
|
||||||
|
};
|
||||||
|
Ok(mode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Mode {
|
impl Mode {
|
||||||
/// Return all of the available mode combinations.
|
/// Return all of the available mode combinations.
|
||||||
pub fn all() -> impl Iterator<Item = &'static Mode> {
|
pub fn all() -> impl Iterator<Item = &'static Mode> {
|
||||||
@@ -171,3 +189,250 @@ impl ModeOptimizerSetting {
|
|||||||
!matches!(self, ModeOptimizerSetting::M0)
|
!matches!(self, ModeOptimizerSetting::M0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// This represents a mode that has been parsed from test metadata.
|
||||||
|
///
|
||||||
|
/// Mode strings can take the following form (in pseudo-regex):
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
|
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
|
||||||
|
#[serde(try_from = "String", into = "String")]
|
||||||
|
pub struct ParsedMode {
|
||||||
|
pub pipeline: Option<ModePipeline>,
|
||||||
|
pub optimize_flag: Option<bool>,
|
||||||
|
pub optimize_setting: Option<ModeOptimizerSetting>,
|
||||||
|
pub version: Option<semver::VersionReq>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ParsedMode {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
|
||||||
|
Regex::new(r"(?x)
|
||||||
|
^
|
||||||
|
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
|
||||||
|
\s*
|
||||||
|
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
|
||||||
|
\s*
|
||||||
|
(?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
|
||||||
|
$
|
||||||
|
").unwrap()
|
||||||
|
});
|
||||||
|
|
||||||
|
let Some(caps) = REGEX.captures(s) else {
|
||||||
|
anyhow::bail!("Cannot parse mode '{s}' from string");
|
||||||
|
};
|
||||||
|
|
||||||
|
let pipeline = match caps.name("pipeline") {
|
||||||
|
Some(m) => Some(
|
||||||
|
ModePipeline::from_str(m.as_str())
|
||||||
|
.context("Failed to parse mode pipeline from string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
|
||||||
|
|
||||||
|
let optimize_setting = match caps.name("optimize_setting") {
|
||||||
|
Some(m) => Some(
|
||||||
|
ModeOptimizerSetting::from_str(m.as_str())
|
||||||
|
.context("Failed to parse optimizer setting from string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let version = match caps.name("version") {
|
||||||
|
Some(m) => Some(
|
||||||
|
semver::VersionReq::parse(m.as_str())
|
||||||
|
.map_err(|e| {
|
||||||
|
anyhow::anyhow!(
|
||||||
|
"Cannot parse the version requirement '{}': {e}",
|
||||||
|
m.as_str()
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.context("Failed to parse semver requirement from mode string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(ParsedMode {
|
||||||
|
pipeline,
|
||||||
|
optimize_flag,
|
||||||
|
optimize_setting,
|
||||||
|
version,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ParsedMode {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
let mut has_written = false;
|
||||||
|
|
||||||
|
if let Some(pipeline) = self.pipeline {
|
||||||
|
pipeline.fmt(f)?;
|
||||||
|
if let Some(optimize_flag) = self.optimize_flag {
|
||||||
|
f.write_str(if optimize_flag { "+" } else { "-" })?;
|
||||||
|
}
|
||||||
|
has_written = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(optimize_setting) = self.optimize_setting {
|
||||||
|
if has_written {
|
||||||
|
f.write_str(" ")?;
|
||||||
|
}
|
||||||
|
optimize_setting.fmt(f)?;
|
||||||
|
has_written = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(version) = &self.version {
|
||||||
|
if has_written {
|
||||||
|
f.write_str(" ")?;
|
||||||
|
}
|
||||||
|
version.fmt(f)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ParsedMode> for String {
|
||||||
|
fn from(parsed_mode: ParsedMode) -> Self {
|
||||||
|
parsed_mode.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for ParsedMode {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
|
ParsedMode::from_str(&value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ParsedMode {
|
||||||
|
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
|
||||||
|
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
|
||||||
|
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|
||||||
|
|| EitherIter::A(ModePipeline::test_cases()),
|
||||||
|
|p| EitherIter::B(std::iter::once(*p)),
|
||||||
|
);
|
||||||
|
|
||||||
|
let optimize_flag_setting = self.optimize_flag.map(|flag| {
|
||||||
|
if flag {
|
||||||
|
ModeOptimizerSetting::M3
|
||||||
|
} else {
|
||||||
|
ModeOptimizerSetting::M0
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let optimize_flag_iter = match optimize_flag_setting {
|
||||||
|
Some(setting) => EitherIter::A(std::iter::once(setting)),
|
||||||
|
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|
||||||
|
|| EitherIter::A(optimize_flag_iter),
|
||||||
|
|s| EitherIter::B(std::iter::once(*s)),
|
||||||
|
);
|
||||||
|
|
||||||
|
pipeline_iter.flat_map(move |pipeline| {
|
||||||
|
optimize_settings_iter
|
||||||
|
.clone()
|
||||||
|
.map(move |optimize_setting| Mode {
|
||||||
|
pipeline,
|
||||||
|
optimize_setting,
|
||||||
|
version: self.version.clone(),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
|
||||||
|
/// This avoids any duplicate entries.
|
||||||
|
pub fn many_to_modes<'a>(
|
||||||
|
parsed: impl Iterator<Item = &'a ParsedMode>,
|
||||||
|
) -> impl Iterator<Item = Mode> {
|
||||||
|
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
|
||||||
|
modes.into_iter()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parsed_mode_from_str() {
|
||||||
|
let strings = vec![
|
||||||
|
("Mz", "Mz"),
|
||||||
|
("Y", "Y"),
|
||||||
|
("Y+", "Y+"),
|
||||||
|
("Y-", "Y-"),
|
||||||
|
("E", "E"),
|
||||||
|
("E+", "E+"),
|
||||||
|
("E-", "E-"),
|
||||||
|
("Y M0", "Y M0"),
|
||||||
|
("Y M1", "Y M1"),
|
||||||
|
("Y M2", "Y M2"),
|
||||||
|
("Y M3", "Y M3"),
|
||||||
|
("Y Ms", "Y Ms"),
|
||||||
|
("Y Mz", "Y Mz"),
|
||||||
|
("E M0", "E M0"),
|
||||||
|
("E M1", "E M1"),
|
||||||
|
("E M2", "E M2"),
|
||||||
|
("E M3", "E M3"),
|
||||||
|
("E Ms", "E Ms"),
|
||||||
|
("E Mz", "E Mz"),
|
||||||
|
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
|
||||||
|
("Y 0.8.0", "Y ^0.8.0"),
|
||||||
|
("E+ 0.8.0", "E+ ^0.8.0"),
|
||||||
|
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
|
||||||
|
("E Mz <0.7.0", "E Mz <0.7.0"),
|
||||||
|
// We can parse +- _and_ M1/M2 but the latter takes priority.
|
||||||
|
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
|
||||||
|
("E- M2 0.7.0", "E- M2 ^0.7.0"),
|
||||||
|
// We don't see this in the wild but it is parsed.
|
||||||
|
("<=0.8", "<=0.8"),
|
||||||
|
];
|
||||||
|
|
||||||
|
for (actual, expected) in strings {
|
||||||
|
let parsed = ParsedMode::from_str(actual)
|
||||||
|
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
||||||
|
assert_eq!(
|
||||||
|
expected,
|
||||||
|
parsed.to_string(),
|
||||||
|
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parsed_mode_to_test_modes() {
|
||||||
|
let strings = vec![
|
||||||
|
("Mz", vec!["Y Mz", "E Mz"]),
|
||||||
|
("Y", vec!["Y M0", "Y M3"]),
|
||||||
|
("E", vec!["E M0", "E M3"]),
|
||||||
|
("Y+", vec!["Y M3"]),
|
||||||
|
("Y-", vec!["Y M0"]),
|
||||||
|
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
|
||||||
|
(
|
||||||
|
"<=0.8",
|
||||||
|
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
|
||||||
|
),
|
||||||
|
];
|
||||||
|
|
||||||
|
for (actual, expected) in strings {
|
||||||
|
let parsed = ParsedMode::from_str(actual)
|
||||||
|
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
||||||
|
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
|
||||||
|
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
expected_set, actual_set,
|
||||||
|
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -0,0 +1,133 @@
|
|||||||
|
use std::{fmt::Display, path::PathBuf, str::FromStr};
|
||||||
|
|
||||||
|
use anyhow::{Context as _, bail};
|
||||||
|
|
||||||
|
use crate::types::Mode;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub enum ParsedTestSpecifier {
|
||||||
|
/// All of the test cases in the file should be ran across all of the specified modes
|
||||||
|
FileOrDirectory {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_or_directory_file_path: PathBuf,
|
||||||
|
},
|
||||||
|
/// Only a specific case within the metadata file should be ran across all of the modes in the
|
||||||
|
/// file.
|
||||||
|
Case {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// The index of the specific case to run.
|
||||||
|
case_idx: usize,
|
||||||
|
},
|
||||||
|
/// A specific case and a specific mode should be ran. This is the most specific out of all of
|
||||||
|
/// the specifier types.
|
||||||
|
CaseWithMode {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// The index of the specific case to run.
|
||||||
|
case_idx: usize,
|
||||||
|
|
||||||
|
/// The parsed mode that the test should be run in.
|
||||||
|
mode: Mode,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ParsedTestSpecifier {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path,
|
||||||
|
} => {
|
||||||
|
write!(f, "{}", metadata_or_directory_file_path.display())
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::Case {
|
||||||
|
metadata_file_path,
|
||||||
|
case_idx,
|
||||||
|
} => {
|
||||||
|
write!(f, "{}::{}", metadata_file_path.display(), case_idx)
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::CaseWithMode {
|
||||||
|
metadata_file_path,
|
||||||
|
case_idx,
|
||||||
|
mode,
|
||||||
|
} => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"{}::{}::{}",
|
||||||
|
metadata_file_path.display(),
|
||||||
|
case_idx,
|
||||||
|
mode
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ParsedTestSpecifier {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let mut split_iter = s.split("::");
|
||||||
|
|
||||||
|
let Some(path_string) = split_iter.next() else {
|
||||||
|
bail!("Could not find the path in the test specifier")
|
||||||
|
};
|
||||||
|
let path = PathBuf::from(path_string)
|
||||||
|
.canonicalize()
|
||||||
|
.context("Failed to canonicalize the path of the test")?;
|
||||||
|
|
||||||
|
let Some(case_idx_string) = split_iter.next() else {
|
||||||
|
return Ok(Self::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path: path,
|
||||||
|
});
|
||||||
|
};
|
||||||
|
let case_idx = usize::from_str(case_idx_string)
|
||||||
|
.context("Failed to parse the case idx of the test specifier from string")?;
|
||||||
|
|
||||||
|
// At this point the provided path must be a file.
|
||||||
|
if !path.is_file() {
|
||||||
|
bail!(
|
||||||
|
"Test specifier with a path and case idx must point to a file and not a directory"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(mode_string) = split_iter.next() else {
|
||||||
|
return Ok(Self::Case {
|
||||||
|
metadata_file_path: path,
|
||||||
|
case_idx,
|
||||||
|
});
|
||||||
|
};
|
||||||
|
let mode = Mode::from_str(mode_string)
|
||||||
|
.context("Failed to parse the mode string in the parsed test specifier")?;
|
||||||
|
|
||||||
|
Ok(Self::CaseWithMode {
|
||||||
|
metadata_file_path: path,
|
||||||
|
case_idx,
|
||||||
|
mode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ParsedTestSpecifier> for String {
|
||||||
|
fn from(value: ParsedTestSpecifier) -> Self {
|
||||||
|
value.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for ParsedTestSpecifier {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
|
value.parse()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<&str> for ParsedTestSpecifier {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: &str) -> Result<Self, Self::Error> {
|
||||||
|
value.parse()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -7,7 +7,10 @@ pragma solidity >=0.6.9;
|
|||||||
import "./callable.sol";
|
import "./callable.sol";
|
||||||
|
|
||||||
contract Main {
|
contract Main {
|
||||||
function main(uint[1] calldata p1, Callable callable) public returns(uint) {
|
function main(
|
||||||
|
uint[1] calldata p1,
|
||||||
|
Callable callable
|
||||||
|
) public pure returns (uint) {
|
||||||
return callable.f(p1);
|
return callable.f(p1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ semver = { workspace = true }
|
|||||||
temp-dir = { workspace = true }
|
temp-dir = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
serde_with = { workspace = true }
|
||||||
strum = { workspace = true }
|
strum = { workspace = true }
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
|
|||||||
+391
-127
@@ -12,20 +12,19 @@ use std::{
|
|||||||
|
|
||||||
use alloy::{
|
use alloy::{
|
||||||
genesis::Genesis,
|
genesis::Genesis,
|
||||||
hex::ToHexExt,
|
|
||||||
network::EthereumWallet,
|
network::EthereumWallet,
|
||||||
primitives::{FixedBytes, U256},
|
primitives::{B256, FixedBytes, U256},
|
||||||
signers::local::PrivateKeySigner,
|
signers::local::PrivateKeySigner,
|
||||||
};
|
};
|
||||||
use clap::{Parser, ValueEnum, ValueHint};
|
use clap::{Parser, ValueEnum, ValueHint};
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use serde::{Serialize, Serializer};
|
use serde::{Deserialize, Serialize, Serializer};
|
||||||
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
|
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
|
||||||
use temp_dir::TempDir;
|
use temp_dir::TempDir;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
#[command(name = "retester")]
|
#[command(name = "retester", term_width = 100)]
|
||||||
pub enum Context {
|
pub enum Context {
|
||||||
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
|
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
|
||||||
Test(Box<TestExecutionContext>),
|
Test(Box<TestExecutionContext>),
|
||||||
@@ -35,6 +34,9 @@ pub enum Context {
|
|||||||
|
|
||||||
/// Exports the JSON schema of the MatterLabs test format used by the tool.
|
/// Exports the JSON schema of the MatterLabs test format used by the tool.
|
||||||
ExportJsonSchema,
|
ExportJsonSchema,
|
||||||
|
|
||||||
|
/// Exports the genesis file of the desired platform.
|
||||||
|
ExportGenesis(Box<ExportGenesisContext>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Context {
|
impl Context {
|
||||||
@@ -45,6 +47,15 @@ impl Context {
|
|||||||
pub fn report_configuration(&self) -> &ReportConfiguration {
|
pub fn report_configuration(&self) -> &ReportConfiguration {
|
||||||
self.as_ref()
|
self.as_ref()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn update_for_profile(&mut self) {
|
||||||
|
match self {
|
||||||
|
Context::Test(ctx) => ctx.update_for_profile(),
|
||||||
|
Context::Benchmark(ctx) => ctx.update_for_profile(),
|
||||||
|
Context::ExportJsonSchema => {}
|
||||||
|
Context::ExportGenesis(..) => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsRef<WorkingDirectoryConfiguration> for Context {
|
impl AsRef<WorkingDirectoryConfiguration> for Context {
|
||||||
@@ -52,7 +63,7 @@ impl AsRef<WorkingDirectoryConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -62,7 +73,7 @@ impl AsRef<CorpusConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -72,7 +83,7 @@ impl AsRef<SolcConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -82,7 +93,7 @@ impl AsRef<ResolcConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -92,6 +103,7 @@ impl AsRef<GethConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -102,16 +114,18 @@ impl AsRef<KurtosisConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsRef<KitchensinkConfiguration> for Context {
|
impl AsRef<PolkadotParachainConfiguration> for Context {
|
||||||
fn as_ref(&self) -> &KitchensinkConfiguration {
|
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -122,6 +136,7 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -132,7 +147,7 @@ impl AsRef<EthRpcConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -141,7 +156,7 @@ impl AsRef<GenesisConfiguration> for Context {
|
|||||||
fn as_ref(&self) -> &GenesisConfiguration {
|
fn as_ref(&self) -> &GenesisConfiguration {
|
||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(..) => {
|
Self::Benchmark(..) | Self::ExportGenesis(..) => {
|
||||||
static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
|
static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
|
||||||
&GENESIS
|
&GENESIS
|
||||||
}
|
}
|
||||||
@@ -155,6 +170,7 @@ impl AsRef<WalletConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -165,7 +181,7 @@ impl AsRef<ConcurrencyConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -175,7 +191,7 @@ impl AsRef<CompilationConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -185,13 +201,40 @@ impl AsRef<ReportConfiguration> for Context {
|
|||||||
match self {
|
match self {
|
||||||
Self::Test(context) => context.as_ref().as_ref(),
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
Self::Benchmark(context) => context.as_ref().as_ref(),
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
Self::ExportJsonSchema => unreachable!(),
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
impl AsRef<IgnoreSuccessConfiguration> for Context {
|
||||||
|
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
|
||||||
|
match self {
|
||||||
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
|
Self::Benchmark(..) => unreachable!(),
|
||||||
|
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct TestExecutionContext {
|
pub struct TestExecutionContext {
|
||||||
|
/// The commandline profile to use. Different profiles change the defaults of the various cli
|
||||||
|
/// arguments.
|
||||||
|
#[arg(long = "profile", default_value_t = Profile::Default)]
|
||||||
|
pub profile: Profile,
|
||||||
|
|
||||||
|
/// The set of platforms that the differential tests should run on.
|
||||||
|
#[arg(
|
||||||
|
short = 'p',
|
||||||
|
long = "platform",
|
||||||
|
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
|
||||||
|
)]
|
||||||
|
pub platforms: Vec<PlatformIdentifier>,
|
||||||
|
|
||||||
|
/// The output format to use for the tool's output.
|
||||||
|
#[arg(short, long, default_value_t = OutputFormat::CargoTestLike)]
|
||||||
|
pub output_format: OutputFormat,
|
||||||
|
|
||||||
/// The working directory that the program will use for all of the temporary artifacts needed at
|
/// The working directory that the program will use for all of the temporary artifacts needed at
|
||||||
/// runtime.
|
/// runtime.
|
||||||
///
|
///
|
||||||
@@ -205,14 +248,6 @@ pub struct TestExecutionContext {
|
|||||||
)]
|
)]
|
||||||
pub working_directory: WorkingDirectoryConfiguration,
|
pub working_directory: WorkingDirectoryConfiguration,
|
||||||
|
|
||||||
/// The set of platforms that the differential tests should run on.
|
|
||||||
#[arg(
|
|
||||||
short = 'p',
|
|
||||||
long = "platform",
|
|
||||||
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
|
|
||||||
)]
|
|
||||||
pub platforms: Vec<PlatformIdentifier>,
|
|
||||||
|
|
||||||
/// Configuration parameters for the corpus files to use.
|
/// Configuration parameters for the corpus files to use.
|
||||||
#[clap(flatten, next_help_heading = "Corpus Configuration")]
|
#[clap(flatten, next_help_heading = "Corpus Configuration")]
|
||||||
pub corpus_configuration: CorpusConfiguration,
|
pub corpus_configuration: CorpusConfiguration,
|
||||||
@@ -225,6 +260,10 @@ pub struct TestExecutionContext {
|
|||||||
#[clap(flatten, next_help_heading = "Resolc Configuration")]
|
#[clap(flatten, next_help_heading = "Resolc Configuration")]
|
||||||
pub resolc_configuration: ResolcConfiguration,
|
pub resolc_configuration: ResolcConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Polkadot Parachain.
|
||||||
|
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
||||||
|
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the geth node.
|
/// Configuration parameters for the geth node.
|
||||||
#[clap(flatten, next_help_heading = "Geth Configuration")]
|
#[clap(flatten, next_help_heading = "Geth Configuration")]
|
||||||
pub geth_configuration: GethConfiguration,
|
pub geth_configuration: GethConfiguration,
|
||||||
@@ -233,10 +272,6 @@ pub struct TestExecutionContext {
|
|||||||
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
pub lighthouse_configuration: KurtosisConfiguration,
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Kitchensink.
|
|
||||||
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
|
|
||||||
pub kitchensink_configuration: KitchensinkConfiguration,
|
|
||||||
|
|
||||||
/// Configuration parameters for the Revive Dev Node.
|
/// Configuration parameters for the Revive Dev Node.
|
||||||
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
||||||
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
|
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
|
||||||
@@ -264,10 +299,55 @@ pub struct TestExecutionContext {
|
|||||||
/// Configuration parameters for the report.
|
/// Configuration parameters for the report.
|
||||||
#[clap(flatten, next_help_heading = "Report Configuration")]
|
#[clap(flatten, next_help_heading = "Report Configuration")]
|
||||||
pub report_configuration: ReportConfiguration,
|
pub report_configuration: ReportConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for ignoring certain test cases based on the report
|
||||||
|
#[clap(flatten, next_help_heading = "Ignore Success Configuration")]
|
||||||
|
pub ignore_success_configuration: IgnoreSuccessConfiguration,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
impl TestExecutionContext {
|
||||||
|
pub fn update_for_profile(&mut self) {
|
||||||
|
match self.profile {
|
||||||
|
Profile::Default => {}
|
||||||
|
Profile::Debug => {
|
||||||
|
let default_concurrency_config =
|
||||||
|
ConcurrencyConfiguration::parse_from(["concurrency-configuration"]);
|
||||||
|
let working_directory_config = WorkingDirectoryConfiguration::default();
|
||||||
|
|
||||||
|
if self.concurrency_configuration.number_of_nodes
|
||||||
|
== default_concurrency_config.number_of_nodes
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_of_nodes = 1;
|
||||||
|
}
|
||||||
|
if self.concurrency_configuration.number_of_threads
|
||||||
|
== default_concurrency_config.number_of_threads
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_of_threads = 5;
|
||||||
|
}
|
||||||
|
if self.concurrency_configuration.number_concurrent_tasks
|
||||||
|
== default_concurrency_config.number_concurrent_tasks
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_concurrent_tasks = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if working_directory_config == self.working_directory {
|
||||||
|
let home_directory =
|
||||||
|
PathBuf::from(std::env::var("HOME").expect("Home dir not found"));
|
||||||
|
let working_directory = home_directory.join(".retester-workdir");
|
||||||
|
self.working_directory = WorkingDirectoryConfiguration::Path(working_directory)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct BenchmarkingContext {
|
pub struct BenchmarkingContext {
|
||||||
|
/// The commandline profile to use. Different profiles change the defaults of the various cli
|
||||||
|
/// arguments.
|
||||||
|
#[arg(long = "profile", default_value_t = Profile::Default)]
|
||||||
|
pub profile: Profile,
|
||||||
|
|
||||||
/// The working directory that the program will use for all of the temporary artifacts needed at
|
/// The working directory that the program will use for all of the temporary artifacts needed at
|
||||||
/// runtime.
|
/// runtime.
|
||||||
///
|
///
|
||||||
@@ -314,9 +394,9 @@ pub struct BenchmarkingContext {
|
|||||||
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
pub lighthouse_configuration: KurtosisConfiguration,
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Kitchensink.
|
/// Configuration parameters for the Polkadot Parachain.
|
||||||
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
|
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
||||||
pub kitchensink_configuration: KitchensinkConfiguration,
|
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Revive Dev Node.
|
/// Configuration parameters for the Revive Dev Node.
|
||||||
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
||||||
@@ -343,9 +423,71 @@ pub struct BenchmarkingContext {
|
|||||||
pub report_configuration: ReportConfiguration,
|
pub report_configuration: ReportConfiguration,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl BenchmarkingContext {
|
||||||
|
pub fn update_for_profile(&mut self) {
|
||||||
|
match self.profile {
|
||||||
|
Profile::Default => {}
|
||||||
|
Profile::Debug => {
|
||||||
|
let default_concurrency_config =
|
||||||
|
ConcurrencyConfiguration::parse_from(["concurrency-configuration"]);
|
||||||
|
let working_directory_config = WorkingDirectoryConfiguration::default();
|
||||||
|
|
||||||
|
if self.concurrency_configuration.number_of_nodes
|
||||||
|
== default_concurrency_config.number_of_nodes
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_of_nodes = 1;
|
||||||
|
}
|
||||||
|
if self.concurrency_configuration.number_of_threads
|
||||||
|
== default_concurrency_config.number_of_threads
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_of_threads = 5;
|
||||||
|
}
|
||||||
|
if self.concurrency_configuration.number_concurrent_tasks
|
||||||
|
== default_concurrency_config.number_concurrent_tasks
|
||||||
|
{
|
||||||
|
self.concurrency_configuration.number_concurrent_tasks = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if working_directory_config == self.working_directory {
|
||||||
|
let home_directory =
|
||||||
|
PathBuf::from(std::env::var("HOME").expect("Home dir not found"));
|
||||||
|
let working_directory = home_directory.join(".retester-workdir");
|
||||||
|
self.working_directory = WorkingDirectoryConfiguration::Path(working_directory)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
|
pub struct ExportGenesisContext {
|
||||||
|
/// The platform of choice to export the genesis for.
|
||||||
|
pub platform: PlatformIdentifier,
|
||||||
|
|
||||||
|
/// Configuration parameters for the geth node.
|
||||||
|
#[clap(flatten, next_help_heading = "Geth Configuration")]
|
||||||
|
pub geth_configuration: GethConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the lighthouse node.
|
||||||
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Polkadot Parachain.
|
||||||
|
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
||||||
|
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Revive Dev Node.
|
||||||
|
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
||||||
|
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the wallet.
|
||||||
|
#[clap(flatten, next_help_heading = "Wallet Configuration")]
|
||||||
|
pub wallet_configuration: WalletConfiguration,
|
||||||
|
}
|
||||||
|
|
||||||
impl Default for TestExecutionContext {
|
impl Default for TestExecutionContext {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::parse_from(["execution-context"])
|
Self::parse_from(["execution-context", "--test", "."])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -379,15 +521,15 @@ impl AsRef<GethConfiguration> for TestExecutionContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsRef<KurtosisConfiguration> for TestExecutionContext {
|
impl AsRef<PolkadotParachainConfiguration> for TestExecutionContext {
|
||||||
fn as_ref(&self) -> &KurtosisConfiguration {
|
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
||||||
&self.lighthouse_configuration
|
&self.polkadot_parachain_configuration
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
|
impl AsRef<KurtosisConfiguration> for TestExecutionContext {
|
||||||
fn as_ref(&self) -> &KitchensinkConfiguration {
|
fn as_ref(&self) -> &KurtosisConfiguration {
|
||||||
&self.kitchensink_configuration
|
&self.lighthouse_configuration
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -433,9 +575,15 @@ impl AsRef<ReportConfiguration> for TestExecutionContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AsRef<IgnoreSuccessConfiguration> for TestExecutionContext {
|
||||||
|
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
|
||||||
|
&self.ignore_success_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl Default for BenchmarkingContext {
|
impl Default for BenchmarkingContext {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::parse_from(["execution-context"])
|
Self::parse_from(["benchmarking-context", "--test", "."])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -475,9 +623,9 @@ impl AsRef<KurtosisConfiguration> for BenchmarkingContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsRef<KitchensinkConfiguration> for BenchmarkingContext {
|
impl AsRef<PolkadotParachainConfiguration> for BenchmarkingContext {
|
||||||
fn as_ref(&self) -> &KitchensinkConfiguration {
|
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
||||||
&self.kitchensink_configuration
|
&self.polkadot_parachain_configuration
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -517,16 +665,65 @@ impl AsRef<ReportConfiguration> for BenchmarkingContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for ExportGenesisContext {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::parse_from(["export-genesis-context"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<GethConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &GethConfiguration {
|
||||||
|
&self.geth_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<KurtosisConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &KurtosisConfiguration {
|
||||||
|
&self.lighthouse_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<PolkadotParachainConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
||||||
|
&self.polkadot_parachain_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<ReviveDevNodeConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
||||||
|
&self.revive_dev_node_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsRef<WalletConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &WalletConfiguration {
|
||||||
|
&self.wallet_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for the corpus files to use for the execution.
|
/// A set of configuration parameters for the corpus files to use for the execution.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[serde_with::serde_as]
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct CorpusConfiguration {
|
pub struct CorpusConfiguration {
|
||||||
/// A list of test corpus JSON files to be tested.
|
/// A list of test specifiers for the tests that the tool should run.
|
||||||
#[arg(short = 'c', long = "corpus")]
|
///
|
||||||
pub paths: Vec<PathBuf>,
|
/// Test specifiers follow the following format:
|
||||||
|
///
|
||||||
|
/// - `{directory_path|metadata_file_path}`: A path to a metadata file where all of the cases
|
||||||
|
/// live and should be run. Alternatively, it points to a directory instructing the framework
|
||||||
|
/// to discover of the metadata files that live there an execute them.
|
||||||
|
/// - `{metadata_file_path}::{case_idx}`: The path to a metadata file and then a case idx
|
||||||
|
/// separated by two colons. This specifies that only this specific test case within the
|
||||||
|
/// metadata file should be executed.
|
||||||
|
/// - `{metadata_file_path}::{case_idx}::{mode}`: This is very similar to the above specifier
|
||||||
|
/// with the exception that in this case the mode is specified and will be used in the test.
|
||||||
|
#[serde_as(as = "Vec<serde_with::DisplayFromStr>")]
|
||||||
|
#[arg(short = 't', long = "test", required = true)]
|
||||||
|
pub test_specifiers: Vec<ParsedTestSpecifier>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for Solc.
|
/// A set of configuration parameters for Solc.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct SolcConfiguration {
|
pub struct SolcConfiguration {
|
||||||
/// Specifies the default version of the Solc compiler that should be used if there is no
|
/// Specifies the default version of the Solc compiler that should be used if there is no
|
||||||
/// override specified by one of the test cases.
|
/// override specified by one of the test cases.
|
||||||
@@ -535,7 +732,7 @@ pub struct SolcConfiguration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for Resolc.
|
/// A set of configuration parameters for Resolc.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct ResolcConfiguration {
|
pub struct ResolcConfiguration {
|
||||||
/// Specifies the path of the resolc compiler to be used by the tool.
|
/// Specifies the path of the resolc compiler to be used by the tool.
|
||||||
///
|
///
|
||||||
@@ -545,8 +742,32 @@ pub struct ResolcConfiguration {
|
|||||||
pub path: PathBuf,
|
pub path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A set of configuration parameters for Polkadot Parachain.
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
|
pub struct PolkadotParachainConfiguration {
|
||||||
|
/// Specifies the path of the polkadot-parachain node to be used by the tool.
|
||||||
|
///
|
||||||
|
/// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary
|
||||||
|
/// that's provided in the user's $PATH.
|
||||||
|
#[clap(
|
||||||
|
id = "polkadot-parachain.path",
|
||||||
|
long = "polkadot-parachain.path",
|
||||||
|
default_value = "polkadot-parachain"
|
||||||
|
)]
|
||||||
|
pub path: PathBuf,
|
||||||
|
|
||||||
|
/// The amount of time to wait upon startup before considering that the node timed out.
|
||||||
|
#[clap(
|
||||||
|
id = "polkadot-parachain.start-timeout-ms",
|
||||||
|
long = "polkadot-parachain.start-timeout-ms",
|
||||||
|
default_value = "5000",
|
||||||
|
value_parser = parse_duration
|
||||||
|
)]
|
||||||
|
pub start_timeout_ms: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for Geth.
|
/// A set of configuration parameters for Geth.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct GethConfiguration {
|
pub struct GethConfiguration {
|
||||||
/// Specifies the path of the geth node to be used by the tool.
|
/// Specifies the path of the geth node to be used by the tool.
|
||||||
///
|
///
|
||||||
@@ -566,7 +787,7 @@ pub struct GethConfiguration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for kurtosis.
|
/// A set of configuration parameters for kurtosis.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct KurtosisConfiguration {
|
pub struct KurtosisConfiguration {
|
||||||
/// Specifies the path of the kurtosis node to be used by the tool.
|
/// Specifies the path of the kurtosis node to be used by the tool.
|
||||||
///
|
///
|
||||||
@@ -580,32 +801,8 @@ pub struct KurtosisConfiguration {
|
|||||||
pub path: PathBuf,
|
pub path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for Kitchensink.
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
|
||||||
pub struct KitchensinkConfiguration {
|
|
||||||
/// Specifies the path of the kitchensink node to be used by the tool.
|
|
||||||
///
|
|
||||||
/// If this is not specified, then the tool assumes that it should use the kitchensink binary
|
|
||||||
/// that's provided in the user's $PATH.
|
|
||||||
#[clap(
|
|
||||||
id = "kitchensink.path",
|
|
||||||
long = "kitchensink.path",
|
|
||||||
default_value = "substrate-node"
|
|
||||||
)]
|
|
||||||
pub path: PathBuf,
|
|
||||||
|
|
||||||
/// The amount of time to wait upon startup before considering that the node timed out.
|
|
||||||
#[clap(
|
|
||||||
id = "kitchensink.start-timeout-ms",
|
|
||||||
long = "kitchensink.start-timeout-ms",
|
|
||||||
default_value = "30000",
|
|
||||||
value_parser = parse_duration
|
|
||||||
)]
|
|
||||||
pub start_timeout_ms: Duration,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A set of configuration parameters for the revive dev node.
|
/// A set of configuration parameters for the revive dev node.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct ReviveDevNodeConfiguration {
|
pub struct ReviveDevNodeConfiguration {
|
||||||
/// Specifies the path of the revive dev node to be used by the tool.
|
/// Specifies the path of the revive dev node to be used by the tool.
|
||||||
///
|
///
|
||||||
@@ -626,10 +823,36 @@ pub struct ReviveDevNodeConfiguration {
|
|||||||
value_parser = parse_duration
|
value_parser = parse_duration
|
||||||
)]
|
)]
|
||||||
pub start_timeout_ms: Duration,
|
pub start_timeout_ms: Duration,
|
||||||
|
|
||||||
|
/// The consensus to use for the spawned revive-dev-node.
|
||||||
|
#[clap(
|
||||||
|
id = "revive-dev-node.consensus",
|
||||||
|
long = "revive-dev-node.consensus",
|
||||||
|
default_value = "instant-seal"
|
||||||
|
)]
|
||||||
|
pub consensus: String,
|
||||||
|
|
||||||
|
/// Specifies the connection string of an existing node that's not managed by the framework.
|
||||||
|
///
|
||||||
|
/// If this argument is specified then the framework will not spawn certain nodes itself but
|
||||||
|
/// rather it will opt to using the existing node's through their provided connection strings.
|
||||||
|
///
|
||||||
|
/// This means that if `ConcurrencyConfiguration.number_of_nodes` is 10 and we only specify the
|
||||||
|
/// connection strings of 2 nodes here, then nodes 0 and 1 will use the provided connection
|
||||||
|
/// strings and nodes 2 through 10 (exclusive) will all be spawned and managed by the framework.
|
||||||
|
///
|
||||||
|
/// Thus, if you want all of the transactions and tests to happen against the node that you
|
||||||
|
/// spawned and manage then you need to specify a `ConcurrencyConfiguration.number_of_nodes` of
|
||||||
|
/// 1.
|
||||||
|
#[clap(
|
||||||
|
id = "revive-dev-node.existing-rpc-url",
|
||||||
|
long = "revive-dev-node.existing-rpc-url"
|
||||||
|
)]
|
||||||
|
pub existing_rpc_url: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for the ETH RPC.
|
/// A set of configuration parameters for the ETH RPC.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct EthRpcConfiguration {
|
pub struct EthRpcConfiguration {
|
||||||
/// Specifies the path of the ETH RPC to be used by the tool.
|
/// Specifies the path of the ETH RPC to be used by the tool.
|
||||||
///
|
///
|
||||||
@@ -649,7 +872,7 @@ pub struct EthRpcConfiguration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for the genesis.
|
/// A set of configuration parameters for the genesis.
|
||||||
#[derive(Clone, Debug, Default, Parser, Serialize)]
|
#[derive(Clone, Debug, Default, Parser, Serialize, Deserialize)]
|
||||||
pub struct GenesisConfiguration {
|
pub struct GenesisConfiguration {
|
||||||
/// Specifies the path of the genesis file to use for the nodes that are started.
|
/// Specifies the path of the genesis file to use for the nodes that are started.
|
||||||
///
|
///
|
||||||
@@ -687,15 +910,14 @@ impl GenesisConfiguration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for the wallet.
|
/// A set of configuration parameters for the wallet.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct WalletConfiguration {
|
pub struct WalletConfiguration {
|
||||||
/// The private key of the default signer.
|
/// The private key of the default signer.
|
||||||
#[clap(
|
#[clap(
|
||||||
long = "wallet.default-private-key",
|
long = "wallet.default-private-key",
|
||||||
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
|
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
|
||||||
)]
|
)]
|
||||||
#[serde(serialize_with = "serialize_private_key")]
|
default_key: B256,
|
||||||
default_key: PrivateKeySigner,
|
|
||||||
|
|
||||||
/// This argument controls which private keys the nodes should have access to and be added to
|
/// This argument controls which private keys the nodes should have access to and be added to
|
||||||
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
|
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
|
||||||
@@ -713,7 +935,8 @@ impl WalletConfiguration {
|
|||||||
pub fn wallet(&self) -> Arc<EthereumWallet> {
|
pub fn wallet(&self) -> Arc<EthereumWallet> {
|
||||||
self.wallet
|
self.wallet
|
||||||
.get_or_init(|| {
|
.get_or_init(|| {
|
||||||
let mut wallet = EthereumWallet::new(self.default_key.clone());
|
let mut wallet =
|
||||||
|
EthereumWallet::new(PrivateKeySigner::from_bytes(&self.default_key).unwrap());
|
||||||
for signer in (1..=self.additional_keys)
|
for signer in (1..=self.additional_keys)
|
||||||
.map(|id| U256::from(id))
|
.map(|id| U256::from(id))
|
||||||
.map(|id| id.to_be_bytes::<32>())
|
.map(|id| id.to_be_bytes::<32>())
|
||||||
@@ -731,15 +954,8 @@ impl WalletConfiguration {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: Serializer,
|
|
||||||
{
|
|
||||||
value.to_bytes().encode_hex().serialize(serializer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A set of configuration for concurrency.
|
/// A set of configuration for concurrency.
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct ConcurrencyConfiguration {
|
pub struct ConcurrencyConfiguration {
|
||||||
/// Determines the amount of nodes that will be spawned for each chain.
|
/// Determines the amount of nodes that will be spawned for each chain.
|
||||||
#[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
|
#[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
|
||||||
@@ -749,42 +965,38 @@ pub struct ConcurrencyConfiguration {
|
|||||||
#[arg(
|
#[arg(
|
||||||
long = "concurrency.number-of-threads",
|
long = "concurrency.number-of-threads",
|
||||||
default_value_t = std::thread::available_parallelism()
|
default_value_t = std::thread::available_parallelism()
|
||||||
.map(|n| n.get())
|
.map(|n| n.get() * 4 / 6)
|
||||||
.unwrap_or(1)
|
.unwrap_or(1)
|
||||||
)]
|
)]
|
||||||
pub number_of_threads: usize,
|
pub number_of_threads: usize,
|
||||||
|
|
||||||
/// Determines the amount of concurrent tasks that will be spawned to run tests.
|
/// Determines the amount of concurrent tasks that will be spawned to run tests. This means that
|
||||||
|
/// at any given time there is `concurrency.number-of-concurrent-tasks` tests concurrently
|
||||||
|
/// executing.
|
||||||
///
|
///
|
||||||
/// Defaults to 10 x the number of nodes.
|
/// Note that a task limit of `0` means no limit on the number of concurrent tasks.
|
||||||
#[arg(long = "concurrency.number-of-concurrent-tasks")]
|
#[arg(long = "concurrency.number-of-concurrent-tasks", default_value_t = 500)]
|
||||||
number_concurrent_tasks: Option<usize>,
|
number_concurrent_tasks: usize,
|
||||||
|
|
||||||
/// Determines if the concurrency limit should be ignored or not.
|
|
||||||
#[arg(long = "concurrency.ignore-concurrency-limit")]
|
|
||||||
ignore_concurrency_limit: bool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ConcurrencyConfiguration {
|
impl ConcurrencyConfiguration {
|
||||||
pub fn concurrency_limit(&self) -> Option<usize> {
|
pub fn concurrency_limit(&self) -> Option<usize> {
|
||||||
match self.ignore_concurrency_limit {
|
if self.number_concurrent_tasks == 0 {
|
||||||
true => None,
|
None
|
||||||
false => Some(
|
} else {
|
||||||
self.number_concurrent_tasks
|
Some(self.number_concurrent_tasks)
|
||||||
.unwrap_or(20 * self.number_of_nodes),
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct CompilationConfiguration {
|
pub struct CompilationConfiguration {
|
||||||
/// Controls if the compilation cache should be invalidated or not.
|
/// Controls if the compilation cache should be invalidated or not.
|
||||||
#[arg(long = "compilation.invalidate-cache")]
|
#[arg(long = "compilation.invalidate-cache")]
|
||||||
pub invalidate_compilation_cache: bool,
|
pub invalidate_compilation_cache: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct ReportConfiguration {
|
pub struct ReportConfiguration {
|
||||||
/// Controls if the compiler input is included in the final report.
|
/// Controls if the compiler input is included in the final report.
|
||||||
#[clap(long = "report.include-compiler-input")]
|
#[clap(long = "report.include-compiler-input")]
|
||||||
@@ -795,8 +1007,15 @@ pub struct ReportConfiguration {
|
|||||||
pub include_compiler_output: bool,
|
pub include_compiler_output: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
|
pub struct IgnoreSuccessConfiguration {
|
||||||
|
/// The path of the report generated by the tool to use to ignore the cases that succeeded.
|
||||||
|
#[clap(long = "ignore-success.report-path")]
|
||||||
|
pub path: Option<PathBuf>,
|
||||||
|
}
|
||||||
|
|
||||||
/// Represents the working directory that the program uses.
|
/// Represents the working directory that the program uses.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
pub enum WorkingDirectoryConfiguration {
|
pub enum WorkingDirectoryConfiguration {
|
||||||
/// A temporary directory is used as the working directory. This will be removed when dropped.
|
/// A temporary directory is used as the working directory. This will be removed when dropped.
|
||||||
TemporaryDirectory(Arc<TempDir>),
|
TemporaryDirectory(Arc<TempDir>),
|
||||||
@@ -804,6 +1023,24 @@ pub enum WorkingDirectoryConfiguration {
|
|||||||
Path(PathBuf),
|
Path(PathBuf),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Serialize for WorkingDirectoryConfiguration {
|
||||||
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: Serializer,
|
||||||
|
{
|
||||||
|
self.as_path().serialize(serializer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> Deserialize<'a> for WorkingDirectoryConfiguration {
|
||||||
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'a>,
|
||||||
|
{
|
||||||
|
PathBuf::deserialize(deserializer).map(Self::Path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl WorkingDirectoryConfiguration {
|
impl WorkingDirectoryConfiguration {
|
||||||
pub fn as_path(&self) -> &Path {
|
pub fn as_path(&self) -> &Path {
|
||||||
self.as_ref()
|
self.as_ref()
|
||||||
@@ -853,24 +1090,13 @@ impl Display for WorkingDirectoryConfiguration {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Serialize for WorkingDirectoryConfiguration {
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::Serializer,
|
|
||||||
{
|
|
||||||
self.as_path().serialize(serializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
|
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
|
||||||
u64::from_str(s)
|
u64::from_str(s)
|
||||||
.map(Duration::from_millis)
|
.map(Duration::from_millis)
|
||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The Solidity compatible node implementation.
|
/// The output format to use for the test execution output.
|
||||||
///
|
|
||||||
/// This describes the solutions to be tested against on a high level.
|
|
||||||
#[derive(
|
#[derive(
|
||||||
Clone,
|
Clone,
|
||||||
Copy,
|
Copy,
|
||||||
@@ -881,6 +1107,7 @@ fn parse_duration(s: &str) -> anyhow::Result<Duration> {
|
|||||||
Ord,
|
Ord,
|
||||||
Hash,
|
Hash,
|
||||||
Serialize,
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
ValueEnum,
|
ValueEnum,
|
||||||
EnumString,
|
EnumString,
|
||||||
Display,
|
Display,
|
||||||
@@ -888,9 +1115,46 @@ fn parse_duration(s: &str) -> anyhow::Result<Duration> {
|
|||||||
IntoStaticStr,
|
IntoStaticStr,
|
||||||
)]
|
)]
|
||||||
#[strum(serialize_all = "kebab-case")]
|
#[strum(serialize_all = "kebab-case")]
|
||||||
pub enum TestingPlatform {
|
pub enum OutputFormat {
|
||||||
/// The go-ethereum reference full node EVM implementation.
|
/// The legacy format that was used in the past for the output.
|
||||||
Geth,
|
Legacy,
|
||||||
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
|
|
||||||
Kitchensink,
|
/// An output format that looks heavily resembles the output from `cargo test`.
|
||||||
|
CargoTestLike,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Command line profiles used to override the default values provided for the commands.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
Default,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
)]
|
||||||
|
#[strum(serialize_all = "kebab-case")]
|
||||||
|
pub enum Profile {
|
||||||
|
/// The default profile used by the framework. This profile is optimized to make the test
|
||||||
|
/// and workload execution happen as fast as possible.
|
||||||
|
#[default]
|
||||||
|
Default,
|
||||||
|
|
||||||
|
/// A debug profile optimized for use cases when certain tests are being debugged. This profile
|
||||||
|
/// sets up the framework with the following:
|
||||||
|
///
|
||||||
|
/// * `concurrency.number-of-nodes` set to 1 node.
|
||||||
|
/// * `concurrency.number-of-concurrent-tasks` set to 1 such that tests execute sequentially.
|
||||||
|
/// * `concurrency.number-of-threads` set to 5.
|
||||||
|
/// * `working-directory` set to ~/.retester-workdir
|
||||||
|
Debug,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ revive-dt-node = { workspace = true }
|
|||||||
revive-dt-node-interaction = { workspace = true }
|
revive-dt-node-interaction = { workspace = true }
|
||||||
revive-dt-report = { workspace = true }
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
|
ansi_term = { workspace = true }
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
bson = { workspace = true }
|
bson = { workspace = true }
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ use alloy::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
use anyhow::{Context as _, Result, bail};
|
use anyhow::{Context as _, Result, bail};
|
||||||
|
use futures::TryFutureExt;
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
use revive_dt_common::{
|
use revive_dt_common::{
|
||||||
futures::{PollingWaitBehavior, poll},
|
futures::{PollingWaitBehavior, poll},
|
||||||
@@ -30,12 +31,12 @@ use revive_dt_common::{
|
|||||||
use revive_dt_format::{
|
use revive_dt_format::{
|
||||||
metadata::{ContractInstance, ContractPathAndIdent},
|
metadata::{ContractInstance, ContractPathAndIdent},
|
||||||
steps::{
|
steps::{
|
||||||
AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method,
|
AllocateAccountStep, Calldata, EtherValue, FunctionCallStep, Method, RepeatStep, Step,
|
||||||
RepeatStep, Step, StepAddress, StepIdx, StepPath, StorageEmptyAssertionStep,
|
StepIdx, StepPath,
|
||||||
},
|
},
|
||||||
traits::{ResolutionContext, ResolverApi},
|
traits::{ResolutionContext, ResolverApi},
|
||||||
};
|
};
|
||||||
use tokio::sync::{Mutex, mpsc::UnboundedSender};
|
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
|
||||||
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
|
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
@@ -123,13 +124,7 @@ where
|
|||||||
&self.platform_information.reporter,
|
&self.platform_information.reporter,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.inspect_err(|err| {
|
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
||||||
error!(
|
|
||||||
?err,
|
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
"Pre-linking compilation failed"
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.context("Failed to produce the pre-linking compiled contracts")?;
|
.context("Failed to produce the pre-linking compiled contracts")?;
|
||||||
|
|
||||||
let mut deployed_libraries = None::<HashMap<_, _>>;
|
let mut deployed_libraries = None::<HashMap<_, _>>;
|
||||||
@@ -137,13 +132,7 @@ where
|
|||||||
.test_definition
|
.test_definition
|
||||||
.metadata
|
.metadata
|
||||||
.contract_sources()
|
.contract_sources()
|
||||||
.inspect_err(|err| {
|
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
|
||||||
error!(
|
|
||||||
?err,
|
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
"Failed to retrieve contract sources from metadata"
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.context("Failed to get the contract instances from the metadata file")?;
|
.context("Failed to get the contract instances from the metadata file")?;
|
||||||
for library_instance in self
|
for library_instance in self
|
||||||
.test_definition
|
.test_definition
|
||||||
@@ -191,20 +180,19 @@ where
|
|||||||
TransactionRequest::default().from(deployer_address),
|
TransactionRequest::default().from(deployer_address),
|
||||||
code,
|
code,
|
||||||
);
|
);
|
||||||
let receipt = self.execute_transaction(tx).await.inspect_err(|err| {
|
let receipt = self
|
||||||
|
.execute_transaction(tx, None)
|
||||||
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| {
|
||||||
error!(
|
error!(
|
||||||
?err,
|
?err,
|
||||||
%library_instance,
|
%library_instance,
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
"Failed to deploy the library"
|
"Failed to deploy the library"
|
||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
debug!(
|
debug!(?library_instance, "Deployed library");
|
||||||
?library_instance,
|
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
"Deployed library"
|
|
||||||
);
|
|
||||||
|
|
||||||
let library_address = receipt
|
let library_address = receipt
|
||||||
.contract_address
|
.contract_address
|
||||||
@@ -227,15 +215,25 @@ where
|
|||||||
&self.platform_information.reporter,
|
&self.platform_information.reporter,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.inspect_err(|err| {
|
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
|
||||||
error!(
|
|
||||||
?err,
|
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
"Post-linking compilation failed"
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.context("Failed to compile the post-link contracts")?;
|
.context("Failed to compile the post-link contracts")?;
|
||||||
|
|
||||||
|
for (contract_path, contract_name_to_info_mapping) in compiler_output.contracts.iter() {
|
||||||
|
for (contract_name, (contract_bytecode, _)) in contract_name_to_info_mapping.iter() {
|
||||||
|
let contract_bytecode = hex::decode(contract_bytecode)
|
||||||
|
.expect("Impossible for us to get an undecodable bytecode after linking");
|
||||||
|
|
||||||
|
self.platform_information
|
||||||
|
.reporter
|
||||||
|
.report_contract_information_event(
|
||||||
|
contract_path.to_path_buf(),
|
||||||
|
contract_name.clone(),
|
||||||
|
contract_bytecode.len(),
|
||||||
|
)
|
||||||
|
.expect("Should not fail");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
self.execution_state = ExecutionState::new(
|
self.execution_state = ExecutionState::new(
|
||||||
compiler_output.contracts,
|
compiler_output.contracts,
|
||||||
deployed_libraries.unwrap_or_default(),
|
deployed_libraries.unwrap_or_default(),
|
||||||
@@ -269,7 +267,6 @@ where
|
|||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
driver_id = self.driver_id,
|
driver_id = self.driver_id,
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
%step_path,
|
%step_path,
|
||||||
),
|
),
|
||||||
err(Debug),
|
err(Debug),
|
||||||
@@ -298,22 +295,18 @@ where
|
|||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
||||||
pub async fn execute_function_call(
|
pub async fn execute_function_call(
|
||||||
&mut self,
|
&mut self,
|
||||||
_: &StepPath,
|
step_path: &StepPath,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
) -> Result<usize> {
|
) -> Result<usize> {
|
||||||
let deployment_receipts = self
|
let deployment_receipts = self
|
||||||
.handle_function_call_contract_deployment(step)
|
.handle_function_call_contract_deployment(step_path, step)
|
||||||
.await
|
.await
|
||||||
.context("Failed to deploy contracts for the function call step")?;
|
.context("Failed to deploy contracts for the function call step")?;
|
||||||
let execution_receipt = self
|
let transaction_hash = self
|
||||||
.handle_function_call_execution(step, deployment_receipts)
|
.handle_function_call_execution(step_path, step, deployment_receipts)
|
||||||
.await
|
.await
|
||||||
.context("Failed to handle the function call execution")?;
|
.context("Failed to handle the function call execution")?;
|
||||||
let tracing_result = self
|
self.handle_function_call_variable_assignment(step, transaction_hash)
|
||||||
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
|
|
||||||
.await
|
|
||||||
.context("Failed to handle the function call call frame tracing")?;
|
|
||||||
self.handle_function_call_variable_assignment(step, &tracing_result)
|
|
||||||
.await
|
.await
|
||||||
.context("Failed to handle function call variable assignment")?;
|
.context("Failed to handle function call variable assignment")?;
|
||||||
Ok(1)
|
Ok(1)
|
||||||
@@ -321,6 +314,7 @@ where
|
|||||||
|
|
||||||
async fn handle_function_call_contract_deployment(
|
async fn handle_function_call_contract_deployment(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
||||||
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
||||||
@@ -352,7 +346,13 @@ where
|
|||||||
.await?
|
.await?
|
||||||
};
|
};
|
||||||
if let (_, _, Some(receipt)) = self
|
if let (_, _, Some(receipt)) = self
|
||||||
.get_or_deploy_contract_instance(&instance, caller, calldata, value)
|
.get_or_deploy_contract_instance(
|
||||||
|
&instance,
|
||||||
|
caller,
|
||||||
|
calldata,
|
||||||
|
value,
|
||||||
|
Some(step_path),
|
||||||
|
)
|
||||||
.await
|
.await
|
||||||
.context("Failed to get or deploy contract instance during input execution")?
|
.context("Failed to get or deploy contract instance during input execution")?
|
||||||
{
|
{
|
||||||
@@ -365,20 +365,22 @@ where
|
|||||||
|
|
||||||
async fn handle_function_call_execution(
|
async fn handle_function_call_execution(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
||||||
) -> Result<TransactionReceipt> {
|
) -> Result<TxHash> {
|
||||||
match step.method {
|
match step.method {
|
||||||
// This step was already executed when `handle_step` was called. We just need to
|
// This step was already executed when `handle_step` was called. We just need to
|
||||||
// lookup the transaction receipt in this case and continue on.
|
// lookup the transaction receipt in this case and continue on.
|
||||||
Method::Deployer => deployment_receipts
|
Method::Deployer => deployment_receipts
|
||||||
.remove(&step.instance)
|
.remove(&step.instance)
|
||||||
.context("Failed to find deployment receipt for constructor call"),
|
.context("Failed to find deployment receipt for constructor call")
|
||||||
|
.map(|receipt| receipt.transaction_hash),
|
||||||
Method::Fallback | Method::FunctionName(_) => {
|
Method::Fallback | Method::FunctionName(_) => {
|
||||||
let tx = step
|
let tx = step
|
||||||
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
||||||
.await?;
|
.await?;
|
||||||
self.execute_transaction(tx).await
|
Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -417,15 +419,19 @@ where
|
|||||||
async fn handle_function_call_variable_assignment(
|
async fn handle_function_call_variable_assignment(
|
||||||
&mut self,
|
&mut self,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
tracing_result: &CallFrame,
|
tx_hash: TxHash,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let Some(ref assignments) = step.variable_assignments else {
|
let Some(ref assignments) = step.variable_assignments else {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
// Handling the return data variable assignments.
|
// Handling the return data variable assignments.
|
||||||
|
let callframe = OnceCell::new();
|
||||||
for (variable_name, output_word) in assignments.return_data.iter().zip(
|
for (variable_name, output_word) in assignments.return_data.iter().zip(
|
||||||
tracing_result
|
callframe
|
||||||
|
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
|
||||||
|
.await
|
||||||
|
.context("Failed to get the callframe trace for transaction")?
|
||||||
.output
|
.output
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.unwrap_or_default()
|
.unwrap_or_default()
|
||||||
@@ -446,26 +452,6 @@ where
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
|
||||||
pub async fn execute_balance_assertion(
|
|
||||||
&mut self,
|
|
||||||
_: &StepPath,
|
|
||||||
_: &BalanceAssertionStep,
|
|
||||||
) -> anyhow::Result<usize> {
|
|
||||||
// Kept empty intentionally for the benchmark driver.
|
|
||||||
Ok(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
|
|
||||||
async fn execute_storage_empty_assertion_step(
|
|
||||||
&mut self,
|
|
||||||
_: &StepPath,
|
|
||||||
_: &StorageEmptyAssertionStep,
|
|
||||||
) -> Result<usize> {
|
|
||||||
// Kept empty intentionally for the benchmark driver.
|
|
||||||
Ok(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
|
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
|
||||||
async fn execute_repeat_step(
|
async fn execute_repeat_step(
|
||||||
&mut self,
|
&mut self,
|
||||||
@@ -547,7 +533,6 @@ where
|
|||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
driver_id = self.driver_id,
|
driver_id = self.driver_id,
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
%contract_instance,
|
%contract_instance,
|
||||||
%deployer
|
%deployer
|
||||||
),
|
),
|
||||||
@@ -559,6 +544,7 @@ where
|
|||||||
deployer: Address,
|
deployer: Address,
|
||||||
calldata: Option<&Calldata>,
|
calldata: Option<&Calldata>,
|
||||||
value: Option<EtherValue>,
|
value: Option<EtherValue>,
|
||||||
|
step_path: Option<&StepPath>,
|
||||||
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
|
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
|
||||||
if let Some((_, address, abi)) = self
|
if let Some((_, address, abi)) = self
|
||||||
.execution_state
|
.execution_state
|
||||||
@@ -574,7 +560,7 @@ where
|
|||||||
} else {
|
} else {
|
||||||
info!("Contract instance requires deployment.");
|
info!("Contract instance requires deployment.");
|
||||||
let (address, abi, receipt) = self
|
let (address, abi, receipt) = self
|
||||||
.deploy_contract(contract_instance, deployer, calldata, value)
|
.deploy_contract(contract_instance, deployer, calldata, value, step_path)
|
||||||
.await
|
.await
|
||||||
.context("Failed to deploy contract")?;
|
.context("Failed to deploy contract")?;
|
||||||
info!(
|
info!(
|
||||||
@@ -590,7 +576,6 @@ where
|
|||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
driver_id = self.driver_id,
|
driver_id = self.driver_id,
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
%contract_instance,
|
%contract_instance,
|
||||||
%deployer
|
%deployer
|
||||||
),
|
),
|
||||||
@@ -602,6 +587,7 @@ where
|
|||||||
deployer: Address,
|
deployer: Address,
|
||||||
calldata: Option<&Calldata>,
|
calldata: Option<&Calldata>,
|
||||||
value: Option<EtherValue>,
|
value: Option<EtherValue>,
|
||||||
|
step_path: Option<&StepPath>,
|
||||||
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
|
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
|
||||||
let Some(ContractPathAndIdent {
|
let Some(ContractPathAndIdent {
|
||||||
contract_source_path,
|
contract_source_path,
|
||||||
@@ -660,7 +646,11 @@ where
|
|||||||
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
|
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
|
||||||
};
|
};
|
||||||
|
|
||||||
let receipt = match self.execute_transaction(tx).await {
|
let receipt = match self
|
||||||
|
.execute_transaction(tx, step_path)
|
||||||
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
|
.await
|
||||||
|
{
|
||||||
Ok(receipt) => receipt,
|
Ok(receipt) => receipt,
|
||||||
Err(error) => {
|
Err(error) => {
|
||||||
tracing::error!(?error, "Contract deployment transaction failed.");
|
tracing::error!(?error, "Contract deployment transaction failed.");
|
||||||
@@ -687,33 +677,6 @@ where
|
|||||||
|
|
||||||
Ok((address, abi, receipt))
|
Ok((address, abi, receipt))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)]
|
|
||||||
async fn step_address_auto_deployment(
|
|
||||||
&mut self,
|
|
||||||
step_address: &StepAddress,
|
|
||||||
) -> Result<Address> {
|
|
||||||
match step_address {
|
|
||||||
StepAddress::Address(address) => Ok(*address),
|
|
||||||
StepAddress::ResolvableAddress(resolvable) => {
|
|
||||||
let Some(instance) = resolvable
|
|
||||||
.strip_suffix(".address")
|
|
||||||
.map(ContractInstance::new)
|
|
||||||
else {
|
|
||||||
bail!("Not an address variable");
|
|
||||||
};
|
|
||||||
|
|
||||||
self.get_or_deploy_contract_instance(
|
|
||||||
&instance,
|
|
||||||
FunctionCallStep::default_caller_address(),
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map(|v| v.0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// endregion:Contract Deployment
|
// endregion:Contract Deployment
|
||||||
|
|
||||||
// region:Resolution & Resolver
|
// region:Resolution & Resolver
|
||||||
@@ -734,7 +697,8 @@ where
|
|||||||
async fn execute_transaction(
|
async fn execute_transaction(
|
||||||
&self,
|
&self,
|
||||||
transaction: TransactionRequest,
|
transaction: TransactionRequest,
|
||||||
) -> anyhow::Result<TransactionReceipt> {
|
step_path: Option<&StepPath>,
|
||||||
|
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
|
||||||
let node = self.platform_information.node;
|
let node = self.platform_information.node;
|
||||||
let transaction_hash = node
|
let transaction_hash = node
|
||||||
.submit_transaction(transaction)
|
.submit_transaction(transaction)
|
||||||
@@ -743,10 +707,16 @@ where
|
|||||||
Span::current().record("transaction_hash", display(transaction_hash));
|
Span::current().record("transaction_hash", display(transaction_hash));
|
||||||
|
|
||||||
info!("Submitted transaction");
|
info!("Submitted transaction");
|
||||||
|
if let Some(step_path) = step_path {
|
||||||
self.watcher_tx
|
self.watcher_tx
|
||||||
.send(WatcherEvent::SubmittedTransaction { transaction_hash })
|
.send(WatcherEvent::SubmittedTransaction {
|
||||||
|
transaction_hash,
|
||||||
|
step_path: step_path.clone(),
|
||||||
|
})
|
||||||
.context("Failed to send the transaction hash to the watcher")?;
|
.context("Failed to send the transaction hash to the watcher")?;
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok((transaction_hash, async move {
|
||||||
info!("Starting to poll for transaction receipt");
|
info!("Starting to poll for transaction receipt");
|
||||||
poll(
|
poll(
|
||||||
Duration::from_secs(30 * 60),
|
Duration::from_secs(30 * 60),
|
||||||
@@ -764,7 +734,10 @@ where
|
|||||||
.instrument(info_span!("Polling for receipt"))
|
.instrument(info_span!("Polling for receipt"))
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
.instrument(info_span!("Polling for receipt", %transaction_hash))
|
||||||
.await
|
.await
|
||||||
|
.inspect(|_| info!("Found the transaction receipt"))
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
// endregion:Transaction Execution
|
// endregion:Transaction Execution
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,16 +6,19 @@ use anyhow::Context as _;
|
|||||||
use futures::{FutureExt, StreamExt};
|
use futures::{FutureExt, StreamExt};
|
||||||
use revive_dt_common::types::PrivateKeyAllocator;
|
use revive_dt_common::types::PrivateKeyAllocator;
|
||||||
use revive_dt_core::Platform;
|
use revive_dt_core::Platform;
|
||||||
use revive_dt_format::steps::{Step, StepIdx, StepPath};
|
use revive_dt_format::{
|
||||||
|
corpus::Corpus,
|
||||||
|
steps::{Step, StepIdx, StepPath},
|
||||||
|
};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
use tracing::{error, info, info_span, instrument, warn};
|
use tracing::{Instrument, error, info, info_span, instrument, warn};
|
||||||
|
|
||||||
use revive_dt_config::{BenchmarkingContext, Context};
|
use revive_dt_config::{BenchmarkingContext, Context};
|
||||||
use revive_dt_report::Reporter;
|
use revive_dt_report::Reporter;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
differential_benchmarks::{Driver, Watcher, WatcherEvent},
|
differential_benchmarks::{Driver, Watcher, WatcherEvent},
|
||||||
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
|
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Handles the differential testing executing it according to the information defined in the
|
/// Handles the differential testing executing it according to the information defined in the
|
||||||
@@ -39,9 +42,17 @@ pub async fn handle_differential_benchmarks(
|
|||||||
let full_context = Context::Benchmark(Box::new(context.clone()));
|
let full_context = Context::Benchmark(Box::new(context.clone()));
|
||||||
|
|
||||||
// Discover all of the metadata files that are defined in the context.
|
// Discover all of the metadata files that are defined in the context.
|
||||||
let metadata_files = collect_metadata_files(&context)
|
let corpus = context
|
||||||
.context("Failed to collect metadata files for differential testing")?;
|
.corpus_configuration
|
||||||
info!(len = metadata_files.len(), "Discovered metadata files");
|
.test_specifiers
|
||||||
|
.clone()
|
||||||
|
.into_iter()
|
||||||
|
.try_fold(Corpus::default(), Corpus::with_test_specifier)
|
||||||
|
.context("Failed to parse the test corpus")?;
|
||||||
|
info!(
|
||||||
|
len = corpus.metadata_file_count(),
|
||||||
|
"Discovered metadata files"
|
||||||
|
);
|
||||||
|
|
||||||
// Discover the list of platforms that the tests should run on based on the context.
|
// Discover the list of platforms that the tests should run on based on the context.
|
||||||
let platforms = context
|
let platforms = context
|
||||||
@@ -84,8 +95,9 @@ pub async fn handle_differential_benchmarks(
|
|||||||
// Preparing test definitions for the execution.
|
// Preparing test definitions for the execution.
|
||||||
let test_definitions = create_test_definitions_stream(
|
let test_definitions = create_test_definitions_stream(
|
||||||
&full_context,
|
&full_context,
|
||||||
metadata_files.iter(),
|
&corpus,
|
||||||
&platforms_and_nodes,
|
&platforms_and_nodes,
|
||||||
|
None,
|
||||||
reporter.clone(),
|
reporter.clone(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -133,12 +145,14 @@ pub async fn handle_differential_benchmarks(
|
|||||||
context.wallet_configuration.highest_private_key_exclusive(),
|
context.wallet_configuration.highest_private_key_exclusive(),
|
||||||
)));
|
)));
|
||||||
let (watcher, watcher_tx) = Watcher::new(
|
let (watcher, watcher_tx) = Watcher::new(
|
||||||
platform_identifier,
|
|
||||||
platform_information
|
platform_information
|
||||||
.node
|
.node
|
||||||
.subscribe_to_full_blocks_information()
|
.subscribe_to_full_blocks_information()
|
||||||
.await
|
.await
|
||||||
.context("Failed to subscribe to full blocks information from the node")?,
|
.context("Failed to subscribe to full blocks information from the node")?,
|
||||||
|
test_definition
|
||||||
|
.reporter
|
||||||
|
.execution_specific_reporter(0usize, platform_identifier),
|
||||||
);
|
);
|
||||||
let driver = Driver::new(
|
let driver = Driver::new(
|
||||||
platform_information,
|
platform_information,
|
||||||
@@ -159,7 +173,10 @@ pub async fn handle_differential_benchmarks(
|
|||||||
|
|
||||||
futures::future::try_join(
|
futures::future::try_join(
|
||||||
watcher.run(),
|
watcher.run(),
|
||||||
driver.execute_all().inspect(|_| {
|
driver
|
||||||
|
.execute_all()
|
||||||
|
.instrument(info_span!("Executing Benchmarks", %platform_identifier))
|
||||||
|
.inspect(|_| {
|
||||||
info!("All transactions submitted - driver completed execution");
|
info!("All transactions submitted - driver completed execution");
|
||||||
watcher_tx
|
watcher_tx
|
||||||
.send(WatcherEvent::AllTransactionsSubmitted)
|
.send(WatcherEvent::AllTransactionsSubmitted)
|
||||||
|
|||||||
@@ -1,10 +1,15 @@
|
|||||||
use std::{collections::HashSet, pin::Pin, sync::Arc};
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
pin::Pin,
|
||||||
|
sync::Arc,
|
||||||
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
|
};
|
||||||
|
|
||||||
use alloy::primitives::{BlockNumber, TxHash};
|
use alloy::primitives::{BlockNumber, TxHash};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use futures::{Stream, StreamExt};
|
use futures::{Stream, StreamExt};
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_format::steps::StepPath;
|
||||||
use revive_dt_node_interaction::MinedBlockInformation;
|
use revive_dt_report::{ExecutionSpecificReporter, MinedBlockInformation, TransactionInformation};
|
||||||
use tokio::sync::{
|
use tokio::sync::{
|
||||||
RwLock,
|
RwLock,
|
||||||
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||||
@@ -15,9 +20,6 @@ use tracing::{info, instrument};
|
|||||||
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
|
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
|
||||||
/// workload and is not designed for reuse.
|
/// workload and is not designed for reuse.
|
||||||
pub struct Watcher {
|
pub struct Watcher {
|
||||||
/// The identifier of the platform that this watcher is for.
|
|
||||||
platform_identifier: PlatformIdentifier,
|
|
||||||
|
|
||||||
/// The receive side of the channel that all of the drivers and various other parts of the code
|
/// The receive side of the channel that all of the drivers and various other parts of the code
|
||||||
/// send events to the watcher on.
|
/// send events to the watcher on.
|
||||||
rx: UnboundedReceiver<WatcherEvent>,
|
rx: UnboundedReceiver<WatcherEvent>,
|
||||||
@@ -25,19 +27,22 @@ pub struct Watcher {
|
|||||||
/// This is a stream of the blocks that were mined by the node. This is for a single platform
|
/// This is a stream of the blocks that were mined by the node. This is for a single platform
|
||||||
/// and a single node from that platform.
|
/// and a single node from that platform.
|
||||||
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
|
|
||||||
|
/// The reporter used to send events to the report aggregator.
|
||||||
|
reporter: ExecutionSpecificReporter,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Watcher {
|
impl Watcher {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
platform_identifier: PlatformIdentifier,
|
|
||||||
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
|
reporter: ExecutionSpecificReporter,
|
||||||
) -> (Self, UnboundedSender<WatcherEvent>) {
|
) -> (Self, UnboundedSender<WatcherEvent>) {
|
||||||
let (tx, rx) = unbounded_channel::<WatcherEvent>();
|
let (tx, rx) = unbounded_channel::<WatcherEvent>();
|
||||||
(
|
(
|
||||||
Self {
|
Self {
|
||||||
platform_identifier,
|
|
||||||
rx,
|
rx,
|
||||||
blocks_stream,
|
blocks_stream,
|
||||||
|
reporter,
|
||||||
},
|
},
|
||||||
tx,
|
tx,
|
||||||
)
|
)
|
||||||
@@ -61,7 +66,8 @@ impl Watcher {
|
|||||||
// This is the set of the transaction hashes that the watcher should be looking for and
|
// This is the set of the transaction hashes that the watcher should be looking for and
|
||||||
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
||||||
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
||||||
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
|
let watch_for_transaction_hashes =
|
||||||
|
Arc::new(RwLock::new(HashMap::<TxHash, (StepPath, SystemTime)>::new()));
|
||||||
|
|
||||||
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
||||||
// txs are expected to come through the receive side of the channel. We do not want to rely
|
// txs are expected to come through the receive side of the channel. We do not want to rely
|
||||||
@@ -81,11 +87,14 @@ impl Watcher {
|
|||||||
// contain nested repetitions and therefore there's no use in doing any
|
// contain nested repetitions and therefore there's no use in doing any
|
||||||
// action if the repetitions are nested.
|
// action if the repetitions are nested.
|
||||||
WatcherEvent::RepetitionStartEvent { .. } => {}
|
WatcherEvent::RepetitionStartEvent { .. } => {}
|
||||||
WatcherEvent::SubmittedTransaction { transaction_hash } => {
|
WatcherEvent::SubmittedTransaction {
|
||||||
|
transaction_hash,
|
||||||
|
step_path,
|
||||||
|
} => {
|
||||||
watch_for_transaction_hashes
|
watch_for_transaction_hashes
|
||||||
.write()
|
.write()
|
||||||
.await
|
.await
|
||||||
.insert(transaction_hash);
|
.insert(transaction_hash, (step_path, SystemTime::now()));
|
||||||
}
|
}
|
||||||
WatcherEvent::AllTransactionsSubmitted => {
|
WatcherEvent::AllTransactionsSubmitted => {
|
||||||
*all_transactions_submitted.write().await = true;
|
*all_transactions_submitted.write().await = true;
|
||||||
@@ -97,19 +106,32 @@ impl Watcher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
let reporter = self.reporter.clone();
|
||||||
let block_information_watching_task = {
|
let block_information_watching_task = {
|
||||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||||
let mut blocks_information_stream = self.blocks_stream;
|
let mut blocks_information_stream = self.blocks_stream;
|
||||||
async move {
|
async move {
|
||||||
let mut mined_blocks_information = Vec::new();
|
while let Some(mut block) = blocks_information_stream.next().await {
|
||||||
|
|
||||||
while let Some(block) = blocks_information_stream.next().await {
|
|
||||||
// If the block number is equal to or less than the last block before the
|
// If the block number is equal to or less than the last block before the
|
||||||
// repetition then we ignore it and continue on to the next block.
|
// repetition then we ignore it and continue on to the next block.
|
||||||
if block.block_number <= ignore_block_before {
|
if block.ethereum_block_information.block_number <= ignore_block_before {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
{
|
||||||
|
let watch_for_transaction_hashes =
|
||||||
|
watch_for_transaction_hashes.read().await;
|
||||||
|
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
||||||
|
let Some((step_path, _)) = watch_for_transaction_hashes.get(tx_hash)
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
*block.tx_counts.entry(step_path.clone()).or_default() += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reporter
|
||||||
|
.report_block_mined_event(block.clone())
|
||||||
|
.expect("Can't fail");
|
||||||
|
|
||||||
if *all_transactions_submitted.read().await
|
if *all_transactions_submitted.read().await
|
||||||
&& watch_for_transaction_hashes.read().await.is_empty()
|
&& watch_for_transaction_hashes.read().await.is_empty()
|
||||||
@@ -118,8 +140,9 @@ impl Watcher {
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
|
block_number = block.ethereum_block_information.block_number,
|
||||||
|
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
|
||||||
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
||||||
block_tx_count = block.transaction_hashes.len(),
|
|
||||||
"Observed a block"
|
"Observed a block"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -127,58 +150,43 @@ impl Watcher {
|
|||||||
// are currently watching for.
|
// are currently watching for.
|
||||||
let mut watch_for_transaction_hashes =
|
let mut watch_for_transaction_hashes =
|
||||||
watch_for_transaction_hashes.write().await;
|
watch_for_transaction_hashes.write().await;
|
||||||
for tx_hash in block.transaction_hashes.iter() {
|
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
||||||
watch_for_transaction_hashes.remove(tx_hash);
|
let Some((step_path, submission_time)) =
|
||||||
|
watch_for_transaction_hashes.remove(tx_hash)
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let transaction_information = TransactionInformation {
|
||||||
|
transaction_hash: *tx_hash,
|
||||||
|
submission_timestamp: submission_time
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.expect("Can't fail")
|
||||||
|
.as_secs() as _,
|
||||||
|
block_timestamp: block.ethereum_block_information.block_timestamp,
|
||||||
|
block_number: block.ethereum_block_information.block_number,
|
||||||
|
};
|
||||||
|
reporter
|
||||||
|
.report_step_transaction_information_event(
|
||||||
|
step_path,
|
||||||
|
transaction_information,
|
||||||
|
)
|
||||||
|
.expect("Can't fail")
|
||||||
}
|
}
|
||||||
|
|
||||||
mined_blocks_information.push(block);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Watcher's Block Watching Task Finished");
|
info!("Watcher's Block Watching Task Finished");
|
||||||
mined_blocks_information
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let (_, mined_blocks_information) =
|
let (_, _) =
|
||||||
futures::future::join(watcher_event_watching_task, block_information_watching_task)
|
futures::future::join(watcher_event_watching_task, block_information_watching_task)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
// region:TEMPORARY
|
|
||||||
{
|
|
||||||
// TODO: The following core is TEMPORARY and will be removed once we have proper
|
|
||||||
// reporting in place and then it can be removed. This serves as as way of doing some
|
|
||||||
// very simple reporting for the time being.
|
|
||||||
use std::io::Write;
|
|
||||||
|
|
||||||
let mut stderr = std::io::stderr().lock();
|
|
||||||
writeln!(
|
|
||||||
stderr,
|
|
||||||
"Watcher information for {}",
|
|
||||||
self.platform_identifier
|
|
||||||
)?;
|
|
||||||
writeln!(
|
|
||||||
stderr,
|
|
||||||
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
|
|
||||||
)?;
|
|
||||||
for block in mined_blocks_information {
|
|
||||||
writeln!(
|
|
||||||
stderr,
|
|
||||||
"{},{},{},{},{}",
|
|
||||||
block.block_number,
|
|
||||||
block.block_timestamp,
|
|
||||||
block.mined_gas,
|
|
||||||
block.block_gas_limit,
|
|
||||||
block.transaction_hashes.len()
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// endregion:TEMPORARY
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
pub enum WatcherEvent {
|
pub enum WatcherEvent {
|
||||||
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
||||||
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
||||||
@@ -192,14 +200,14 @@ pub enum WatcherEvent {
|
|||||||
/// streaming the blocks.
|
/// streaming the blocks.
|
||||||
ignore_block_before: BlockNumber,
|
ignore_block_before: BlockNumber,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
||||||
/// transaction with this hash in the blocks that it watches.
|
/// transaction with this hash in the blocks that it watches.
|
||||||
SubmittedTransaction {
|
SubmittedTransaction {
|
||||||
/// The hash of the submitted transaction.
|
/// The hash of the submitted transaction.
|
||||||
transaction_hash: TxHash,
|
transaction_hash: TxHash,
|
||||||
|
/// The step path of the step that the transaction belongs to.
|
||||||
|
step_path: StepPath,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
||||||
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
||||||
/// any longer.
|
/// any longer.
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ use revive_dt_format::{
|
|||||||
traits::ResolutionContext,
|
traits::ResolutionContext,
|
||||||
};
|
};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
use tracing::{debug, error, info, instrument};
|
use tracing::{error, info, instrument};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
differential_tests::ExecutionState,
|
differential_tests::ExecutionState,
|
||||||
@@ -109,7 +109,6 @@ impl<'a> Driver<'a, StepsIterator> {
|
|||||||
// endregion:Constructors
|
// endregion:Constructors
|
||||||
|
|
||||||
// region:Execution
|
// region:Execution
|
||||||
#[instrument(level = "info", skip_all)]
|
|
||||||
pub async fn execute_all(mut self) -> Result<usize> {
|
pub async fn execute_all(mut self) -> Result<usize> {
|
||||||
let platform_drivers = std::mem::take(&mut self.platform_drivers);
|
let platform_drivers = std::mem::take(&mut self.platform_drivers);
|
||||||
let results = futures::future::try_join_all(
|
let results = futures::future::try_join_all(
|
||||||
@@ -218,8 +217,6 @@ where
|
|||||||
.flatten()
|
.flatten()
|
||||||
.flat_map(|(_, map)| map.values())
|
.flat_map(|(_, map)| map.values())
|
||||||
{
|
{
|
||||||
debug!(%library_instance, "Deploying Library Instance");
|
|
||||||
|
|
||||||
let ContractPathAndIdent {
|
let ContractPathAndIdent {
|
||||||
contract_source_path: library_source_path,
|
contract_source_path: library_source_path,
|
||||||
contract_ident: library_ident,
|
contract_ident: library_ident,
|
||||||
@@ -268,12 +265,6 @@ where
|
|||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
debug!(
|
|
||||||
?library_instance,
|
|
||||||
platform_identifier = %platform_information.platform.platform_identifier(),
|
|
||||||
"Deployed library"
|
|
||||||
);
|
|
||||||
|
|
||||||
let library_address = receipt
|
let library_address = receipt
|
||||||
.contract_address
|
.contract_address
|
||||||
.expect("Failed to deploy the library");
|
.expect("Failed to deploy the library");
|
||||||
@@ -312,7 +303,6 @@ where
|
|||||||
// endregion:Constructors & Initialization
|
// endregion:Constructors & Initialization
|
||||||
|
|
||||||
// region:Step Handling
|
// region:Step Handling
|
||||||
#[instrument(level = "info", skip_all)]
|
|
||||||
pub async fn execute_all(mut self) -> Result<usize> {
|
pub async fn execute_all(mut self) -> Result<usize> {
|
||||||
while let Some(result) = self.execute_next_step().await {
|
while let Some(result) = self.execute_next_step().await {
|
||||||
result?
|
result?
|
||||||
@@ -320,14 +310,6 @@ where
|
|||||||
Ok(self.steps_executed)
|
Ok(self.steps_executed)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(
|
|
||||||
level = "info",
|
|
||||||
skip_all,
|
|
||||||
fields(
|
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
node_id = self.platform_information.node.id(),
|
|
||||||
),
|
|
||||||
)]
|
|
||||||
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
|
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
|
||||||
let (step_path, step) = self.steps_iterator.next()?;
|
let (step_path, step) = self.steps_iterator.next()?;
|
||||||
info!(%step_path, "Executing Step");
|
info!(%step_path, "Executing Step");
|
||||||
@@ -344,6 +326,7 @@ where
|
|||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
||||||
|
node_id = self.platform_information.node.id(),
|
||||||
%step_path,
|
%step_path,
|
||||||
),
|
),
|
||||||
err(Debug),
|
err(Debug),
|
||||||
@@ -370,7 +353,8 @@ where
|
|||||||
.execute_account_allocation(step_path, step.as_ref())
|
.execute_account_allocation(step_path, step.as_ref())
|
||||||
.await
|
.await
|
||||||
.context("Account Allocation Step Failed"),
|
.context("Account Allocation Step Failed"),
|
||||||
}?;
|
}
|
||||||
|
.context(format!("Failure on step {step_path}"))?;
|
||||||
self.steps_executed += steps_executed;
|
self.steps_executed += steps_executed;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -402,6 +386,7 @@ where
|
|||||||
Ok(1)
|
Ok(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_contract_deployment(
|
async fn handle_function_call_contract_deployment(
|
||||||
&mut self,
|
&mut self,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
@@ -447,6 +432,7 @@ where
|
|||||||
Ok(receipts)
|
Ok(receipts)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_execution(
|
async fn handle_function_call_execution(
|
||||||
&mut self,
|
&mut self,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
@@ -470,14 +456,12 @@ where
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
match self.platform_information.node.execute_transaction(tx).await {
|
self.platform_information.node.execute_transaction(tx).await
|
||||||
Ok(receipt) => Ok(receipt),
|
|
||||||
Err(err) => Err(err),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_call_frame_tracing(
|
async fn handle_function_call_call_frame_tracing(
|
||||||
&mut self,
|
&mut self,
|
||||||
tx_hash: TxHash,
|
tx_hash: TxHash,
|
||||||
@@ -509,6 +493,7 @@ where
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_variable_assignment(
|
async fn handle_function_call_variable_assignment(
|
||||||
&mut self,
|
&mut self,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
@@ -541,6 +526,7 @@ where
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_assertions(
|
async fn handle_function_call_assertions(
|
||||||
&mut self,
|
&mut self,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
@@ -583,6 +569,7 @@ where
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn handle_function_call_assertion_item(
|
async fn handle_function_call_assertion_item(
|
||||||
&self,
|
&self,
|
||||||
receipt: &TransactionReceipt,
|
receipt: &TransactionReceipt,
|
||||||
@@ -611,15 +598,20 @@ where
|
|||||||
let expected = !assertion.exception;
|
let expected = !assertion.exception;
|
||||||
let actual = receipt.status();
|
let actual = receipt.status();
|
||||||
if actual != expected {
|
if actual != expected {
|
||||||
|
let revert_reason = tracing_result
|
||||||
|
.revert_reason
|
||||||
|
.as_ref()
|
||||||
|
.or(tracing_result.error.as_ref());
|
||||||
tracing::error!(
|
tracing::error!(
|
||||||
expected,
|
expected,
|
||||||
actual,
|
actual,
|
||||||
?receipt,
|
?receipt,
|
||||||
?tracing_result,
|
?tracing_result,
|
||||||
|
?revert_reason,
|
||||||
"Transaction status assertion failed"
|
"Transaction status assertion failed"
|
||||||
);
|
);
|
||||||
anyhow::bail!(
|
anyhow::bail!(
|
||||||
"Transaction status assertion failed - Expected {expected} but got {actual}",
|
"Transaction status assertion failed - Expected {expected} but got {actual}. Revert reason: {revert_reason:?}",
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -865,7 +857,6 @@ where
|
|||||||
level = "info",
|
level = "info",
|
||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
%contract_instance,
|
%contract_instance,
|
||||||
%deployer
|
%deployer
|
||||||
),
|
),
|
||||||
@@ -907,7 +898,6 @@ where
|
|||||||
level = "info",
|
level = "info",
|
||||||
skip_all,
|
skip_all,
|
||||||
fields(
|
fields(
|
||||||
platform_identifier = %self.platform_information.platform.platform_identifier(),
|
|
||||||
%contract_instance,
|
%contract_instance,
|
||||||
%deployer
|
%deployer
|
||||||
),
|
),
|
||||||
|
|||||||
@@ -1,25 +1,27 @@
|
|||||||
//! The main entry point into differential testing.
|
//! The main entry point into differential testing.
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::BTreeMap,
|
collections::{BTreeMap, BTreeSet},
|
||||||
io::{BufWriter, Write, stderr},
|
io::{BufWriter, Write, stderr},
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
time::Instant,
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use ansi_term::{ANSIStrings, Color};
|
||||||
use anyhow::Context as _;
|
use anyhow::Context as _;
|
||||||
use futures::{FutureExt, StreamExt};
|
use futures::{FutureExt, StreamExt};
|
||||||
use revive_dt_common::types::PrivateKeyAllocator;
|
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
|
||||||
use revive_dt_core::Platform;
|
use revive_dt_core::Platform;
|
||||||
use tokio::sync::Mutex;
|
use revive_dt_format::corpus::Corpus;
|
||||||
|
use tokio::sync::{Mutex, RwLock, Semaphore};
|
||||||
use tracing::{Instrument, error, info, info_span, instrument};
|
use tracing::{Instrument, error, info, info_span, instrument};
|
||||||
|
|
||||||
use revive_dt_config::{Context, TestExecutionContext};
|
use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
|
||||||
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
|
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
differential_tests::Driver,
|
differential_tests::Driver,
|
||||||
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
|
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Handles the differential testing executing it according to the information defined in the
|
/// Handles the differential testing executing it according to the information defined in the
|
||||||
@@ -32,9 +34,17 @@ pub async fn handle_differential_tests(
|
|||||||
let reporter_clone = reporter.clone();
|
let reporter_clone = reporter.clone();
|
||||||
|
|
||||||
// Discover all of the metadata files that are defined in the context.
|
// Discover all of the metadata files that are defined in the context.
|
||||||
let metadata_files = collect_metadata_files(&context)
|
let corpus = context
|
||||||
.context("Failed to collect metadata files for differential testing")?;
|
.corpus_configuration
|
||||||
info!(len = metadata_files.len(), "Discovered metadata files");
|
.test_specifiers
|
||||||
|
.clone()
|
||||||
|
.into_iter()
|
||||||
|
.try_fold(Corpus::default(), Corpus::with_test_specifier)
|
||||||
|
.context("Failed to parse the test corpus")?;
|
||||||
|
info!(
|
||||||
|
len = corpus.metadata_file_count(),
|
||||||
|
"Discovered metadata files"
|
||||||
|
);
|
||||||
|
|
||||||
// Discover the list of platforms that the tests should run on based on the context.
|
// Discover the list of platforms that the tests should run on based on the context.
|
||||||
let platforms = context
|
let platforms = context
|
||||||
@@ -71,11 +81,20 @@ pub async fn handle_differential_tests(
|
|||||||
info!("Spawned the platform nodes");
|
info!("Spawned the platform nodes");
|
||||||
|
|
||||||
// Preparing test definitions.
|
// Preparing test definitions.
|
||||||
|
let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
|
||||||
|
Some(path) => {
|
||||||
|
let report = read_to_string(path)
|
||||||
|
.context("Failed to read the report file to ignore the succeeding test cases")?;
|
||||||
|
Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
|
||||||
|
}
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
let full_context = Context::Test(Box::new(context.clone()));
|
let full_context = Context::Test(Box::new(context.clone()));
|
||||||
let test_definitions = create_test_definitions_stream(
|
let test_definitions = create_test_definitions_stream(
|
||||||
&full_context,
|
&full_context,
|
||||||
metadata_files.iter(),
|
&corpus,
|
||||||
&platforms_and_nodes,
|
&platforms_and_nodes,
|
||||||
|
only_execute_failed_tests.as_ref(),
|
||||||
reporter.clone(),
|
reporter.clone(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
@@ -101,19 +120,39 @@ pub async fn handle_differential_tests(
|
|||||||
)));
|
)));
|
||||||
|
|
||||||
// Creating the driver and executing all of the steps.
|
// Creating the driver and executing all of the steps.
|
||||||
let driver_task = futures::future::join_all(test_definitions.iter().map(|test_definition| {
|
let semaphore = context
|
||||||
|
.concurrency_configuration
|
||||||
|
.concurrency_limit()
|
||||||
|
.map(Semaphore::new)
|
||||||
|
.map(Arc::new);
|
||||||
|
let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
|
||||||
|
let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
|
||||||
|
|(test_id, test_definition)| {
|
||||||
|
let running_task_list = running_task_list.clone();
|
||||||
|
let semaphore = semaphore.clone();
|
||||||
|
|
||||||
let private_key_allocator = private_key_allocator.clone();
|
let private_key_allocator = private_key_allocator.clone();
|
||||||
let cached_compiler = cached_compiler.clone();
|
let cached_compiler = cached_compiler.clone();
|
||||||
let mode = test_definition.mode.clone();
|
let mode = test_definition.mode.clone();
|
||||||
let span = info_span!(
|
let span = info_span!(
|
||||||
"Executing Test Case",
|
"Executing Test Case",
|
||||||
|
test_id,
|
||||||
metadata_file_path = %test_definition.metadata_file_path.display(),
|
metadata_file_path = %test_definition.metadata_file_path.display(),
|
||||||
case_idx = %test_definition.case_idx,
|
case_idx = %test_definition.case_idx,
|
||||||
mode = %mode
|
mode = %mode,
|
||||||
);
|
);
|
||||||
async move {
|
async move {
|
||||||
let driver =
|
let permit = match semaphore.as_ref() {
|
||||||
match Driver::new_root(test_definition, private_key_allocator, &cached_compiler)
|
Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
running_task_list.write().await.insert(test_id);
|
||||||
|
let driver = match Driver::new_root(
|
||||||
|
test_definition,
|
||||||
|
private_key_allocator,
|
||||||
|
&cached_compiler,
|
||||||
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
Ok(driver) => driver,
|
Ok(driver) => driver,
|
||||||
@@ -123,6 +162,8 @@ pub async fn handle_differential_tests(
|
|||||||
.report_test_failed_event(format!("{error:#}"))
|
.report_test_failed_event(format!("{error:#}"))
|
||||||
.expect("Can't fail");
|
.expect("Can't fail");
|
||||||
error!("Test Case Failed");
|
error!("Test Case Failed");
|
||||||
|
drop(permit);
|
||||||
|
running_task_list.write().await.remove(&test_id);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -141,17 +182,33 @@ pub async fn handle_differential_tests(
|
|||||||
error!("Test Case Failed");
|
error!("Test Case Failed");
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
info!("Finished the execution of the test case")
|
info!("Finished the execution of the test case");
|
||||||
|
drop(permit);
|
||||||
|
running_task_list.write().await.remove(&test_id);
|
||||||
}
|
}
|
||||||
.instrument(span)
|
.instrument(span)
|
||||||
}))
|
},
|
||||||
|
))
|
||||||
.inspect(|_| {
|
.inspect(|_| {
|
||||||
info!("Finished executing all test cases");
|
info!("Finished executing all test cases");
|
||||||
reporter_clone
|
reporter_clone
|
||||||
.report_completion_event()
|
.report_completion_event()
|
||||||
.expect("Can't fail")
|
.expect("Can't fail")
|
||||||
});
|
});
|
||||||
let cli_reporting_task = start_cli_reporting_task(reporter);
|
let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
|
||||||
|
|
||||||
|
tokio::task::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let remaining_tasks = running_task_list.read().await;
|
||||||
|
info!(
|
||||||
|
count = remaining_tasks.len(),
|
||||||
|
?remaining_tasks,
|
||||||
|
"Remaining Tests"
|
||||||
|
);
|
||||||
|
drop(remaining_tasks);
|
||||||
|
tokio::time::sleep(Duration::from_secs(10)).await
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
futures::future::join(driver_task, cli_reporting_task).await;
|
futures::future::join(driver_task, cli_reporting_task).await;
|
||||||
|
|
||||||
@@ -159,21 +216,15 @@ pub async fn handle_differential_tests(
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
|
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
|
||||||
async fn start_cli_reporting_task(reporter: Reporter) {
|
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
|
||||||
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
|
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
|
||||||
drop(reporter);
|
drop(reporter);
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
|
|
||||||
const GREEN: &str = "\x1B[32m";
|
let mut global_success_count = 0;
|
||||||
const RED: &str = "\x1B[31m";
|
let mut global_failure_count = 0;
|
||||||
const GREY: &str = "\x1B[90m";
|
let mut global_ignore_count = 0;
|
||||||
const COLOR_RESET: &str = "\x1B[0m";
|
|
||||||
const BOLD: &str = "\x1B[1m";
|
|
||||||
const BOLD_RESET: &str = "\x1B[22m";
|
|
||||||
|
|
||||||
let mut number_of_successes = 0;
|
|
||||||
let mut number_of_failures = 0;
|
|
||||||
|
|
||||||
let mut buf = BufWriter::new(stderr());
|
let mut buf = BufWriter::new(stderr());
|
||||||
while let Ok(event) = aggregator_events_rx.recv().await {
|
while let Ok(event) = aggregator_events_rx.recv().await {
|
||||||
@@ -186,55 +237,135 @@ async fn start_cli_reporting_task(reporter: Reporter) {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
match output_format {
|
||||||
|
OutputFormat::Legacy => {
|
||||||
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
|
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
|
||||||
for (case_idx, case_status) in case_status.into_iter() {
|
for (case_idx, case_status) in case_status.into_iter() {
|
||||||
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
|
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
|
||||||
let _ = match case_status {
|
let _ = match case_status {
|
||||||
TestCaseStatus::Succeeded { steps_executed } => {
|
TestCaseStatus::Succeeded { steps_executed } => {
|
||||||
number_of_successes += 1;
|
global_success_count += 1;
|
||||||
writeln!(
|
writeln!(
|
||||||
buf,
|
buf,
|
||||||
"{}{}Case Succeeded{} - Steps Executed: {}{}",
|
"{}",
|
||||||
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
|
ANSIStrings(&[
|
||||||
|
Color::Green.bold().paint("Case Succeeded"),
|
||||||
|
Color::Green
|
||||||
|
.paint(format!(" - Steps Executed: {steps_executed}")),
|
||||||
|
])
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
TestCaseStatus::Failed { reason } => {
|
TestCaseStatus::Failed { reason } => {
|
||||||
number_of_failures += 1;
|
global_failure_count += 1;
|
||||||
writeln!(
|
writeln!(
|
||||||
buf,
|
buf,
|
||||||
"{}{}Case Failed{} - Reason: {}{}",
|
"{}",
|
||||||
RED,
|
ANSIStrings(&[
|
||||||
BOLD,
|
Color::Red.bold().paint("Case Failed"),
|
||||||
BOLD_RESET,
|
Color::Red.paint(format!(" - Reason: {}", reason.trim())),
|
||||||
reason.trim(),
|
])
|
||||||
COLOR_RESET,
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
TestCaseStatus::Ignored { reason, .. } => writeln!(
|
TestCaseStatus::Ignored { reason, .. } => {
|
||||||
|
global_ignore_count += 1;
|
||||||
|
writeln!(
|
||||||
buf,
|
buf,
|
||||||
"{}{}Case Ignored{} - Reason: {}{}",
|
"{}",
|
||||||
GREY,
|
ANSIStrings(&[
|
||||||
BOLD,
|
Color::Yellow.bold().paint("Case Ignored"),
|
||||||
BOLD_RESET,
|
Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
|
||||||
reason.trim(),
|
])
|
||||||
COLOR_RESET,
|
)
|
||||||
),
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
let _ = writeln!(buf);
|
let _ = writeln!(buf);
|
||||||
}
|
}
|
||||||
|
OutputFormat::CargoTestLike => {
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"\t{} {} - {}\n",
|
||||||
|
Color::Green.paint("Running"),
|
||||||
|
metadata_file_path.display(),
|
||||||
|
mode
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut success_count = 0;
|
||||||
|
let mut failure_count = 0;
|
||||||
|
let mut ignored_count = 0;
|
||||||
|
writeln!(buf, "running {} tests", case_status.len()).unwrap();
|
||||||
|
for (case_idx, case_result) in case_status.iter() {
|
||||||
|
let status = match case_result {
|
||||||
|
TestCaseStatus::Succeeded { .. } => {
|
||||||
|
success_count += 1;
|
||||||
|
global_success_count += 1;
|
||||||
|
Color::Green.paint("ok")
|
||||||
|
}
|
||||||
|
TestCaseStatus::Failed { reason } => {
|
||||||
|
failure_count += 1;
|
||||||
|
global_failure_count += 1;
|
||||||
|
Color::Red.paint(format!("FAILED, {reason}"))
|
||||||
|
}
|
||||||
|
TestCaseStatus::Ignored { reason, .. } => {
|
||||||
|
ignored_count += 1;
|
||||||
|
global_ignore_count += 1;
|
||||||
|
Color::Yellow.paint(format!("ignored, {reason:?}"))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
|
||||||
|
}
|
||||||
|
writeln!(buf).unwrap();
|
||||||
|
|
||||||
|
let status = if failure_count > 0 {
|
||||||
|
Color::Red.paint("FAILED")
|
||||||
|
} else {
|
||||||
|
Color::Green.paint("ok")
|
||||||
|
};
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"test result: {}. {} passed; {} failed; {} ignored",
|
||||||
|
status, success_count, failure_count, ignored_count,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
writeln!(buf).unwrap();
|
||||||
|
|
||||||
|
if aggregator_events_rx.is_empty() {
|
||||||
|
buf = tokio::task::spawn_blocking(move || {
|
||||||
|
buf.flush().unwrap();
|
||||||
|
buf
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info!("Aggregator Broadcast Channel Closed");
|
||||||
|
|
||||||
// Summary at the end.
|
// Summary at the end.
|
||||||
let _ = writeln!(
|
match output_format {
|
||||||
|
OutputFormat::Legacy => {
|
||||||
|
writeln!(
|
||||||
buf,
|
buf,
|
||||||
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
|
"{} cases: {} cases succeeded, {} cases failed in {} seconds",
|
||||||
number_of_successes + number_of_failures,
|
global_success_count + global_failure_count + global_ignore_count,
|
||||||
GREEN,
|
Color::Green.paint(global_success_count.to_string()),
|
||||||
number_of_successes,
|
Color::Red.paint(global_failure_count.to_string()),
|
||||||
COLOR_RESET,
|
|
||||||
RED,
|
|
||||||
number_of_failures,
|
|
||||||
COLOR_RESET,
|
|
||||||
start.elapsed().as_secs()
|
start.elapsed().as_secs()
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
OutputFormat::CargoTestLike => {
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"run finished. {} passed; {} failed; {} ignored; finished in {}s",
|
||||||
|
global_success_count,
|
||||||
|
global_failure_count,
|
||||||
|
global_ignore_count,
|
||||||
|
start.elapsed().as_secs()
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -201,7 +201,7 @@ async fn compile_contracts(
|
|||||||
// Puts a limit on how many compilations we can perform at any given instance which helps us
|
// Puts a limit on how many compilations we can perform at any given instance which helps us
|
||||||
// with some of the errors we've been seeing with high concurrency on MacOS (we have not tried
|
// with some of the errors we've been seeing with high concurrency on MacOS (we have not tried
|
||||||
// it on Linux so we don't know if these issues also persist there or not.)
|
// it on Linux so we don't know if these issues also persist there or not.)
|
||||||
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(100));
|
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
|
||||||
let _permit = SPAWN_GATE.acquire().await?;
|
let _permit = SPAWN_GATE.acquire().await?;
|
||||||
|
|
||||||
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
|
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
|
||||||
@@ -325,26 +325,6 @@ impl ArtifactsCache {
|
|||||||
let value = bson::from_slice::<CacheValue>(&value).ok()?;
|
let value = bson::from_slice::<CacheValue>(&value).ok()?;
|
||||||
Some(value)
|
Some(value)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(level = "debug", skip_all, err)]
|
|
||||||
pub async fn get_or_insert_with(
|
|
||||||
&self,
|
|
||||||
key: &CacheKey<'_>,
|
|
||||||
callback: impl AsyncFnOnce() -> Result<CacheValue>,
|
|
||||||
) -> Result<CacheValue> {
|
|
||||||
match self.get(key).await {
|
|
||||||
Some(value) => {
|
|
||||||
debug!("Cache hit");
|
|
||||||
Ok(value)
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
debug!("Cache miss");
|
|
||||||
let value = callback().await?;
|
|
||||||
self.insert(key, &value).await?;
|
|
||||||
Ok(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
use revive_dt_config::CorpusConfiguration;
|
|
||||||
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
|
|
||||||
use tracing::{info, info_span, instrument};
|
|
||||||
|
|
||||||
/// Given an object that implements [`AsRef<CorpusConfiguration>`], this function finds all of the
|
|
||||||
/// corpus files and produces a map containing all of the [`MetadataFile`]s discovered.
|
|
||||||
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
|
|
||||||
pub fn collect_metadata_files(
|
|
||||||
context: impl AsRef<CorpusConfiguration>,
|
|
||||||
) -> anyhow::Result<Vec<MetadataFile>> {
|
|
||||||
let mut metadata_files = Vec::new();
|
|
||||||
|
|
||||||
let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
|
|
||||||
for path in &corpus_configuration.paths {
|
|
||||||
let span = info_span!("Processing corpus file", path = %path.display());
|
|
||||||
let _guard = span.enter();
|
|
||||||
|
|
||||||
let corpus = Corpus::try_from_path(path)?;
|
|
||||||
info!(
|
|
||||||
name = corpus.name(),
|
|
||||||
number_of_contained_paths = corpus.path_count(),
|
|
||||||
"Deserialized corpus file"
|
|
||||||
);
|
|
||||||
metadata_files.extend(corpus.enumerate_tests());
|
|
||||||
}
|
|
||||||
|
|
||||||
// There's a possibility that there are certain paths that all lead to the same metadata files
|
|
||||||
// and therefore it's important that we sort them and then deduplicate them.
|
|
||||||
metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
|
|
||||||
metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
|
|
||||||
|
|
||||||
Ok(metadata_files)
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,7 @@
|
|||||||
mod cached_compiler;
|
mod cached_compiler;
|
||||||
mod metadata;
|
|
||||||
mod pool;
|
mod pool;
|
||||||
mod test;
|
mod test;
|
||||||
|
|
||||||
pub use cached_compiler::*;
|
pub use cached_compiler::*;
|
||||||
pub use metadata::*;
|
|
||||||
pub use pool::*;
|
pub use pool::*;
|
||||||
pub use test::*;
|
pub use test::*;
|
||||||
|
|||||||
@@ -4,10 +4,9 @@ use std::{borrow::Cow, path::Path};
|
|||||||
|
|
||||||
use futures::{Stream, StreamExt, stream};
|
use futures::{Stream, StreamExt, stream};
|
||||||
use indexmap::{IndexMap, indexmap};
|
use indexmap::{IndexMap, indexmap};
|
||||||
use revive_dt_common::iterators::EitherIter;
|
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
use revive_dt_config::Context;
|
use revive_dt_config::Context;
|
||||||
use revive_dt_format::mode::ParsedMode;
|
use revive_dt_format::corpus::Corpus;
|
||||||
use serde_json::{Value, json};
|
use serde_json::{Value, json};
|
||||||
|
|
||||||
use revive_dt_compiler::Mode;
|
use revive_dt_compiler::Mode;
|
||||||
@@ -17,7 +16,7 @@ use revive_dt_format::{
|
|||||||
metadata::MetadataFile,
|
metadata::MetadataFile,
|
||||||
};
|
};
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
use revive_dt_report::{ExecutionSpecificReporter, Reporter};
|
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
|
||||||
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
|
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
|
||||||
use tracing::{debug, error, info};
|
use tracing::{debug, error, info};
|
||||||
|
|
||||||
@@ -28,34 +27,26 @@ pub async fn create_test_definitions_stream<'a>(
|
|||||||
// This is only required for creating the compiler objects and is not used anywhere else in the
|
// This is only required for creating the compiler objects and is not used anywhere else in the
|
||||||
// function.
|
// function.
|
||||||
context: &Context,
|
context: &Context,
|
||||||
metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
|
corpus: &'a Corpus,
|
||||||
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
|
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
reporter: Reporter,
|
reporter: Reporter,
|
||||||
) -> impl Stream<Item = TestDefinition<'a>> {
|
) -> impl Stream<Item = TestDefinition<'a>> {
|
||||||
|
let cloned_reporter = reporter.clone();
|
||||||
stream::iter(
|
stream::iter(
|
||||||
metadata_files
|
corpus
|
||||||
.into_iter()
|
.cases_iterator()
|
||||||
// Flatten over the cases.
|
.inspect(move |(metadata_file, ..)| {
|
||||||
.flat_map(|metadata_file| {
|
cloned_reporter
|
||||||
metadata_file
|
.report_metadata_file_discovery_event(
|
||||||
.cases
|
metadata_file.metadata_file_path.clone(),
|
||||||
.iter()
|
metadata_file.content.clone(),
|
||||||
.enumerate()
|
)
|
||||||
.map(move |(case_idx, case)| (metadata_file, case_idx, case))
|
.unwrap();
|
||||||
})
|
})
|
||||||
// Flatten over the modes, prefer the case modes over the metadata file modes.
|
.map(move |(metadata_file, case_idx, case, mode)| {
|
||||||
.flat_map(move |(metadata_file, case_idx, case)| {
|
|
||||||
let reporter = reporter.clone();
|
let reporter = reporter.clone();
|
||||||
|
|
||||||
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
|
|
||||||
let modes = match modes {
|
|
||||||
Some(modes) => EitherIter::A(
|
|
||||||
ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
|
|
||||||
),
|
|
||||||
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
|
|
||||||
};
|
|
||||||
|
|
||||||
modes.into_iter().map(move |mode| {
|
|
||||||
(
|
(
|
||||||
metadata_file,
|
metadata_file,
|
||||||
case_idx,
|
case_idx,
|
||||||
@@ -68,7 +59,6 @@ pub async fn create_test_definitions_stream<'a>(
|
|||||||
})),
|
})),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
})
|
|
||||||
// Inform the reporter of each one of the test cases that were discovered which we expect to
|
// Inform the reporter of each one of the test cases that were discovered which we expect to
|
||||||
// run.
|
// run.
|
||||||
.inspect(|(_, _, _, _, reporter)| {
|
.inspect(|(_, _, _, _, reporter)| {
|
||||||
@@ -140,7 +130,7 @@ pub async fn create_test_definitions_stream<'a>(
|
|||||||
)
|
)
|
||||||
// Filter out the test cases which are incompatible or that can't run in the current setup.
|
// Filter out the test cases which are incompatible or that can't run in the current setup.
|
||||||
.filter_map(move |test| async move {
|
.filter_map(move |test| async move {
|
||||||
match test.check_compatibility() {
|
match test.check_compatibility(only_execute_failed_tests) {
|
||||||
Ok(()) => Some(test),
|
Ok(()) => Some(test),
|
||||||
Err((reason, additional_information)) => {
|
Err((reason, additional_information)) => {
|
||||||
debug!(
|
debug!(
|
||||||
@@ -200,12 +190,16 @@ pub struct TestDefinition<'a> {
|
|||||||
|
|
||||||
impl<'a> TestDefinition<'a> {
|
impl<'a> TestDefinition<'a> {
|
||||||
/// Checks if this test can be ran with the current configuration.
|
/// Checks if this test can be ran with the current configuration.
|
||||||
pub fn check_compatibility(&self) -> TestCheckFunctionResult {
|
pub fn check_compatibility(
|
||||||
|
&self,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
|
) -> TestCheckFunctionResult {
|
||||||
self.check_metadata_file_ignored()?;
|
self.check_metadata_file_ignored()?;
|
||||||
self.check_case_file_ignored()?;
|
self.check_case_file_ignored()?;
|
||||||
self.check_target_compatibility()?;
|
self.check_target_compatibility()?;
|
||||||
self.check_evm_version_compatibility()?;
|
self.check_evm_version_compatibility()?;
|
||||||
self.check_compiler_compatibility()?;
|
self.check_compiler_compatibility()?;
|
||||||
|
self.check_ignore_succeeded(only_execute_failed_tests)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -313,6 +307,36 @@ impl<'a> TestDefinition<'a> {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Checks if the test case should be executed or not based on the passed report and whether the
|
||||||
|
/// user has instructed the tool to ignore the already succeeding test cases.
|
||||||
|
fn check_ignore_succeeded(
|
||||||
|
&self,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
|
) -> TestCheckFunctionResult {
|
||||||
|
let Some(report) = only_execute_failed_tests else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let test_case_status = report
|
||||||
|
.execution_information
|
||||||
|
.get(&(self.metadata_file_path.to_path_buf().into()))
|
||||||
|
.and_then(|obj| obj.case_reports.get(&self.case_idx))
|
||||||
|
.and_then(|obj| obj.mode_execution_reports.get(&self.mode))
|
||||||
|
.and_then(|obj| obj.status.as_ref());
|
||||||
|
|
||||||
|
match test_case_status {
|
||||||
|
Some(TestCaseStatus::Failed { .. }) => Ok(()),
|
||||||
|
Some(TestCaseStatus::Ignored { .. }) => Err((
|
||||||
|
"Ignored since it was ignored in a previous run",
|
||||||
|
indexmap! {},
|
||||||
|
)),
|
||||||
|
Some(TestCaseStatus::Succeeded { .. }) => {
|
||||||
|
Err(("Ignored since it succeeded in a prior run", indexmap! {}))
|
||||||
|
}
|
||||||
|
None => Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct TestPlatformInformation<'a> {
|
pub struct TestPlatformInformation<'a> {
|
||||||
|
|||||||
+179
-120
@@ -16,7 +16,7 @@ use revive_dt_config::*;
|
|||||||
use revive_dt_node::{
|
use revive_dt_node::{
|
||||||
Node, node_implementations::geth::GethNode,
|
Node, node_implementations::geth::GethNode,
|
||||||
node_implementations::lighthouse_geth::LighthouseGethNode,
|
node_implementations::lighthouse_geth::LighthouseGethNode,
|
||||||
node_implementations::substrate::SubstrateNode,
|
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
|
||||||
};
|
};
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
use tracing::info;
|
use tracing::info;
|
||||||
@@ -59,6 +59,9 @@ pub trait Platform {
|
|||||||
context: Context,
|
context: Context,
|
||||||
version: Option<VersionOrRequirement>,
|
version: Option<VersionOrRequirement>,
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
|
||||||
|
|
||||||
|
/// Exports the genesis/chainspec for the node.
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value>;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
@@ -104,6 +107,15 @@ impl Platform for GethEvmSolcPlatform {
|
|||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
let node_genesis = GethNode::node_genesis(genesis.clone(), &wallet);
|
||||||
|
serde_json::to_value(node_genesis)
|
||||||
|
.context("Failed to convert node genesis to a serde_value")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
@@ -149,109 +161,14 @@ impl Platform for LighthouseGethEvmSolcPlatform {
|
|||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
pub struct KitchensinkPolkavmResolcPlatform;
|
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
impl Platform for KitchensinkPolkavmResolcPlatform {
|
let node_genesis = LighthouseGethNode::node_genesis(genesis.clone(), &wallet);
|
||||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
serde_json::to_value(node_genesis)
|
||||||
PlatformIdentifier::KitchensinkPolkavmResolc
|
.context("Failed to convert node genesis to a serde_value")
|
||||||
}
|
|
||||||
|
|
||||||
fn node_identifier(&self) -> NodeIdentifier {
|
|
||||||
NodeIdentifier::Kitchensink
|
|
||||||
}
|
|
||||||
|
|
||||||
fn vm_identifier(&self) -> VmIdentifier {
|
|
||||||
VmIdentifier::PolkaVM
|
|
||||||
}
|
|
||||||
|
|
||||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
|
||||||
CompilerIdentifier::Resolc
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_node(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
|
||||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
|
||||||
.path
|
|
||||||
.clone();
|
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
|
||||||
Ok(thread::spawn(move || {
|
|
||||||
let node = SubstrateNode::new(
|
|
||||||
kitchensink_path,
|
|
||||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
|
||||||
context,
|
|
||||||
);
|
|
||||||
let node = spawn_node(node, genesis)?;
|
|
||||||
Ok(Box::new(node) as Box<_>)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_compiler(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
version: Option<VersionOrRequirement>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let compiler = Resolc::new(context, version).await;
|
|
||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
|
||||||
pub struct KitchensinkRevmSolcPlatform;
|
|
||||||
|
|
||||||
impl Platform for KitchensinkRevmSolcPlatform {
|
|
||||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
|
||||||
PlatformIdentifier::KitchensinkRevmSolc
|
|
||||||
}
|
|
||||||
|
|
||||||
fn node_identifier(&self) -> NodeIdentifier {
|
|
||||||
NodeIdentifier::Kitchensink
|
|
||||||
}
|
|
||||||
|
|
||||||
fn vm_identifier(&self) -> VmIdentifier {
|
|
||||||
VmIdentifier::Evm
|
|
||||||
}
|
|
||||||
|
|
||||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
|
||||||
CompilerIdentifier::Solc
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_node(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
|
||||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
|
||||||
.path
|
|
||||||
.clone();
|
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
|
||||||
Ok(thread::spawn(move || {
|
|
||||||
let node = SubstrateNode::new(
|
|
||||||
kitchensink_path,
|
|
||||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
|
||||||
context,
|
|
||||||
);
|
|
||||||
let node = spawn_node(node, genesis)?;
|
|
||||||
Ok(Box::new(node) as Box<_>)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_compiler(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
version: Option<VersionOrRequirement>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let compiler = Solc::new(context, version).await;
|
|
||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -280,15 +197,21 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
|||||||
context: Context,
|
context: Context,
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
||||||
.path
|
|
||||||
.clone();
|
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
||||||
|
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
||||||
|
|
||||||
|
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
||||||
|
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
Ok(thread::spawn(move || {
|
Ok(thread::spawn(move || {
|
||||||
let node = SubstrateNode::new(
|
let node = SubstrateNode::new(
|
||||||
revive_dev_node_path,
|
revive_dev_node_path,
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
Some(revive_dev_node_consensus),
|
||||||
context,
|
context,
|
||||||
|
ð_rpc_connection_strings,
|
||||||
);
|
);
|
||||||
let node = spawn_node(node, genesis)?;
|
let node = spawn_node(node, genesis)?;
|
||||||
Ok(Box::new(node) as Box<_>)
|
Ok(Box::new(node) as Box<_>)
|
||||||
@@ -305,6 +228,16 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
|||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
@@ -332,15 +265,21 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
|
|||||||
context: Context,
|
context: Context,
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
||||||
.path
|
|
||||||
.clone();
|
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
||||||
|
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
||||||
|
|
||||||
|
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
||||||
|
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
Ok(thread::spawn(move || {
|
Ok(thread::spawn(move || {
|
||||||
let node = SubstrateNode::new(
|
let node = SubstrateNode::new(
|
||||||
revive_dev_node_path,
|
revive_dev_node_path,
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
Some(revive_dev_node_consensus),
|
||||||
context,
|
context,
|
||||||
|
ð_rpc_connection_strings,
|
||||||
);
|
);
|
||||||
let node = spawn_node(node, genesis)?;
|
let node = spawn_node(node, genesis)?;
|
||||||
Ok(Box::new(node) as Box<_>)
|
Ok(Box::new(node) as Box<_>)
|
||||||
@@ -357,6 +296,130 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
|
|||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ZombienetPolkavmResolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ZombienetPolkavmResolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Zombienet
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = ZombienetNode::new(polkadot_parachain_path, context);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ZombienetRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ZombienetRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Zombienet
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = ZombienetNode::new(polkadot_parachain_path, context);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<PlatformIdentifier> for Box<dyn Platform> {
|
impl From<PlatformIdentifier> for Box<dyn Platform> {
|
||||||
@@ -366,18 +429,16 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
|
|||||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
||||||
}
|
}
|
||||||
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
|
||||||
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
|
|
||||||
}
|
|
||||||
PlatformIdentifier::KitchensinkRevmSolc => {
|
|
||||||
Box::new(KitchensinkRevmSolcPlatform) as Box<_>
|
|
||||||
}
|
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
||||||
}
|
}
|
||||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||||
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
|
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
|
||||||
}
|
}
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc => {
|
||||||
|
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -389,18 +450,16 @@ impl From<PlatformIdentifier> for &dyn Platform {
|
|||||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
|
||||||
&KitchensinkPolkavmResolcPlatform as &dyn Platform
|
|
||||||
}
|
|
||||||
PlatformIdentifier::KitchensinkRevmSolc => {
|
|
||||||
&KitchensinkRevmSolcPlatform as &dyn Platform
|
|
||||||
}
|
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||||
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc => {
|
||||||
|
&ZombienetPolkavmResolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
+18
-3
@@ -2,10 +2,11 @@ mod differential_benchmarks;
|
|||||||
mod differential_tests;
|
mod differential_tests;
|
||||||
mod helpers;
|
mod helpers;
|
||||||
|
|
||||||
|
use anyhow::Context as _;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use revive_dt_report::ReportAggregator;
|
use revive_dt_report::ReportAggregator;
|
||||||
use schemars::schema_for;
|
use schemars::schema_for;
|
||||||
use tracing::info;
|
use tracing::{info, level_filters::LevelFilter};
|
||||||
use tracing_subscriber::{EnvFilter, FmtSubscriber};
|
use tracing_subscriber::{EnvFilter, FmtSubscriber};
|
||||||
|
|
||||||
use revive_dt_config::Context;
|
use revive_dt_config::Context;
|
||||||
@@ -30,14 +31,20 @@ fn main() -> anyhow::Result<()> {
|
|||||||
.with_writer(writer)
|
.with_writer(writer)
|
||||||
.with_thread_ids(false)
|
.with_thread_ids(false)
|
||||||
.with_thread_names(false)
|
.with_thread_names(false)
|
||||||
.with_env_filter(EnvFilter::from_default_env())
|
.with_env_filter(
|
||||||
|
EnvFilter::builder()
|
||||||
|
.with_default_directive(LevelFilter::OFF.into())
|
||||||
|
.from_env_lossy(),
|
||||||
|
)
|
||||||
.with_ansi(false)
|
.with_ansi(false)
|
||||||
.pretty()
|
.pretty()
|
||||||
.finish();
|
.finish();
|
||||||
tracing::subscriber::set_global_default(subscriber)?;
|
tracing::subscriber::set_global_default(subscriber)?;
|
||||||
info!("Differential testing tool is starting");
|
info!("Differential testing tool is starting");
|
||||||
|
|
||||||
let context = Context::try_parse()?;
|
let mut context = Context::try_parse()?;
|
||||||
|
context.update_for_profile();
|
||||||
|
|
||||||
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
|
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
|
||||||
|
|
||||||
match context {
|
match context {
|
||||||
@@ -72,6 +79,14 @@ fn main() -> anyhow::Result<()> {
|
|||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}),
|
}),
|
||||||
|
Context::ExportGenesis(ref export_genesis_context) => {
|
||||||
|
let platform = Into::<&dyn Platform>::into(export_genesis_context.platform);
|
||||||
|
let genesis = platform.export_genesis(context)?;
|
||||||
|
let genesis_json = serde_json::to_string_pretty(&genesis)
|
||||||
|
.context("Failed to serialize the genesis to JSON")?;
|
||||||
|
println!("{genesis_json}");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
Context::ExportJsonSchema => {
|
Context::ExportJsonSchema => {
|
||||||
let schema = schema_for!(Metadata);
|
let schema = schema_for!(Metadata);
|
||||||
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
|
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
|
||||||
|
|||||||
@@ -16,12 +16,12 @@ revive-common = { workspace = true }
|
|||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
regex = { workspace = true }
|
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
schemars = { workspace = true }
|
schemars = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true, features = ["derive"] }
|
serde = { workspace = true, features = ["derive"] }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
itertools = { workspace = true }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
|
|||||||
@@ -1,9 +1,12 @@
|
|||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use revive_dt_common::{macros::define_wrapper_type, types::Mode};
|
use revive_dt_common::{
|
||||||
|
macros::define_wrapper_type,
|
||||||
|
types::{Mode, ParsedMode},
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{mode::ParsedMode, steps::*};
|
use crate::steps::*;
|
||||||
|
|
||||||
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
|
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
|
||||||
pub struct Case {
|
pub struct Case {
|
||||||
|
|||||||
+148
-79
@@ -1,56 +1,167 @@
|
|||||||
use std::{
|
use std::{
|
||||||
fs::File,
|
borrow::Cow,
|
||||||
|
collections::HashMap,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
};
|
};
|
||||||
|
|
||||||
use revive_dt_common::iterators::FilesWithExtensionIterator;
|
use itertools::Itertools;
|
||||||
use serde::{Deserialize, Serialize};
|
use revive_dt_common::{
|
||||||
use tracing::{debug, info};
|
iterators::{EitherIter, FilesWithExtensionIterator},
|
||||||
|
types::{Mode, ParsedMode, ParsedTestSpecifier},
|
||||||
|
};
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
|
||||||
use crate::metadata::{Metadata, MetadataFile};
|
use crate::{
|
||||||
use anyhow::Context as _;
|
case::{Case, CaseIdx},
|
||||||
|
metadata::{Metadata, MetadataFile},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(Default)]
|
||||||
#[serde(untagged)]
|
pub struct Corpus {
|
||||||
pub enum Corpus {
|
test_specifiers: HashMap<ParsedTestSpecifier, Vec<PathBuf>>,
|
||||||
SinglePath { name: String, path: PathBuf },
|
metadata_files: HashMap<PathBuf, MetadataFile>,
|
||||||
MultiplePaths { name: String, paths: Vec<PathBuf> },
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Corpus {
|
impl Corpus {
|
||||||
pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
|
pub fn new() -> Self {
|
||||||
let mut corpus = File::open(file_path.as_ref())
|
Default::default()
|
||||||
.map_err(anyhow::Error::from)
|
}
|
||||||
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
|
|
||||||
.with_context(|| {
|
pub fn with_test_specifier(
|
||||||
format!(
|
mut self,
|
||||||
"Failed to open and deserialize corpus file at {}",
|
test_specifier: ParsedTestSpecifier,
|
||||||
file_path.as_ref().display()
|
) -> anyhow::Result<Self> {
|
||||||
|
match &test_specifier {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path: metadata_file_path,
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::Case {
|
||||||
|
metadata_file_path, ..
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::CaseWithMode {
|
||||||
|
metadata_file_path, ..
|
||||||
|
} => {
|
||||||
|
let metadata_files = enumerate_metadata_files(metadata_file_path);
|
||||||
|
self.test_specifiers.insert(
|
||||||
|
test_specifier,
|
||||||
|
metadata_files
|
||||||
|
.iter()
|
||||||
|
.map(|metadata_file| metadata_file.metadata_file_path.clone())
|
||||||
|
.collect(),
|
||||||
|
);
|
||||||
|
for metadata_file in metadata_files.into_iter() {
|
||||||
|
self.metadata_files
|
||||||
|
.insert(metadata_file.metadata_file_path.clone(), metadata_file);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cases_iterator(
|
||||||
|
&self,
|
||||||
|
) -> impl Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_ {
|
||||||
|
let mut iterator = Box::new(std::iter::empty())
|
||||||
|
as Box<dyn Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_>;
|
||||||
|
|
||||||
|
for (test_specifier, metadata_file_paths) in self.test_specifiers.iter() {
|
||||||
|
for metadata_file_path in metadata_file_paths {
|
||||||
|
let metadata_file = self
|
||||||
|
.metadata_files
|
||||||
|
.get(metadata_file_path)
|
||||||
|
.expect("Must succeed");
|
||||||
|
|
||||||
|
match test_specifier {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory { .. } => {
|
||||||
|
for (case_idx, case) in metadata_file.cases.iter().enumerate() {
|
||||||
|
let case_idx = CaseIdx::new(case_idx);
|
||||||
|
|
||||||
|
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
|
||||||
|
let modes = match modes {
|
||||||
|
Some(modes) => EitherIter::A(
|
||||||
|
ParsedMode::many_to_modes(modes.iter())
|
||||||
|
.map(Cow::<'static, _>::Owned),
|
||||||
|
),
|
||||||
|
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
|
||||||
|
};
|
||||||
|
|
||||||
|
iterator = Box::new(
|
||||||
|
iterator.chain(
|
||||||
|
modes
|
||||||
|
.into_iter()
|
||||||
|
.map(move |mode| (metadata_file, case_idx, case, mode)),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
})?;
|
}
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::Case { case_idx, .. } => {
|
||||||
|
let Some(case) = metadata_file.cases.get(*case_idx) else {
|
||||||
|
warn!(
|
||||||
|
test_specifier = %test_specifier,
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
case_idx = case_idx,
|
||||||
|
case_count = metadata_file.cases.len(),
|
||||||
|
"Specified case not found in metadata file"
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let case_idx = CaseIdx::new(*case_idx);
|
||||||
|
|
||||||
let corpus_directory = file_path
|
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
|
||||||
.as_ref()
|
let modes = match modes {
|
||||||
.canonicalize()
|
Some(modes) => EitherIter::A(
|
||||||
.context("Failed to canonicalize the path to the corpus file")?
|
ParsedMode::many_to_modes(modes.iter())
|
||||||
.parent()
|
.map(Cow::<'static, Mode>::Owned),
|
||||||
.context("Corpus file has no parent")?
|
),
|
||||||
.to_path_buf();
|
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
|
||||||
|
};
|
||||||
|
|
||||||
for path in corpus.paths_iter_mut() {
|
iterator = Box::new(
|
||||||
*path = corpus_directory.join(path.as_path())
|
iterator.chain(
|
||||||
|
modes
|
||||||
|
.into_iter()
|
||||||
|
.map(move |mode| (metadata_file, case_idx, case, mode)),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::CaseWithMode { case_idx, mode, .. } => {
|
||||||
|
let Some(case) = metadata_file.cases.get(*case_idx) else {
|
||||||
|
warn!(
|
||||||
|
test_specifier = %test_specifier,
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
case_idx = case_idx,
|
||||||
|
case_count = metadata_file.cases.len(),
|
||||||
|
"Specified case not found in metadata file"
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let case_idx = CaseIdx::new(*case_idx);
|
||||||
|
|
||||||
|
let mode = Cow::Borrowed(mode);
|
||||||
|
iterator = Box::new(iterator.chain(std::iter::once((
|
||||||
|
metadata_file,
|
||||||
|
case_idx,
|
||||||
|
case,
|
||||||
|
mode,
|
||||||
|
))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(corpus)
|
iterator.unique_by(|item| (&item.0.metadata_file_path, item.1, item.3.clone()))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
|
pub fn metadata_file_count(&self) -> usize {
|
||||||
let mut tests = self
|
self.metadata_files.len()
|
||||||
.paths_iter()
|
}
|
||||||
.flat_map(|root_path| {
|
}
|
||||||
if !root_path.is_dir() {
|
|
||||||
Box::new(std::iter::once(root_path.to_path_buf()))
|
fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
|
||||||
as Box<dyn Iterator<Item = _>>
|
let root_path = path.as_ref();
|
||||||
|
let mut tests = if !root_path.is_dir() {
|
||||||
|
Box::new(std::iter::once(root_path.to_path_buf())) as Box<dyn Iterator<Item = _>>
|
||||||
} else {
|
} else {
|
||||||
Box::new(
|
Box::new(
|
||||||
FilesWithExtensionIterator::new(root_path)
|
FilesWithExtensionIterator::new(root_path)
|
||||||
@@ -60,7 +171,6 @@ impl Corpus {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
.map(move |metadata_file_path| (root_path, metadata_file_path))
|
.map(move |metadata_file_path| (root_path, metadata_file_path))
|
||||||
})
|
|
||||||
.filter_map(|(root_path, metadata_file_path)| {
|
.filter_map(|(root_path, metadata_file_path)| {
|
||||||
Metadata::try_from_file(&metadata_file_path)
|
Metadata::try_from_file(&metadata_file_path)
|
||||||
.or_else(|| {
|
.or_else(|| {
|
||||||
@@ -86,46 +196,5 @@ impl Corpus {
|
|||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
|
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
|
||||||
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
|
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
|
||||||
info!(
|
|
||||||
len = tests.len(),
|
|
||||||
corpus_name = self.name(),
|
|
||||||
"Found tests in Corpus"
|
|
||||||
);
|
|
||||||
tests
|
tests
|
||||||
}
|
|
||||||
|
|
||||||
pub fn name(&self) -> &str {
|
|
||||||
match self {
|
|
||||||
Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
|
|
||||||
match self {
|
|
||||||
Corpus::SinglePath { path, .. } => {
|
|
||||||
Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
|
|
||||||
}
|
|
||||||
Corpus::MultiplePaths { paths, .. } => {
|
|
||||||
Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
|
|
||||||
match self {
|
|
||||||
Corpus::SinglePath { path, .. } => {
|
|
||||||
Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
|
|
||||||
}
|
|
||||||
Corpus::MultiplePaths { paths, .. } => {
|
|
||||||
Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn path_count(&self) -> usize {
|
|
||||||
match self {
|
|
||||||
Corpus::SinglePath { .. } => 1,
|
|
||||||
Corpus::MultiplePaths { paths, .. } => paths.len(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,6 +3,5 @@
|
|||||||
pub mod case;
|
pub mod case;
|
||||||
pub mod corpus;
|
pub mod corpus;
|
||||||
pub mod metadata;
|
pub mod metadata;
|
||||||
pub mod mode;
|
|
||||||
pub mod steps;
|
pub mod steps;
|
||||||
pub mod traits;
|
pub mod traits;
|
||||||
|
|||||||
@@ -16,11 +16,11 @@ use revive_dt_common::{
|
|||||||
cached_fs::read_to_string,
|
cached_fs::read_to_string,
|
||||||
iterators::FilesWithExtensionIterator,
|
iterators::FilesWithExtensionIterator,
|
||||||
macros::define_wrapper_type,
|
macros::define_wrapper_type,
|
||||||
types::{Mode, VmIdentifier},
|
types::{Mode, ParsedMode, VmIdentifier},
|
||||||
};
|
};
|
||||||
use tracing::error;
|
use tracing::error;
|
||||||
|
|
||||||
use crate::{case::Case, mode::ParsedMode};
|
use crate::case::Case;
|
||||||
|
|
||||||
pub const METADATA_FILE_EXTENSION: &str = "json";
|
pub const METADATA_FILE_EXTENSION: &str = "json";
|
||||||
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
|
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
|
||||||
|
|||||||
@@ -1,257 +0,0 @@
|
|||||||
use anyhow::Context as _;
|
|
||||||
use regex::Regex;
|
|
||||||
use revive_dt_common::iterators::EitherIter;
|
|
||||||
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
|
|
||||||
use schemars::JsonSchema;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::collections::HashSet;
|
|
||||||
use std::fmt::Display;
|
|
||||||
use std::str::FromStr;
|
|
||||||
use std::sync::LazyLock;
|
|
||||||
|
|
||||||
/// This represents a mode that has been parsed from test metadata.
|
|
||||||
///
|
|
||||||
/// Mode strings can take the following form (in pseudo-regex):
|
|
||||||
///
|
|
||||||
/// ```text
|
|
||||||
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
|
|
||||||
#[serde(try_from = "String", into = "String")]
|
|
||||||
pub struct ParsedMode {
|
|
||||||
pub pipeline: Option<ModePipeline>,
|
|
||||||
pub optimize_flag: Option<bool>,
|
|
||||||
pub optimize_setting: Option<ModeOptimizerSetting>,
|
|
||||||
pub version: Option<semver::VersionReq>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for ParsedMode {
|
|
||||||
type Err = anyhow::Error;
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
|
|
||||||
Regex::new(r"(?x)
|
|
||||||
^
|
|
||||||
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
|
|
||||||
\s*
|
|
||||||
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
|
|
||||||
\s*
|
|
||||||
(?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
|
|
||||||
$
|
|
||||||
").unwrap()
|
|
||||||
});
|
|
||||||
|
|
||||||
let Some(caps) = REGEX.captures(s) else {
|
|
||||||
anyhow::bail!("Cannot parse mode '{s}' from string");
|
|
||||||
};
|
|
||||||
|
|
||||||
let pipeline = match caps.name("pipeline") {
|
|
||||||
Some(m) => Some(
|
|
||||||
ModePipeline::from_str(m.as_str())
|
|
||||||
.context("Failed to parse mode pipeline from string")?,
|
|
||||||
),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
|
|
||||||
|
|
||||||
let optimize_setting = match caps.name("optimize_setting") {
|
|
||||||
Some(m) => Some(
|
|
||||||
ModeOptimizerSetting::from_str(m.as_str())
|
|
||||||
.context("Failed to parse optimizer setting from string")?,
|
|
||||||
),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let version = match caps.name("version") {
|
|
||||||
Some(m) => Some(
|
|
||||||
semver::VersionReq::parse(m.as_str())
|
|
||||||
.map_err(|e| {
|
|
||||||
anyhow::anyhow!(
|
|
||||||
"Cannot parse the version requirement '{}': {e}",
|
|
||||||
m.as_str()
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.context("Failed to parse semver requirement from mode string")?,
|
|
||||||
),
|
|
||||||
None => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(ParsedMode {
|
|
||||||
pipeline,
|
|
||||||
optimize_flag,
|
|
||||||
optimize_setting,
|
|
||||||
version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for ParsedMode {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
let mut has_written = false;
|
|
||||||
|
|
||||||
if let Some(pipeline) = self.pipeline {
|
|
||||||
pipeline.fmt(f)?;
|
|
||||||
if let Some(optimize_flag) = self.optimize_flag {
|
|
||||||
f.write_str(if optimize_flag { "+" } else { "-" })?;
|
|
||||||
}
|
|
||||||
has_written = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(optimize_setting) = self.optimize_setting {
|
|
||||||
if has_written {
|
|
||||||
f.write_str(" ")?;
|
|
||||||
}
|
|
||||||
optimize_setting.fmt(f)?;
|
|
||||||
has_written = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(version) = &self.version {
|
|
||||||
if has_written {
|
|
||||||
f.write_str(" ")?;
|
|
||||||
}
|
|
||||||
version.fmt(f)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<ParsedMode> for String {
|
|
||||||
fn from(parsed_mode: ParsedMode) -> Self {
|
|
||||||
parsed_mode.to_string()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TryFrom<String> for ParsedMode {
|
|
||||||
type Error = anyhow::Error;
|
|
||||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
|
||||||
ParsedMode::from_str(&value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ParsedMode {
|
|
||||||
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
|
|
||||||
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
|
|
||||||
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|
|
||||||
|| EitherIter::A(ModePipeline::test_cases()),
|
|
||||||
|p| EitherIter::B(std::iter::once(*p)),
|
|
||||||
);
|
|
||||||
|
|
||||||
let optimize_flag_setting = self.optimize_flag.map(|flag| {
|
|
||||||
if flag {
|
|
||||||
ModeOptimizerSetting::M3
|
|
||||||
} else {
|
|
||||||
ModeOptimizerSetting::M0
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let optimize_flag_iter = match optimize_flag_setting {
|
|
||||||
Some(setting) => EitherIter::A(std::iter::once(setting)),
|
|
||||||
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
|
|
||||||
};
|
|
||||||
|
|
||||||
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|
|
||||||
|| EitherIter::A(optimize_flag_iter),
|
|
||||||
|s| EitherIter::B(std::iter::once(*s)),
|
|
||||||
);
|
|
||||||
|
|
||||||
pipeline_iter.flat_map(move |pipeline| {
|
|
||||||
optimize_settings_iter
|
|
||||||
.clone()
|
|
||||||
.map(move |optimize_setting| Mode {
|
|
||||||
pipeline,
|
|
||||||
optimize_setting,
|
|
||||||
version: self.version.clone(),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
|
|
||||||
/// This avoids any duplicate entries.
|
|
||||||
pub fn many_to_modes<'a>(
|
|
||||||
parsed: impl Iterator<Item = &'a ParsedMode>,
|
|
||||||
) -> impl Iterator<Item = Mode> {
|
|
||||||
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
|
|
||||||
modes.into_iter()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_parsed_mode_from_str() {
|
|
||||||
let strings = vec![
|
|
||||||
("Mz", "Mz"),
|
|
||||||
("Y", "Y"),
|
|
||||||
("Y+", "Y+"),
|
|
||||||
("Y-", "Y-"),
|
|
||||||
("E", "E"),
|
|
||||||
("E+", "E+"),
|
|
||||||
("E-", "E-"),
|
|
||||||
("Y M0", "Y M0"),
|
|
||||||
("Y M1", "Y M1"),
|
|
||||||
("Y M2", "Y M2"),
|
|
||||||
("Y M3", "Y M3"),
|
|
||||||
("Y Ms", "Y Ms"),
|
|
||||||
("Y Mz", "Y Mz"),
|
|
||||||
("E M0", "E M0"),
|
|
||||||
("E M1", "E M1"),
|
|
||||||
("E M2", "E M2"),
|
|
||||||
("E M3", "E M3"),
|
|
||||||
("E Ms", "E Ms"),
|
|
||||||
("E Mz", "E Mz"),
|
|
||||||
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
|
|
||||||
("Y 0.8.0", "Y ^0.8.0"),
|
|
||||||
("E+ 0.8.0", "E+ ^0.8.0"),
|
|
||||||
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
|
|
||||||
("E Mz <0.7.0", "E Mz <0.7.0"),
|
|
||||||
// We can parse +- _and_ M1/M2 but the latter takes priority.
|
|
||||||
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
|
|
||||||
("E- M2 0.7.0", "E- M2 ^0.7.0"),
|
|
||||||
// We don't see this in the wild but it is parsed.
|
|
||||||
("<=0.8", "<=0.8"),
|
|
||||||
];
|
|
||||||
|
|
||||||
for (actual, expected) in strings {
|
|
||||||
let parsed = ParsedMode::from_str(actual)
|
|
||||||
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
|
||||||
assert_eq!(
|
|
||||||
expected,
|
|
||||||
parsed.to_string(),
|
|
||||||
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_parsed_mode_to_test_modes() {
|
|
||||||
let strings = vec![
|
|
||||||
("Mz", vec!["Y Mz", "E Mz"]),
|
|
||||||
("Y", vec!["Y M0", "Y M3"]),
|
|
||||||
("E", vec!["E M0", "E M3"]),
|
|
||||||
("Y+", vec!["Y M3"]),
|
|
||||||
("Y-", vec!["Y M0"]),
|
|
||||||
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
|
|
||||||
(
|
|
||||||
"<=0.8",
|
|
||||||
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
|
|
||||||
),
|
|
||||||
];
|
|
||||||
|
|
||||||
for (actual, expected) in strings {
|
|
||||||
let parsed = ParsedMode::from_str(actual)
|
|
||||||
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
|
||||||
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
|
|
||||||
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
expected_set, actual_set,
|
|
||||||
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
use std::{collections::HashMap, fmt::Display, str::FromStr};
|
use std::{collections::HashMap, fmt::Display, str::FromStr};
|
||||||
|
|
||||||
|
use alloy::hex::ToHexExt;
|
||||||
use alloy::primitives::{FixedBytes, utils::parse_units};
|
use alloy::primitives::{FixedBytes, utils::parse_units};
|
||||||
use alloy::{
|
use alloy::{
|
||||||
eips::BlockNumberOrTag,
|
eips::BlockNumberOrTag,
|
||||||
@@ -45,12 +46,12 @@ pub enum Step {
|
|||||||
}
|
}
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
pub struct StepIdx(usize) impl Display, FromStr;
|
pub struct StepIdx(usize) impl Display, FromStr;
|
||||||
);
|
);
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
#[serde(try_from = "String", into = "String")]
|
#[serde(try_from = "String", into = "String")]
|
||||||
pub struct StepPath(Vec<StepIdx>);
|
pub struct StepPath(Vec<StepIdx>);
|
||||||
);
|
);
|
||||||
@@ -686,8 +687,8 @@ impl Calldata {
|
|||||||
Calldata::Compound(items) => {
|
Calldata::Compound(items) => {
|
||||||
stream::iter(items.iter().zip(other.chunks(32)))
|
stream::iter(items.iter().zip(other.chunks(32)))
|
||||||
.map(|(this, other)| async move {
|
.map(|(this, other)| async move {
|
||||||
// The matterlabs format supports wildcards and therefore we
|
// The MatterLabs format supports wildcards and therefore we also need to
|
||||||
// also need to support them.
|
// support them.
|
||||||
if this.as_ref() == "*" {
|
if this.as_ref() == "*" {
|
||||||
return Ok::<_, anyhow::Error>(true);
|
return Ok::<_, anyhow::Error>(true);
|
||||||
}
|
}
|
||||||
@@ -768,7 +769,14 @@ impl CalldataItem {
|
|||||||
match stack.as_slice() {
|
match stack.as_slice() {
|
||||||
// Empty stack means that we got an empty compound calldata which we resolve to zero.
|
// Empty stack means that we got an empty compound calldata which we resolve to zero.
|
||||||
[] => Ok(U256::ZERO),
|
[] => Ok(U256::ZERO),
|
||||||
[CalldataToken::Item(item)] => Ok(*item),
|
[CalldataToken::Item(item)] => {
|
||||||
|
tracing::debug!(
|
||||||
|
original_item = ?self,
|
||||||
|
resolved_item = item.to_be_bytes::<32>().encode_hex(),
|
||||||
|
"Resolution Done"
|
||||||
|
);
|
||||||
|
Ok(*item)
|
||||||
|
}
|
||||||
_ => Err(anyhow::anyhow!(
|
_ => Err(anyhow::anyhow!(
|
||||||
"Invalid calldata arithmetic operation - Invalid stack"
|
"Invalid calldata arithmetic operation - Invalid stack"
|
||||||
)),
|
)),
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ rust-version.workspace = true
|
|||||||
revive-common = { workspace = true }
|
revive-common = { workspace = true }
|
||||||
|
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
|
|||||||
@@ -3,7 +3,9 @@
|
|||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
|
use alloy::network::Ethereum;
|
||||||
|
use alloy::primitives::{Address, StorageKey, TxHash, U256};
|
||||||
|
use alloy::providers::DynProvider;
|
||||||
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
|
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
|
||||||
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
|
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
@@ -11,6 +13,7 @@ use anyhow::Result;
|
|||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
|
use revive_dt_report::MinedBlockInformation;
|
||||||
|
|
||||||
/// An interface for all interactions with Ethereum compatible nodes.
|
/// An interface for all interactions with Ethereum compatible nodes.
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
@@ -74,22 +77,7 @@ pub trait EthereumNode {
|
|||||||
+ '_,
|
+ '_,
|
||||||
>,
|
>,
|
||||||
>;
|
>;
|
||||||
}
|
|
||||||
|
fn provider(&self)
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
|
||||||
pub struct MinedBlockInformation {
|
|
||||||
/// The block number.
|
|
||||||
pub block_number: BlockNumber,
|
|
||||||
|
|
||||||
/// The block timestamp.
|
|
||||||
pub block_timestamp: BlockTimestamp,
|
|
||||||
|
|
||||||
/// The amount of gas mined in the block.
|
|
||||||
pub mined_gas: u128,
|
|
||||||
|
|
||||||
/// The gas limit of the block.
|
|
||||||
pub block_gas_limit: u128,
|
|
||||||
|
|
||||||
/// The hashes of the transactions that were mined as part of the block.
|
|
||||||
pub transaction_hashes: Vec<TxHash>,
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ revive-dt-common = { workspace = true }
|
|||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
revive-dt-node-interaction = { workspace = true }
|
revive-dt-node-interaction = { workspace = true }
|
||||||
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
@@ -29,6 +30,8 @@ serde_yaml_ng = { workspace = true }
|
|||||||
|
|
||||||
sp-core = { workspace = true }
|
sp-core = { workspace = true }
|
||||||
sp-runtime = { workspace = true }
|
sp-runtime = { workspace = true }
|
||||||
|
subxt = { workspace = true }
|
||||||
|
zombienet-sdk = { workspace = true }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
temp-dir = { workspace = true }
|
temp-dir = { workspace = true }
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ use alloy::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
use anyhow::Context as _;
|
use anyhow::Context as _;
|
||||||
use futures::{Stream, StreamExt};
|
use futures::{FutureExt, Stream, StreamExt};
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
use tracing::{Instrument, error, instrument};
|
use tracing::{Instrument, error, instrument};
|
||||||
@@ -43,7 +43,8 @@ use revive_dt_common::{
|
|||||||
};
|
};
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Node,
|
Node,
|
||||||
@@ -130,7 +131,7 @@ impl GethNode {
|
|||||||
|
|
||||||
/// Create the node directory and call `geth init` to configure the genesis.
|
/// Create the node directory and call `geth init` to configure the genesis.
|
||||||
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
|
fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
|
||||||
let _ = clear_directory(&self.base_directory);
|
let _ = clear_directory(&self.base_directory);
|
||||||
let _ = clear_directory(&self.logs_directory);
|
let _ = clear_directory(&self.logs_directory);
|
||||||
|
|
||||||
@@ -139,16 +140,7 @@ impl GethNode {
|
|||||||
create_dir_all(&self.logs_directory)
|
create_dir_all(&self.logs_directory)
|
||||||
.context("Failed to create logs directory for geth node")?;
|
.context("Failed to create logs directory for geth node")?;
|
||||||
|
|
||||||
for signer_address in
|
let genesis = Self::node_genesis(genesis, self.wallet.as_ref());
|
||||||
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
|
|
||||||
{
|
|
||||||
// Note, the use of the entry API here means that we only modify the entries for any
|
|
||||||
// account that is not in the `alloc` field of the genesis state.
|
|
||||||
genesis
|
|
||||||
.alloc
|
|
||||||
.entry(signer_address)
|
|
||||||
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
|
|
||||||
}
|
|
||||||
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
|
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
|
||||||
serde_json::to_writer(
|
serde_json::to_writer(
|
||||||
File::create(&genesis_path).context("Failed to create geth genesis file")?,
|
File::create(&genesis_path).context("Failed to create geth genesis file")?,
|
||||||
@@ -265,6 +257,16 @@ impl GethNode {
|
|||||||
.await
|
.await
|
||||||
.cloned()
|
.cloned()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
|
||||||
|
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
|
||||||
|
genesis
|
||||||
|
.alloc
|
||||||
|
.entry(signer_address)
|
||||||
|
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
|
||||||
|
}
|
||||||
|
genesis
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EthereumNode for GethNode {
|
impl EthereumNode for GethNode {
|
||||||
@@ -525,6 +527,7 @@ impl EthereumNode for GethNode {
|
|||||||
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
||||||
let block = block.ok()?;
|
let block = block.ok()?;
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
|
ethereum_block_information: EthereumMinedBlockInformation {
|
||||||
block_number: block.number(),
|
block_number: block.number(),
|
||||||
block_timestamp: block.header.timestamp,
|
block_timestamp: block.header.timestamp,
|
||||||
mined_gas: block.header.gas_used as _,
|
mined_gas: block.header.gas_used as _,
|
||||||
@@ -535,6 +538,9 @@ impl EthereumNode for GethNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
|
},
|
||||||
|
substrate_block_information: None,
|
||||||
|
tx_counts: Default::default(),
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -542,6 +548,16 @@ impl EthereumNode for GethNode {
|
|||||||
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn provider(
|
||||||
|
&self,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
|
||||||
|
{
|
||||||
|
Box::pin(
|
||||||
|
self.provider()
|
||||||
|
.map(|provider| provider.map(|provider| provider.erased())),
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct GethNodeResolver {
|
pub struct GethNodeResolver {
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ use std::{
|
|||||||
pin::Pin,
|
pin::Pin,
|
||||||
process::{Command, Stdio},
|
process::{Command, Stdio},
|
||||||
sync::{
|
sync::{
|
||||||
Arc, LazyLock,
|
Arc,
|
||||||
atomic::{AtomicU32, Ordering},
|
atomic::{AtomicU32, Ordering},
|
||||||
},
|
},
|
||||||
time::{Duration, SystemTime, UNIX_EPOCH},
|
time::{Duration, SystemTime, UNIX_EPOCH},
|
||||||
@@ -43,11 +43,11 @@ use alloy::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
use anyhow::Context as _;
|
use anyhow::Context as _;
|
||||||
use futures::{Stream, StreamExt};
|
use futures::{FutureExt, Stream, StreamExt};
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
use serde_with::serde_as;
|
use serde_with::serde_as;
|
||||||
use tokio::sync::{OnceCell, Semaphore};
|
use tokio::sync::OnceCell;
|
||||||
use tracing::{Instrument, info, instrument};
|
use tracing::{Instrument, info, instrument};
|
||||||
|
|
||||||
use revive_dt_common::{
|
use revive_dt_common::{
|
||||||
@@ -56,7 +56,8 @@ use revive_dt_common::{
|
|||||||
};
|
};
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Node,
|
Node,
|
||||||
@@ -105,7 +106,6 @@ pub struct LighthouseGethNode {
|
|||||||
|
|
||||||
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
||||||
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
||||||
http_provider_requests_semaphore: LazyLock<Semaphore>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LighthouseGethNode {
|
impl LighthouseGethNode {
|
||||||
@@ -176,7 +176,6 @@ impl LighthouseGethNode {
|
|||||||
nonce_manager: Default::default(),
|
nonce_manager: Default::default(),
|
||||||
persistent_http_provider: OnceCell::const_new(),
|
persistent_http_provider: OnceCell::const_new(),
|
||||||
persistent_ws_provider: OnceCell::const_new(),
|
persistent_ws_provider: OnceCell::const_new(),
|
||||||
http_provider_requests_semaphore: LazyLock::new(|| Semaphore::const_new(500)),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,6 +223,7 @@ impl LighthouseGethNode {
|
|||||||
"--ws.port=8546".to_string(),
|
"--ws.port=8546".to_string(),
|
||||||
"--ws.api=eth,net,web3,txpool,engine".to_string(),
|
"--ws.api=eth,net,web3,txpool,engine".to_string(),
|
||||||
"--ws.origins=*".to_string(),
|
"--ws.origins=*".to_string(),
|
||||||
|
"--miner.gaslimit=30000000".to_string(),
|
||||||
],
|
],
|
||||||
consensus_layer_extra_parameters: vec![
|
consensus_layer_extra_parameters: vec![
|
||||||
"--disable-quic".to_string(),
|
"--disable-quic".to_string(),
|
||||||
@@ -249,6 +249,8 @@ impl LighthouseGethNode {
|
|||||||
.collect::<BTreeMap<_, _>>();
|
.collect::<BTreeMap<_, _>>();
|
||||||
serde_json::to_string(&map).unwrap()
|
serde_json::to_string(&map).unwrap()
|
||||||
},
|
},
|
||||||
|
gas_limit: 30_000_000,
|
||||||
|
genesis_gaslimit: 30_000_000,
|
||||||
},
|
},
|
||||||
wait_for_finalization: false,
|
wait_for_finalization: false,
|
||||||
port_publisher: Some(PortPublisherParameters {
|
port_publisher: Some(PortPublisherParameters {
|
||||||
@@ -540,6 +542,16 @@ impl LighthouseGethNode {
|
|||||||
.await
|
.await
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
|
||||||
|
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
|
||||||
|
genesis
|
||||||
|
.alloc
|
||||||
|
.entry(signer_address)
|
||||||
|
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
|
||||||
|
}
|
||||||
|
genesis
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EthereumNode for LighthouseGethNode {
|
impl EthereumNode for LighthouseGethNode {
|
||||||
@@ -566,8 +578,6 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
transaction: TransactionRequest,
|
transaction: TransactionRequest,
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let _permit = self.http_provider_requests_semaphore.acquire().await;
|
|
||||||
|
|
||||||
let provider = self
|
let provider = self
|
||||||
.http_provider()
|
.http_provider()
|
||||||
.await
|
.await
|
||||||
@@ -748,6 +758,7 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
||||||
let block = block.ok()?;
|
let block = block.ok()?;
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
|
ethereum_block_information: EthereumMinedBlockInformation {
|
||||||
block_number: block.number(),
|
block_number: block.number(),
|
||||||
block_timestamp: block.header.timestamp,
|
block_timestamp: block.header.timestamp,
|
||||||
mined_gas: block.header.gas_used as _,
|
mined_gas: block.header.gas_used as _,
|
||||||
@@ -758,6 +769,9 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
|
},
|
||||||
|
substrate_block_information: None,
|
||||||
|
tx_counts: Default::default(),
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -765,6 +779,16 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn provider(
|
||||||
|
&self,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
|
||||||
|
{
|
||||||
|
Box::pin(
|
||||||
|
self.http_provider()
|
||||||
|
.map(|provider| provider.map(|provider| provider.erased())),
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
|
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
|
||||||
@@ -1039,6 +1063,8 @@ struct NetworkParameters {
|
|||||||
pub num_validator_keys_per_node: u64,
|
pub num_validator_keys_per_node: u64,
|
||||||
|
|
||||||
pub genesis_delay: u64,
|
pub genesis_delay: u64,
|
||||||
|
pub genesis_gaslimit: u64,
|
||||||
|
pub gas_limit: u64,
|
||||||
|
|
||||||
pub prefunded_accounts: String,
|
pub prefunded_accounts: String,
|
||||||
}
|
}
|
||||||
@@ -1135,6 +1161,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let (context, node) = new_node();
|
let (context, node) = new_node();
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
pub mod geth;
|
pub mod geth;
|
||||||
pub mod lighthouse_geth;
|
pub mod lighthouse_geth;
|
||||||
pub mod substrate;
|
pub mod substrate;
|
||||||
|
pub mod zombienet;
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
+19
-5
@@ -7,6 +7,10 @@ use alloy::{
|
|||||||
transports::TransportResult,
|
transports::TransportResult,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
|
||||||
|
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
|
||||||
|
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct FallbackGasFiller {
|
pub struct FallbackGasFiller {
|
||||||
inner: GasFiller,
|
inner: GasFiller,
|
||||||
@@ -56,11 +60,12 @@ where
|
|||||||
provider: &P,
|
provider: &P,
|
||||||
tx: &<N as Network>::TransactionRequest,
|
tx: &<N as Network>::TransactionRequest,
|
||||||
) -> TransportResult<Self::Fillable> {
|
) -> TransportResult<Self::Fillable> {
|
||||||
// Try to fetch GasFiller’s “fillable” (gas_price, base_fee, estimate_gas, …)
|
|
||||||
// If it errors (i.e. tx would revert under eth_estimateGas), swallow it.
|
|
||||||
match self.inner.prepare(provider, tx).await {
|
match self.inner.prepare(provider, tx).await {
|
||||||
Ok(fill) => Ok(Some(fill)),
|
Ok(fill) => Ok(Some(fill)),
|
||||||
Err(_) => Ok(None),
|
Err(err) => {
|
||||||
|
tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,8 +75,17 @@ where
|
|||||||
mut tx: alloy::providers::SendableTx<N>,
|
mut tx: alloy::providers::SendableTx<N>,
|
||||||
) -> TransportResult<SendableTx<N>> {
|
) -> TransportResult<SendableTx<N>> {
|
||||||
if let Some(fill) = fillable {
|
if let Some(fill) = fillable {
|
||||||
// our inner GasFiller succeeded — use it
|
let mut tx = self.inner.fill(fill, tx).await?;
|
||||||
self.inner.fill(fill, tx).await
|
if let Some(builder) = tx.as_mut_builder() {
|
||||||
|
if let Some(estimated) = builder.gas_limit() {
|
||||||
|
let padded = estimated
|
||||||
|
.checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
|
||||||
|
.and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
|
||||||
|
.unwrap_or(u64::MAX);
|
||||||
|
builder.set_gas_limit(padded);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(tx)
|
||||||
} else {
|
} else {
|
||||||
if let Some(builder) = tx.as_mut_builder() {
|
if let Some(builder) = tx.as_mut_builder() {
|
||||||
builder.set_gas_limit(self.default_gas_limit);
|
builder.set_gas_limit(self.default_gas_limit);
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
mod concurrency_limiter;
|
mod concurrency_limiter;
|
||||||
mod fallback_gas_provider;
|
mod fallback_gas_filler;
|
||||||
mod provider;
|
mod provider;
|
||||||
|
|
||||||
pub use concurrency_limiter::*;
|
pub use concurrency_limiter::*;
|
||||||
pub use fallback_gas_provider::*;
|
pub use fallback_gas_filler::*;
|
||||||
pub use provider::*;
|
pub use provider::*;
|
||||||
|
|||||||
@@ -1,14 +1,16 @@
|
|||||||
use std::sync::LazyLock;
|
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
|
||||||
|
|
||||||
use alloy::{
|
use alloy::{
|
||||||
network::{Network, NetworkWallet, TransactionBuilder4844},
|
network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
|
||||||
providers::{
|
providers::{
|
||||||
Identity, ProviderBuilder, RootProvider,
|
Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
|
||||||
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
|
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
|
||||||
},
|
},
|
||||||
rpc::client::ClientBuilder,
|
rpc::client::ClientBuilder,
|
||||||
};
|
};
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
|
use revive_dt_common::futures::{PollingWaitBehavior, poll};
|
||||||
|
use tracing::{Instrument, debug, info, info_span};
|
||||||
|
|
||||||
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
|
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
|
||||||
|
|
||||||
@@ -42,7 +44,7 @@ where
|
|||||||
// requests at any point of time and no more than that. This is done in an effort to stabilize
|
// requests at any point of time and no more than that. This is done in an effort to stabilize
|
||||||
// the framework from some of the interment issues that we've been seeing related to RPC calls.
|
// the framework from some of the interment issues that we've been seeing related to RPC calls.
|
||||||
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
|
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
|
||||||
LazyLock::new(|| ConcurrencyLimiterLayer::new(10));
|
LazyLock::new(|| ConcurrencyLimiterLayer::new(500));
|
||||||
|
|
||||||
let client = ClientBuilder::default()
|
let client = ClientBuilder::default()
|
||||||
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
|
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
|
||||||
@@ -61,3 +63,70 @@ where
|
|||||||
|
|
||||||
Ok(provider)
|
Ok(provider)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn execute_transaction<N, W>(
|
||||||
|
provider: ConcreteProvider<N, W>,
|
||||||
|
transaction: N::TransactionRequest,
|
||||||
|
) -> Result<N::ReceiptResponse>
|
||||||
|
where
|
||||||
|
N: Network<
|
||||||
|
TransactionRequest: TransactionBuilder4844,
|
||||||
|
TxEnvelope = <Ethereum as Network>::TxEnvelope,
|
||||||
|
>,
|
||||||
|
W: NetworkWallet<N>,
|
||||||
|
Identity: TxFiller<N>,
|
||||||
|
FallbackGasFiller: TxFiller<N>,
|
||||||
|
ChainIdFiller: TxFiller<N>,
|
||||||
|
NonceFiller: TxFiller<N>,
|
||||||
|
WalletFiller<W>: TxFiller<N>,
|
||||||
|
{
|
||||||
|
let sendable_transaction = provider
|
||||||
|
.fill(transaction)
|
||||||
|
.await
|
||||||
|
.context("Failed to fill transaction")?;
|
||||||
|
|
||||||
|
let transaction_envelope = sendable_transaction
|
||||||
|
.try_into_envelope()
|
||||||
|
.context("Failed to convert transaction into an envelope")?;
|
||||||
|
let tx_hash = *transaction_envelope.tx_hash();
|
||||||
|
|
||||||
|
let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
|
||||||
|
Ok(pending_transaction) => pending_transaction,
|
||||||
|
Err(error) => {
|
||||||
|
let error_string = error.to_string();
|
||||||
|
|
||||||
|
if error_string.contains("Transaction Already Imported") {
|
||||||
|
PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
|
||||||
|
} else {
|
||||||
|
return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
debug!(%tx_hash, "Submitted Transaction");
|
||||||
|
|
||||||
|
pending_transaction.set_timeout(Some(Duration::from_secs(240)));
|
||||||
|
let tx_hash = pending_transaction.watch().await.context(format!(
|
||||||
|
"Transaction inclusion watching timeout for {tx_hash}"
|
||||||
|
))?;
|
||||||
|
|
||||||
|
poll(
|
||||||
|
Duration::from_secs(60),
|
||||||
|
PollingWaitBehavior::Constant(Duration::from_secs(3)),
|
||||||
|
|| {
|
||||||
|
let provider = provider.clone();
|
||||||
|
|
||||||
|
async move {
|
||||||
|
match provider.get_transaction_receipt(tx_hash).await {
|
||||||
|
Ok(Some(receipt)) => {
|
||||||
|
info!("Found the transaction receipt");
|
||||||
|
Ok(ControlFlow::Break(receipt))
|
||||||
|
}
|
||||||
|
_ => Ok(ControlFlow::Continue(())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.instrument(info_span!("Polling for receipt", %tx_hash))
|
||||||
|
.await
|
||||||
|
.context(format!("Polling for receipt failed for {tx_hash}"))
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ alloy = { workspace = true }
|
|||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
paste = { workspace = true }
|
paste = { workspace = true }
|
||||||
indexmap = { workspace = true, features = ["serde"] }
|
indexmap = { workspace = true, features = ["serde"] }
|
||||||
|
itertools = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
|||||||
+453
-42
@@ -4,19 +4,21 @@
|
|||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||||
fs::OpenOptions,
|
fs::OpenOptions,
|
||||||
|
ops::{Add, Div},
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
time::{SystemTime, UNIX_EPOCH},
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
};
|
};
|
||||||
|
|
||||||
use alloy::primitives::Address;
|
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, TxHash};
|
||||||
use anyhow::{Context as _, Result};
|
use anyhow::{Context as _, Result};
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
|
use itertools::Itertools;
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
|
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
|
||||||
use revive_dt_config::Context;
|
use revive_dt_config::Context;
|
||||||
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
|
use revive_dt_format::{case::CaseIdx, metadata::ContractInstance, steps::StepPath};
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use serde::Serialize;
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_with::{DisplayFromStr, serde_as};
|
use serde_with::{DisplayFromStr, serde_as};
|
||||||
use tokio::sync::{
|
use tokio::sync::{
|
||||||
broadcast::{Sender, channel},
|
broadcast::{Sender, channel},
|
||||||
@@ -39,7 +41,7 @@ pub struct ReportAggregator {
|
|||||||
impl ReportAggregator {
|
impl ReportAggregator {
|
||||||
pub fn new(context: Context) -> Self {
|
pub fn new(context: Context) -> Self {
|
||||||
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
||||||
let (listener_tx, _) = channel::<ReporterEvent>(1024);
|
let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
|
||||||
Self {
|
Self {
|
||||||
report: Report::new(context),
|
report: Report::new(context),
|
||||||
remaining_cases: Default::default(),
|
remaining_cases: Default::default(),
|
||||||
@@ -62,14 +64,11 @@ impl ReportAggregator {
|
|||||||
debug!("Starting to aggregate report");
|
debug!("Starting to aggregate report");
|
||||||
|
|
||||||
while let Some(event) = self.runner_rx.recv().await {
|
while let Some(event) = self.runner_rx.recv().await {
|
||||||
debug!(?event, "Received Event");
|
debug!(event = event.variant_name(), "Received Event");
|
||||||
match event {
|
match event {
|
||||||
RunnerEvent::SubscribeToEvents(event) => {
|
RunnerEvent::SubscribeToEvents(event) => {
|
||||||
self.handle_subscribe_to_events_event(*event);
|
self.handle_subscribe_to_events_event(*event);
|
||||||
}
|
}
|
||||||
RunnerEvent::CorpusFileDiscovery(event) => {
|
|
||||||
self.handle_corpus_file_discovered_event(*event)
|
|
||||||
}
|
|
||||||
RunnerEvent::MetadataFileDiscovery(event) => {
|
RunnerEvent::MetadataFileDiscovery(event) => {
|
||||||
self.handle_metadata_file_discovery_event(*event);
|
self.handle_metadata_file_discovery_event(*event);
|
||||||
}
|
}
|
||||||
@@ -106,12 +105,20 @@ impl ReportAggregator {
|
|||||||
RunnerEvent::ContractDeployed(event) => {
|
RunnerEvent::ContractDeployed(event) => {
|
||||||
self.handle_contract_deployed_event(*event);
|
self.handle_contract_deployed_event(*event);
|
||||||
}
|
}
|
||||||
RunnerEvent::Completion(event) => {
|
RunnerEvent::Completion(_) => {
|
||||||
self.handle_completion(*event);
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
/* Benchmarks Events */
|
||||||
|
RunnerEvent::StepTransactionInformation(event) => {
|
||||||
|
self.handle_step_transaction_information(*event)
|
||||||
|
}
|
||||||
|
RunnerEvent::ContractInformation(event) => {
|
||||||
|
self.handle_contract_information(*event);
|
||||||
|
}
|
||||||
|
RunnerEvent::BlockMined(event) => self.handle_block_mined(*event),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
self.handle_completion(CompletionEvent {});
|
||||||
debug!("Report aggregation completed");
|
debug!("Report aggregation completed");
|
||||||
|
|
||||||
let file_name = {
|
let file_name = {
|
||||||
@@ -152,10 +159,6 @@ impl ReportAggregator {
|
|||||||
let _ = event.tx.send(self.listener_tx.subscribe());
|
let _ = event.tx.send(self.listener_tx.subscribe());
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
|
|
||||||
self.report.corpora.push(event.corpus);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
|
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
|
||||||
self.report.metadata_files.insert(event.path.clone());
|
self.report.metadata_files.insert(event.path.clone());
|
||||||
}
|
}
|
||||||
@@ -234,17 +237,19 @@ impl ReportAggregator {
|
|||||||
|
|
||||||
let case_status = self
|
let case_status = self
|
||||||
.report
|
.report
|
||||||
.test_case_information
|
.execution_information
|
||||||
.entry(specifier.metadata_file_path.clone().into())
|
.entry(specifier.metadata_file_path.clone().into())
|
||||||
.or_default()
|
.or_default()
|
||||||
.entry(specifier.solc_mode.clone())
|
.case_reports
|
||||||
.or_default()
|
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(case_idx, case_report)| {
|
.flat_map(|(case_idx, mode_to_execution_map)| {
|
||||||
(
|
let case_status = mode_to_execution_map
|
||||||
*case_idx,
|
.mode_execution_reports
|
||||||
case_report.status.clone().expect("Can't be uninitialized"),
|
.get(&specifier.solc_mode)?
|
||||||
)
|
.status
|
||||||
|
.clone()
|
||||||
|
.expect("Can't be uninitialized");
|
||||||
|
Some((*case_idx, case_status))
|
||||||
})
|
})
|
||||||
.collect::<BTreeMap<_, _>>();
|
.collect::<BTreeMap<_, _>>();
|
||||||
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
|
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
|
||||||
@@ -383,22 +388,157 @@ impl ReportAggregator {
|
|||||||
self.execution_information(&event.execution_specifier)
|
self.execution_information(&event.execution_specifier)
|
||||||
.deployed_contracts
|
.deployed_contracts
|
||||||
.get_or_insert_default()
|
.get_or_insert_default()
|
||||||
.insert(event.contract_instance, event.address);
|
.insert(event.contract_instance.clone(), event.address);
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.contract_addresses
|
||||||
|
.entry(event.contract_instance)
|
||||||
|
.or_default()
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.address);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_completion(&mut self, _: CompletionEvent) {
|
fn handle_completion(&mut self, _: CompletionEvent) {
|
||||||
self.runner_rx.close();
|
self.runner_rx.close();
|
||||||
|
self.handle_metrics_computation();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
|
fn handle_metrics_computation(&mut self) {
|
||||||
|
for report in self.report.execution_information.values_mut() {
|
||||||
|
for report in report.case_reports.values_mut() {
|
||||||
|
for report in report.mode_execution_reports.values_mut() {
|
||||||
|
for (platform_identifier, block_information) in
|
||||||
|
report.mined_block_information.iter_mut()
|
||||||
|
{
|
||||||
|
block_information.sort_by(|a, b| {
|
||||||
|
a.ethereum_block_information
|
||||||
|
.block_number
|
||||||
|
.cmp(&b.ethereum_block_information.block_number)
|
||||||
|
});
|
||||||
|
|
||||||
|
// Computing the TPS.
|
||||||
|
let tps = block_information
|
||||||
|
.iter()
|
||||||
|
.tuple_windows::<(_, _)>()
|
||||||
|
.map(|(block1, block2)| {
|
||||||
|
block2.ethereum_block_information.transaction_hashes.len() as u64
|
||||||
|
/ (block2.ethereum_block_information.block_timestamp
|
||||||
|
- block1.ethereum_block_information.block_timestamp)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.transaction_per_second
|
||||||
|
.with_list(*platform_identifier, tps);
|
||||||
|
|
||||||
|
// Computing the GPS.
|
||||||
|
let gps = block_information
|
||||||
|
.iter()
|
||||||
|
.tuple_windows::<(_, _)>()
|
||||||
|
.map(|(block1, block2)| {
|
||||||
|
block2.ethereum_block_information.mined_gas as u64
|
||||||
|
/ (block2.ethereum_block_information.block_timestamp
|
||||||
|
- block1.ethereum_block_information.block_timestamp)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.gas_per_second
|
||||||
|
.with_list(*platform_identifier, gps);
|
||||||
|
|
||||||
|
// Computing the gas block fullness
|
||||||
|
let gas_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.map(|block| block.gas_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.gas_block_fullness
|
||||||
|
.with_list(*platform_identifier, gas_block_fullness);
|
||||||
|
|
||||||
|
// Computing the ref-time block fullness
|
||||||
|
let reftime_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.filter_map(|block| block.ref_time_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !reftime_block_fullness.is_empty() {
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.ref_time_block_fullness
|
||||||
|
.get_or_insert_default()
|
||||||
|
.with_list(*platform_identifier, reftime_block_fullness);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Computing the proof size block fullness
|
||||||
|
let proof_size_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.filter_map(|block| block.proof_size_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !proof_size_block_fullness.is_empty() {
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.proof_size_block_fullness
|
||||||
|
.get_or_insert_default()
|
||||||
|
.with_list(*platform_identifier, proof_size_block_fullness);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_step_transaction_information(&mut self, event: StepTransactionInformationEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.steps
|
||||||
|
.entry(event.step_path)
|
||||||
|
.or_default()
|
||||||
|
.transactions
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.transaction_information);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_contract_information(&mut self, event: ContractInformationEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.compiled_contracts
|
||||||
|
.entry(event.source_code_path)
|
||||||
|
.or_default()
|
||||||
|
.entry(event.contract_name)
|
||||||
|
.or_default()
|
||||||
|
.contract_size
|
||||||
|
.insert(
|
||||||
|
event.execution_specifier.platform_identifier,
|
||||||
|
event.contract_size,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_block_mined(&mut self, event: BlockMinedEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.mined_block_information
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.mined_block_information);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut ExecutionReport {
|
||||||
self.report
|
self.report
|
||||||
.test_case_information
|
.execution_information
|
||||||
.entry(specifier.metadata_file_path.clone().into())
|
.entry(specifier.metadata_file_path.clone().into())
|
||||||
.or_default()
|
.or_default()
|
||||||
.entry(specifier.solc_mode.clone())
|
.case_reports
|
||||||
.or_default()
|
|
||||||
.entry(specifier.case_idx)
|
.entry(specifier.case_idx)
|
||||||
.or_default()
|
.or_default()
|
||||||
|
.mode_execution_reports
|
||||||
|
.entry(specifier.solc_mode.clone())
|
||||||
|
.or_default()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn execution_information(
|
fn execution_information(
|
||||||
@@ -415,43 +555,78 @@ impl ReportAggregator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[serde_as]
|
#[serde_as]
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct Report {
|
pub struct Report {
|
||||||
/// The context that the tool was started up with.
|
/// The context that the tool was started up with.
|
||||||
pub context: Context,
|
pub context: Context,
|
||||||
/// The list of corpus files that the tool found.
|
|
||||||
pub corpora: Vec<Corpus>,
|
|
||||||
/// The list of metadata files that were found by the tool.
|
/// The list of metadata files that were found by the tool.
|
||||||
pub metadata_files: BTreeSet<MetadataFilePath>,
|
pub metadata_files: BTreeSet<MetadataFilePath>,
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
/// Information relating to each test case.
|
/// Information relating to each test case.
|
||||||
#[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
|
pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
|
||||||
pub test_case_information:
|
|
||||||
BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Report {
|
impl Report {
|
||||||
pub fn new(context: Context) -> Self {
|
pub fn new(context: Context) -> Self {
|
||||||
Self {
|
Self {
|
||||||
context,
|
context,
|
||||||
corpora: Default::default(),
|
metrics: Default::default(),
|
||||||
metadata_files: Default::default(),
|
metadata_files: Default::default(),
|
||||||
test_case_information: Default::default(),
|
execution_information: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Default)]
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
pub struct TestCaseReport {
|
pub struct MetadataFileReport {
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// The report of each case keyed by the case idx.
|
||||||
|
pub case_reports: BTreeMap<CaseIdx, CaseReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct CaseReport {
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// The [`ExecutionReport`] for each one of the [`Mode`]s.
|
||||||
|
#[serde_as(as = "HashMap<DisplayFromStr, _>")]
|
||||||
|
pub mode_execution_reports: HashMap<Mode, ExecutionReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct ExecutionReport {
|
||||||
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
|
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub status: Option<TestCaseStatus>,
|
pub status: Option<TestCaseStatus>,
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
/// Information related to the execution on one of the platforms.
|
/// Information related to the execution on one of the platforms.
|
||||||
pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
|
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
|
||||||
|
/// Information on the compiled contracts.
|
||||||
|
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
|
||||||
|
/// The addresses of the deployed contracts
|
||||||
|
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
|
||||||
|
/// Information on the mined blocks as part of this execution.
|
||||||
|
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
|
||||||
|
/// Information tracked for each step that was executed.
|
||||||
|
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub steps: BTreeMap<StepPath, StepReport>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
|
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
|
||||||
/// it was ignored.
|
/// it was ignored.
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
#[serde(tag = "status")]
|
#[serde(tag = "status")]
|
||||||
pub enum TestCaseStatus {
|
pub enum TestCaseStatus {
|
||||||
/// The test case succeeded.
|
/// The test case succeeded.
|
||||||
@@ -475,7 +650,7 @@ pub enum TestCaseStatus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Information related to the platform node that's being used to execute the step.
|
/// Information related to the platform node that's being used to execute the step.
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
pub struct TestCaseNodeInformation {
|
pub struct TestCaseNodeInformation {
|
||||||
/// The ID of the node that this case is being executed on.
|
/// The ID of the node that this case is being executed on.
|
||||||
pub id: usize,
|
pub id: usize,
|
||||||
@@ -486,7 +661,7 @@ pub struct TestCaseNodeInformation {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Execution information tied to the platform.
|
/// Execution information tied to the platform.
|
||||||
#[derive(Clone, Debug, Default, Serialize)]
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
pub struct ExecutionInformation {
|
pub struct ExecutionInformation {
|
||||||
/// Information related to the node assigned to this test case.
|
/// Information related to the node assigned to this test case.
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
@@ -506,7 +681,7 @@ pub struct ExecutionInformation {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Information related to compilation
|
/// Information related to compilation
|
||||||
#[derive(Clone, Debug, Serialize)]
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
#[serde(tag = "status")]
|
#[serde(tag = "status")]
|
||||||
pub enum CompilationStatus {
|
pub enum CompilationStatus {
|
||||||
/// The compilation was successful.
|
/// The compilation was successful.
|
||||||
@@ -544,3 +719,239 @@ pub enum CompilationStatus {
|
|||||||
compiler_input: Option<CompilerInput>,
|
compiler_input: Option<CompilerInput>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Information on each step in the execution.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct StepReport {
|
||||||
|
/// Information on the transactions submitted as part of this step.
|
||||||
|
transactions: PlatformKeyedInformation<Vec<TransactionInformation>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct TransactionInformation {
|
||||||
|
/// The hash of the transaction
|
||||||
|
pub transaction_hash: TxHash,
|
||||||
|
pub submission_timestamp: u64,
|
||||||
|
pub block_timestamp: u64,
|
||||||
|
pub block_number: BlockNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The metrics we collect for our benchmarks.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct Metrics {
|
||||||
|
pub transaction_per_second: Metric<u64>,
|
||||||
|
pub gas_per_second: Metric<u64>,
|
||||||
|
/* Block Fullness */
|
||||||
|
pub gas_block_fullness: Metric<u64>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub ref_time_block_fullness: Option<Metric<u64>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub proof_size_block_fullness: Option<Metric<u64>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The data that we store for a given metric (e.g., TPS).
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct Metric<T> {
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub minimum: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub maximum: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub mean: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub median: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Metric<T>
|
||||||
|
where
|
||||||
|
T: Default
|
||||||
|
+ Copy
|
||||||
|
+ Ord
|
||||||
|
+ PartialOrd
|
||||||
|
+ Add<Output = T>
|
||||||
|
+ Div<Output = T>
|
||||||
|
+ TryFrom<usize, Error: std::fmt::Debug>,
|
||||||
|
{
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Default::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn platform_identifiers(&self) -> BTreeSet<PlatformIdentifier> {
|
||||||
|
self.minimum
|
||||||
|
.as_ref()
|
||||||
|
.map(|m| m.keys())
|
||||||
|
.into_iter()
|
||||||
|
.flatten()
|
||||||
|
.chain(
|
||||||
|
self.maximum
|
||||||
|
.as_ref()
|
||||||
|
.map(|m| m.keys())
|
||||||
|
.into_iter()
|
||||||
|
.flatten(),
|
||||||
|
)
|
||||||
|
.chain(self.mean.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.chain(self.median.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.chain(self.raw.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.copied()
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_list(
|
||||||
|
&mut self,
|
||||||
|
platform_identifier: PlatformIdentifier,
|
||||||
|
original_list: Vec<T>,
|
||||||
|
) -> &mut Self {
|
||||||
|
let mut list = original_list.clone();
|
||||||
|
list.sort();
|
||||||
|
let Some(min) = list.first().copied() else {
|
||||||
|
return self;
|
||||||
|
};
|
||||||
|
let Some(max) = list.last().copied() else {
|
||||||
|
return self;
|
||||||
|
};
|
||||||
|
let sum = list.iter().fold(T::default(), |acc, num| acc + *num);
|
||||||
|
let mean = sum / TryInto::<T>::try_into(list.len()).unwrap();
|
||||||
|
|
||||||
|
let median = match list.len().is_multiple_of(2) {
|
||||||
|
true => {
|
||||||
|
let idx = list.len() / 2;
|
||||||
|
let val1 = *list.get(idx - 1).unwrap();
|
||||||
|
let val2 = *list.get(idx).unwrap();
|
||||||
|
(val1 + val2) / TryInto::<T>::try_into(2usize).unwrap()
|
||||||
|
}
|
||||||
|
false => {
|
||||||
|
let idx = list.len() / 2;
|
||||||
|
*list.get(idx).unwrap()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
self.minimum
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, min);
|
||||||
|
self.maximum
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, max);
|
||||||
|
self.mean
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, mean);
|
||||||
|
self.median
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, median);
|
||||||
|
self.raw
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, original_list);
|
||||||
|
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn combine(&self, other: &Self) -> Self {
|
||||||
|
let mut platform_identifiers = self.platform_identifiers();
|
||||||
|
platform_identifiers.extend(other.platform_identifiers());
|
||||||
|
|
||||||
|
let mut this = Self::new();
|
||||||
|
for platform_identifier in platform_identifiers {
|
||||||
|
let mut l1 = self
|
||||||
|
.raw
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|m| m.get(&platform_identifier))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_default();
|
||||||
|
let l2 = other
|
||||||
|
.raw
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|m| m.get(&platform_identifier))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_default();
|
||||||
|
l1.extend(l2);
|
||||||
|
this.with_list(platform_identifier, l1);
|
||||||
|
}
|
||||||
|
|
||||||
|
this
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct ContractInformation {
|
||||||
|
/// The size of the contract on the various platforms.
|
||||||
|
pub contract_size: PlatformKeyedInformation<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct MinedBlockInformation {
|
||||||
|
pub ethereum_block_information: EthereumMinedBlockInformation,
|
||||||
|
pub substrate_block_information: Option<SubstrateMinedBlockInformation>,
|
||||||
|
pub tx_counts: BTreeMap<StepPath, usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MinedBlockInformation {
|
||||||
|
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
||||||
|
self.ethereum_block_information
|
||||||
|
.gas_block_fullness_percentage()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ref_time_block_fullness_percentage(&self) -> Option<u8> {
|
||||||
|
self.substrate_block_information
|
||||||
|
.as_ref()
|
||||||
|
.map(|block| block.ref_time_block_fullness_percentage())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn proof_size_block_fullness_percentage(&self) -> Option<u8> {
|
||||||
|
self.substrate_block_information
|
||||||
|
.as_ref()
|
||||||
|
.map(|block| block.proof_size_block_fullness_percentage())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct EthereumMinedBlockInformation {
|
||||||
|
/// The block number.
|
||||||
|
pub block_number: BlockNumber,
|
||||||
|
|
||||||
|
/// The block timestamp.
|
||||||
|
pub block_timestamp: BlockTimestamp,
|
||||||
|
|
||||||
|
/// The amount of gas mined in the block.
|
||||||
|
pub mined_gas: u128,
|
||||||
|
|
||||||
|
/// The gas limit of the block.
|
||||||
|
pub block_gas_limit: u128,
|
||||||
|
|
||||||
|
/// The hashes of the transactions that were mined as part of the block.
|
||||||
|
pub transaction_hashes: Vec<TxHash>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EthereumMinedBlockInformation {
|
||||||
|
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.mined_gas * 100 / self.block_gas_limit) as u8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct SubstrateMinedBlockInformation {
|
||||||
|
/// The ref time for substrate based chains.
|
||||||
|
pub ref_time: u128,
|
||||||
|
|
||||||
|
/// The max ref time for substrate based chains.
|
||||||
|
pub max_ref_time: u64,
|
||||||
|
|
||||||
|
/// The proof size for substrate based chains.
|
||||||
|
pub proof_size: u128,
|
||||||
|
|
||||||
|
/// The max proof size for substrate based chains.
|
||||||
|
pub max_proof_size: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SubstrateMinedBlockInformation {
|
||||||
|
pub fn ref_time_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.ref_time * 100 / self.max_ref_time as u128) as u8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn proof_size_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.proof_size * 100 / self.max_proof_size as u128) as u8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information keyed by the platform identifier.
|
||||||
|
pub type PlatformKeyedInformation<T> = BTreeMap<PlatformIdentifier, T>;
|
||||||
|
|||||||
@@ -8,11 +8,14 @@ use anyhow::Context as _;
|
|||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
||||||
|
use revive_dt_format::metadata::ContractInstance;
|
||||||
use revive_dt_format::metadata::Metadata;
|
use revive_dt_format::metadata::Metadata;
|
||||||
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
|
use revive_dt_format::steps::StepPath;
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use tokio::sync::{broadcast, oneshot};
|
use tokio::sync::{broadcast, oneshot};
|
||||||
|
|
||||||
|
use crate::MinedBlockInformation;
|
||||||
|
use crate::TransactionInformation;
|
||||||
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
|
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
|
||||||
|
|
||||||
macro_rules! __report_gen_emit_test_specific {
|
macro_rules! __report_gen_emit_test_specific {
|
||||||
@@ -344,6 +347,16 @@ macro_rules! define_event {
|
|||||||
),*
|
),*
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl $ident {
|
||||||
|
pub fn variant_name(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
$(
|
||||||
|
Self::$variant_ident { .. } => stringify!($variant_ident)
|
||||||
|
),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
$(
|
$(
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
$(#[$variant_meta])*
|
$(#[$variant_meta])*
|
||||||
@@ -480,11 +493,6 @@ define_event! {
|
|||||||
/// The channel that the aggregator is to send the receive side of the channel on.
|
/// The channel that the aggregator is to send the receive side of the channel on.
|
||||||
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
|
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
|
||||||
},
|
},
|
||||||
/// An event emitted by runners when they've discovered a corpus file.
|
|
||||||
CorpusFileDiscovery {
|
|
||||||
/// The contents of the corpus file.
|
|
||||||
corpus: Corpus
|
|
||||||
},
|
|
||||||
/// An event emitted by runners when they've discovered a metadata file.
|
/// An event emitted by runners when they've discovered a metadata file.
|
||||||
MetadataFileDiscovery {
|
MetadataFileDiscovery {
|
||||||
/// The path of the metadata file discovered.
|
/// The path of the metadata file discovered.
|
||||||
@@ -614,7 +622,35 @@ define_event! {
|
|||||||
address: Address
|
address: Address
|
||||||
},
|
},
|
||||||
/// Reports the completion of the run.
|
/// Reports the completion of the run.
|
||||||
Completion {}
|
Completion {},
|
||||||
|
|
||||||
|
/* Benchmarks Events */
|
||||||
|
/// An event emitted with information on a transaction that was submitted for a certain step
|
||||||
|
/// of the execution.
|
||||||
|
StepTransactionInformation {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The path of the step that this transaction belongs to.
|
||||||
|
step_path: StepPath,
|
||||||
|
/// Information about the transaction
|
||||||
|
transaction_information: TransactionInformation
|
||||||
|
},
|
||||||
|
ContractInformation {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The path of the solidity source code that contains the contract.
|
||||||
|
source_code_path: PathBuf,
|
||||||
|
/// The name of the contract
|
||||||
|
contract_name: String,
|
||||||
|
/// The size of the contract
|
||||||
|
contract_size: usize
|
||||||
|
},
|
||||||
|
BlockMined {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// Information on the mined block,
|
||||||
|
mined_block_information: MinedBlockInformation
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
+1
-1
Submodule polkadot-sdk updated: dc3d0e5ab7...a44be635e6
Submodule
+1
Submodule resolc-compiler-tests added at d06c4d0127
+8
-28
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# Revive Differential Tests - Quick Start Script
|
# Revive Differential Tests - Quick Start Script
|
||||||
# This script clones the test repository, sets up the corpus file, and runs the tool
|
# This script clones the test repository, and runs the tool
|
||||||
|
|
||||||
set -e # Exit on any error
|
set -e # Exit on any error
|
||||||
|
|
||||||
@@ -14,7 +14,6 @@ NC='\033[0m' # No Color
|
|||||||
# Configuration
|
# Configuration
|
||||||
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
|
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
|
||||||
TEST_REPO_DIR="resolc-compiler-tests"
|
TEST_REPO_DIR="resolc-compiler-tests"
|
||||||
CORPUS_FILE="./corpus.json"
|
|
||||||
WORKDIR="workdir"
|
WORKDIR="workdir"
|
||||||
|
|
||||||
# Optional positional argument: path to polkadot-sdk directory
|
# Optional positional argument: path to polkadot-sdk directory
|
||||||
@@ -23,7 +22,6 @@ POLKADOT_SDK_DIR="${1:-}"
|
|||||||
# Binary paths (default to names in $PATH)
|
# Binary paths (default to names in $PATH)
|
||||||
REVIVE_DEV_NODE_BIN="revive-dev-node"
|
REVIVE_DEV_NODE_BIN="revive-dev-node"
|
||||||
ETH_RPC_BIN="eth-rpc"
|
ETH_RPC_BIN="eth-rpc"
|
||||||
SUBSTRATE_NODE_BIN="substrate-node"
|
|
||||||
|
|
||||||
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
|
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
|
||||||
echo ""
|
echo ""
|
||||||
@@ -51,14 +49,13 @@ if [ -n "$POLKADOT_SDK_DIR" ]; then
|
|||||||
|
|
||||||
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
|
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
|
||||||
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
|
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
|
||||||
SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"
|
|
||||||
|
|
||||||
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
|
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ]; then
|
||||||
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
|
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
|
||||||
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
|
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
|
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN"; do
|
||||||
if [ ! -x "$bin" ]; then
|
if [ ! -x "$bin" ]; then
|
||||||
echo -e "${RED}Expected binary not found after build: $bin${NC}"
|
echo -e "${RED}Expected binary not found after build: $bin${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
@@ -68,23 +65,6 @@ else
|
|||||||
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
|
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create corpus file with absolute path resolved at runtime
|
|
||||||
echo -e "${GREEN}Creating corpus file...${NC}"
|
|
||||||
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
|
|
||||||
|
|
||||||
cat > "$CORPUS_FILE" << EOF
|
|
||||||
{
|
|
||||||
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
|
|
||||||
"paths": [
|
|
||||||
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/translated_semantic_tests")",
|
|
||||||
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/complex")",
|
|
||||||
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
|
|
||||||
echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
|
|
||||||
|
|
||||||
# Create workdir if it doesn't exist
|
# Create workdir if it doesn't exist
|
||||||
mkdir -p "$WORKDIR"
|
mkdir -p "$WORKDIR"
|
||||||
|
|
||||||
@@ -95,13 +75,13 @@ echo ""
|
|||||||
# Run the tool
|
# Run the tool
|
||||||
cargo build --release;
|
cargo build --release;
|
||||||
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
|
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
|
||||||
--platform geth-evm-solc \
|
--platform revive-dev-node-polkavm-resolc \
|
||||||
--platform revive-dev-node-revm-solc \
|
--test $(realpath "$TEST_REPO_DIR/fixtures/solidity") \
|
||||||
--corpus "$CORPUS_FILE" \
|
|
||||||
--working-directory "$WORKDIR" \
|
--working-directory "$WORKDIR" \
|
||||||
--concurrency.number-of-nodes 5 \
|
--concurrency.number-of-nodes 10 \
|
||||||
|
--concurrency.number-of-threads 5 \
|
||||||
|
--concurrency.number-of-concurrent-tasks 500 \
|
||||||
--wallet.additional-keys 100000 \
|
--wallet.additional-keys 100000 \
|
||||||
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
|
|
||||||
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
|
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
|
||||||
--eth-rpc.path "$ETH_RPC_BIN" \
|
--eth-rpc.path "$ETH_RPC_BIN" \
|
||||||
> logs.log \
|
> logs.log \
|
||||||
|
|||||||
@@ -0,0 +1,246 @@
|
|||||||
|
"""
|
||||||
|
Utilities to print benchmark metrics from a report JSON into CSV.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python scripts/print_benchmark_metrics_csv.py /absolute/path/to/report.json
|
||||||
|
|
||||||
|
The script prints, for each metadata path, case index, and mode combination,
|
||||||
|
CSV rows aligned to mined blocks with the following columns:
|
||||||
|
- block_number
|
||||||
|
- number_of_txs
|
||||||
|
- tps (transaction_per_second)
|
||||||
|
- gps (gas_per_second)
|
||||||
|
- gas_block_fullness
|
||||||
|
- ref_time (if available)
|
||||||
|
- max_ref_time (if available)
|
||||||
|
- proof_size (if available)
|
||||||
|
- max_proof_size (if available)
|
||||||
|
- ref_time_block_fullness (if available)
|
||||||
|
- proof_size_block_fullness (if available)
|
||||||
|
|
||||||
|
Important nuance: TPS and GPS arrays have (number_of_blocks - 1) items. The
|
||||||
|
first block row has no TPS/GPS; the CSV leaves those cells empty for the first
|
||||||
|
row and aligns subsequent values to their corresponding next block.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
import csv
|
||||||
|
from typing import List, Mapping, TypedDict
|
||||||
|
|
||||||
|
|
||||||
|
class EthereumMinedBlockInformation(TypedDict):
|
||||||
|
"""EVM block information extracted from the report.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
block_number: The block height.
|
||||||
|
block_timestamp: The UNIX timestamp of the block.
|
||||||
|
mined_gas: Total gas used (mined) in the block.
|
||||||
|
block_gas_limit: The gas limit of the block.
|
||||||
|
transaction_hashes: List of transaction hashes included in the block.
|
||||||
|
"""
|
||||||
|
|
||||||
|
block_number: int
|
||||||
|
block_timestamp: int
|
||||||
|
mined_gas: int
|
||||||
|
block_gas_limit: int
|
||||||
|
transaction_hashes: List[str]
|
||||||
|
|
||||||
|
|
||||||
|
class SubstrateMinedBlockInformation(TypedDict):
|
||||||
|
"""Substrate-specific block resource usage fields.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
ref_time: The consumed ref time in the block.
|
||||||
|
max_ref_time: The maximum ref time allowed for the block.
|
||||||
|
proof_size: The consumed proof size in the block.
|
||||||
|
max_proof_size: The maximum proof size allowed for the block.
|
||||||
|
"""
|
||||||
|
|
||||||
|
ref_time: int
|
||||||
|
max_ref_time: int
|
||||||
|
proof_size: int
|
||||||
|
max_proof_size: int
|
||||||
|
|
||||||
|
|
||||||
|
class MinedBlockInformation(TypedDict):
|
||||||
|
"""Block-level information for a mined block with both EVM and optional Substrate fields."""
|
||||||
|
|
||||||
|
ethereum_block_information: EthereumMinedBlockInformation
|
||||||
|
substrate_block_information: SubstrateMinedBlockInformation
|
||||||
|
|
||||||
|
|
||||||
|
class Metric(TypedDict):
|
||||||
|
"""Metric data of integer values keyed by platform identifier.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
minimum: Single scalar minimum per platform.
|
||||||
|
maximum: Single scalar maximum per platform.
|
||||||
|
mean: Single scalar mean per platform.
|
||||||
|
median: Single scalar median per platform.
|
||||||
|
raw: Time-series (or list) of values per platform.
|
||||||
|
"""
|
||||||
|
|
||||||
|
minimum: Mapping[str, int]
|
||||||
|
maximum: Mapping[str, int]
|
||||||
|
mean: Mapping[str, int]
|
||||||
|
median: Mapping[str, int]
|
||||||
|
raw: Mapping[str, List[int]]
|
||||||
|
|
||||||
|
|
||||||
|
class Metrics(TypedDict):
|
||||||
|
"""All metrics that may be present for a given execution report.
|
||||||
|
|
||||||
|
Note that some metrics are optional and present only for specific platforms
|
||||||
|
or execution modes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
transaction_per_second: Metric
|
||||||
|
gas_per_second: Metric
|
||||||
|
gas_block_fullness: Metric
|
||||||
|
ref_time_block_fullness: Metric
|
||||||
|
proof_size_block_fullness: Metric
|
||||||
|
|
||||||
|
|
||||||
|
class ExecutionReport(TypedDict):
|
||||||
|
"""Execution report for a mode containing mined blocks and metrics.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
mined_block_information: Mapping from platform identifier to the list of
|
||||||
|
mined blocks observed for that platform.
|
||||||
|
metrics: The computed metrics for the execution.
|
||||||
|
"""
|
||||||
|
|
||||||
|
mined_block_information: Mapping[str, List[MinedBlockInformation]]
|
||||||
|
metrics: Metrics
|
||||||
|
|
||||||
|
|
||||||
|
class CaseReport(TypedDict):
|
||||||
|
"""Report for a single case, keyed by mode string."""
|
||||||
|
|
||||||
|
mode_execution_reports: Mapping[str, ExecutionReport]
|
||||||
|
|
||||||
|
|
||||||
|
class MetadataFileReport(TypedDict):
|
||||||
|
"""Report subtree keyed by case indices for a metadata file path."""
|
||||||
|
|
||||||
|
case_reports: Mapping[str, CaseReport]
|
||||||
|
|
||||||
|
|
||||||
|
class ReportRoot(TypedDict):
|
||||||
|
"""Top-level report schema with execution information keyed by metadata path."""
|
||||||
|
|
||||||
|
execution_information: Mapping[str, MetadataFileReport]
|
||||||
|
|
||||||
|
|
||||||
|
BlockInformation = TypedDict(
|
||||||
|
"BlockInformation",
|
||||||
|
{
|
||||||
|
"Block Number": int,
|
||||||
|
"Timestamp": int,
|
||||||
|
"Datetime": None,
|
||||||
|
"Transaction Count": int,
|
||||||
|
"TPS": int | None,
|
||||||
|
"GPS": int | None,
|
||||||
|
"Ref Time": int,
|
||||||
|
"Max Ref Time": int,
|
||||||
|
"Block Fullness Ref Time": int,
|
||||||
|
"Proof Size": int,
|
||||||
|
"Max Proof Size": int,
|
||||||
|
"Block Fullness Proof Size": int,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
"""A typed dictionary used to hold all of the block information"""
|
||||||
|
|
||||||
|
|
||||||
|
def load_report(path: str) -> ReportRoot:
|
||||||
|
"""Load the report JSON from disk.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
path: Absolute or relative filesystem path to the JSON report file.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The parsed report as a typed dictionary structure.
|
||||||
|
"""
|
||||||
|
|
||||||
|
with open(path, "r", encoding="utf-8") as f:
|
||||||
|
data: ReportRoot = json.load(f)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
report_path: str = sys.argv[1]
|
||||||
|
report: ReportRoot = load_report(report_path)
|
||||||
|
|
||||||
|
# TODO: Remove this in the future, but for now, the target is fixed.
|
||||||
|
target: str = "revive-dev-node-revm-solc"
|
||||||
|
|
||||||
|
csv_writer = csv.writer(sys.stdout)
|
||||||
|
|
||||||
|
for _, metadata_file_report in report["execution_information"].items():
|
||||||
|
for _, case_report in metadata_file_report["case_reports"].items():
|
||||||
|
for _, execution_report in case_report["mode_execution_reports"].items():
|
||||||
|
blocks_information: list[MinedBlockInformation] = execution_report[
|
||||||
|
"mined_block_information"
|
||||||
|
][target]
|
||||||
|
|
||||||
|
resolved_blocks: list[BlockInformation] = []
|
||||||
|
for i, block_information in enumerate(blocks_information):
|
||||||
|
resolved_blocks.append(
|
||||||
|
{
|
||||||
|
"Block Number": block_information[
|
||||||
|
"ethereum_block_information"
|
||||||
|
]["block_number"],
|
||||||
|
"Timestamp": block_information[
|
||||||
|
"ethereum_block_information"
|
||||||
|
]["block_timestamp"],
|
||||||
|
"Datetime": None,
|
||||||
|
"Transaction Count": len(
|
||||||
|
block_information["ethereum_block_information"][
|
||||||
|
"transaction_hashes"
|
||||||
|
]
|
||||||
|
),
|
||||||
|
"TPS": (
|
||||||
|
None
|
||||||
|
if i == 0
|
||||||
|
else execution_report["metrics"][
|
||||||
|
"transaction_per_second"
|
||||||
|
]["raw"][target][i - 1]
|
||||||
|
),
|
||||||
|
"GPS": (
|
||||||
|
None
|
||||||
|
if i == 0
|
||||||
|
else execution_report["metrics"]["gas_per_second"][
|
||||||
|
"raw"
|
||||||
|
][target][i - 1]
|
||||||
|
),
|
||||||
|
"Ref Time": block_information[
|
||||||
|
"substrate_block_information"
|
||||||
|
]["ref_time"],
|
||||||
|
"Max Ref Time": block_information[
|
||||||
|
"substrate_block_information"
|
||||||
|
]["max_ref_time"],
|
||||||
|
"Block Fullness Ref Time": execution_report["metrics"][
|
||||||
|
"ref_time_block_fullness"
|
||||||
|
]["raw"][target][i],
|
||||||
|
"Proof Size": block_information[
|
||||||
|
"substrate_block_information"
|
||||||
|
]["proof_size"],
|
||||||
|
"Max Proof Size": block_information[
|
||||||
|
"substrate_block_information"
|
||||||
|
]["max_proof_size"],
|
||||||
|
"Block Fullness Proof Size": execution_report["metrics"][
|
||||||
|
"proof_size_block_fullness"
|
||||||
|
]["raw"][target][i],
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
csv_writer = csv.DictWriter(sys.stdout, resolved_blocks[0].keys())
|
||||||
|
csv_writer.writeheader()
|
||||||
|
csv_writer.writerows(resolved_blocks)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
@@ -0,0 +1,226 @@
|
|||||||
|
"""
|
||||||
|
This script is used to turn the JSON report produced by the revive differential tests tool into an
|
||||||
|
easy to consume markdown document for the purpose of reporting this information in the Polkadot SDK
|
||||||
|
CI. The full models used in the JSON report can be found in the revive differential tests repo and
|
||||||
|
the models used in this script are just a partial reproduction of the full report models.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import TypedDict, Literal, Union
|
||||||
|
|
||||||
|
import json, io
|
||||||
|
|
||||||
|
|
||||||
|
class Report(TypedDict):
|
||||||
|
context: "Context"
|
||||||
|
execution_information: dict[
|
||||||
|
"MetadataFilePathString",
|
||||||
|
dict["ModeString", dict["CaseIdxString", "CaseReport"]],
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class Context(TypedDict):
|
||||||
|
Test: "TestContext"
|
||||||
|
|
||||||
|
|
||||||
|
class TestContext(TypedDict):
|
||||||
|
corpus_configuration: "CorpusConfiguration"
|
||||||
|
|
||||||
|
|
||||||
|
class CorpusConfiguration(TypedDict):
|
||||||
|
test_specifiers: list["TestSpecifier"]
|
||||||
|
|
||||||
|
|
||||||
|
class CaseReport(TypedDict):
|
||||||
|
status: "CaseStatus"
|
||||||
|
|
||||||
|
|
||||||
|
class CaseStatusSuccess(TypedDict):
|
||||||
|
status: Literal["Succeeded"]
|
||||||
|
steps_executed: int
|
||||||
|
|
||||||
|
|
||||||
|
class CaseStatusFailure(TypedDict):
|
||||||
|
status: Literal["Failed"]
|
||||||
|
reason: str
|
||||||
|
|
||||||
|
|
||||||
|
class CaseStatusIgnored(TypedDict):
|
||||||
|
status: Literal["Ignored"]
|
||||||
|
reason: str
|
||||||
|
|
||||||
|
|
||||||
|
CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
|
||||||
|
"""A union type of all of the possible statuses that could be reported for a case."""
|
||||||
|
|
||||||
|
TestSpecifier = str
|
||||||
|
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""
|
||||||
|
|
||||||
|
ModeString = str
|
||||||
|
"""The mode string. For example Y+ >=0.8.13"""
|
||||||
|
|
||||||
|
MetadataFilePathString = str
|
||||||
|
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""
|
||||||
|
|
||||||
|
CaseIdxString = str
|
||||||
|
"""The index of a case as a string. For example '0'"""
|
||||||
|
|
||||||
|
|
||||||
|
def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
|
||||||
|
"""
|
||||||
|
Given a path, this function returns the path relative to the resolc-compiler-test directory. The
|
||||||
|
following is an example of an input and an output:
|
||||||
|
|
||||||
|
Input: ~/polkadot-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
|
||||||
|
Output: test.json
|
||||||
|
"""
|
||||||
|
|
||||||
|
return f"{path.split('resolc-compiler-tests/fixtures/solidity')[-1].strip('/')}"
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
    """
    Read ``report.json`` from the working directory and render it into a
    GitHub-flavoured markdown summary written to ``report.md``.

    The document contains: the list of specified tests, outcome counts, and a
    collapsible table of every failed case (with a note when the same case
    succeeded under a different compilation mode).

    Raises:
        Exception: if a case report carries a status string this script does
            not know about.
    """
    with open("report.json", "r") as file:
        report: Report = json.load(file)

    # Using `with` (rather than a bare open/close pair) guarantees the document
    # is flushed and closed even if rendering raises part-way through.
    with open("report.md", "w") as markdown_document:
        print("# Differential Tests Results", file=markdown_document)

        # All of the test specifiers from the report, made relative to the
        # resolc-compiler-tests directory.
        test_specifiers: list[str] = [
            path_relative_to_resolc_compiler_test_directory(specifier)
            for specifier in report["context"]["Test"]["corpus_configuration"][
                "test_specifiers"
            ]
        ]
        print("## Specified Tests", file=markdown_document)
        for test_specifier in test_specifiers:
            print(f"* `{test_specifier}`", file=markdown_document)

        # Counting the total number of test cases, successes, failures, and
        # ignored tests. Only the case reports are needed here, so iterate the
        # mapping values directly.
        total_number_of_cases: int = 0
        total_number_of_successes: int = 0
        total_number_of_failures: int = 0
        total_number_of_ignores: int = 0
        for mode_to_case_mapping in report["execution_information"].values():
            for case_idx_to_report_mapping in mode_to_case_mapping.values():
                for case_report in case_idx_to_report_mapping.values():
                    status: CaseStatus = case_report["status"]

                    total_number_of_cases += 1
                    if status["status"] == "Succeeded":
                        total_number_of_successes += 1
                    elif status["status"] == "Failed":
                        total_number_of_failures += 1
                    elif status["status"] == "Ignored":
                        total_number_of_ignores += 1
                    else:
                        raise Exception(
                            f"Encountered a status that's unknown to the script: {status}"
                        )

        print("## Counts", file=markdown_document)
        print(
            f"* **Total Number of Test Cases:** {total_number_of_cases}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Successes:** {total_number_of_successes}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Failures:** {total_number_of_failures}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Ignores:** {total_number_of_ignores}",
            file=markdown_document,
        )

        # Grouping the successful test cases by (metadata file path, case index)
        # to make them easier to cross-reference in the failures table below.
        # The path and mode transformations are hoisted out of the inner loops:
        # the original rebound the outer loop variables on every innermost
        # iteration, which only worked because both transforms happen to be
        # idempotent.
        successful_cases: dict[
            MetadataFilePathString, dict[CaseIdxString, set[ModeString]]
        ] = {}
        for metadata_file_path, mode_to_case_mapping in report[
            "execution_information"
        ].items():
            relative_path: str = path_relative_to_resolc_compiler_test_directory(
                metadata_file_path
            )
            for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
                # Normalize the mode suffix into the short "+"/"-" notation
                # used by test specifiers.
                short_mode: str = mode_string.replace(" M3", "+").replace(" M0", "-")
                for case_idx_string, case_report in case_idx_to_report_mapping.items():
                    if case_report["status"]["status"] == "Succeeded":
                        successful_cases.setdefault(relative_path, {}).setdefault(
                            case_idx_string, set()
                        ).add(short_mode)

        print("## Failures", file=markdown_document)
        print(
            "The test specifiers seen in this section have the format "
            "'path::case_idx::compilation_mode' and they're compatible with the revive "
            "differential tests framework and can be specified to it directly in the "
            "same way that they're provided through the `--test` argument of the "
            "framework.\n",
            file=markdown_document,
        )
        print(
            "The failures are provided in an expandable section to ensure that the PR "
            "does not get polluted with information. Please click on the section below "
            "for more information",
            file=markdown_document,
        )
        print(
            "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
            file=markdown_document,
        )
        print("| Test Specifier | Failure Reason | Note |", file=markdown_document)
        print("| -- | -- | -- |", file=markdown_document)

        for metadata_file_path, mode_to_case_mapping in report[
            "execution_information"
        ].items():
            relative_path: str = path_relative_to_resolc_compiler_test_directory(
                metadata_file_path
            )
            for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
                short_mode: str = mode_string.replace(" M3", "+").replace(" M0", "-")
                for case_idx_string, case_report in case_idx_to_report_mapping.items():
                    status: CaseStatus = case_report["status"]
                    if status["status"] != "Failed":
                        continue

                    # Keep the markdown table well-formed: the reason must be a
                    # single line.
                    failure_reason: str = status["reason"].replace("\n", " ")

                    # Read-only lookup (the original used setdefault here, which
                    # needlessly inserted empty entries into successful_cases).
                    modes_where_this_case_succeeded: set[ModeString] = (
                        successful_cases.get(relative_path, {}).get(
                            case_idx_string, set()
                        )
                    )
                    note: str = ""
                    if modes_where_this_case_succeeded:
                        note = (
                            f"This test case succeeded with other compilation modes: "
                            f"{modes_where_this_case_succeeded}"
                        )

                    test_specifier: str = (
                        f"{relative_path}::{case_idx_string}::{short_mode}"
                    )
                    print(
                        f"| `{test_specifier}` | `{failure_reason}` | {note} |",
                        file=markdown_document,
                    )
        print("\n\n</details>", file=markdown_document)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
||||||
Reference in New Issue
Block a user