Mirror of https://github.com/pezkuwichain/revive-differential-tests.git (synced 2026-04-22 10:17:56 +00:00)

Compare commits
17 Commits
| SHA1 |
|---|
| 6b8fec2a12 |
| 73fce7a250 |
| fde07b7c0d |
| ebc24a588b |
| 21e25f09e6 |
| 8c412dc924 |
| 6da3172581 |
| c6eb04b04e |
| e5114d31dc |
| 74fdeb4a2e |
| f9dc362c03 |
| c2ba2cfed6 |
| 3dda739cef |
| 97e3f8bbff |
| 7189361a58 |
| 9b700bfec2 |
| 98b62d705f |
@@ -15,6 +15,7 @@ concurrency:
 
 env:
   CARGO_TERM_COLOR: always
+  POLKADOT_VERSION: polkadot-stable2506-2
 
 jobs:
   cache-polkadot:
@@ -65,6 +66,37 @@ jobs:
       run: |
         cd polkadot-sdk
         cargo install --path substrate/frame/revive/rpc --bin eth-rpc
+
+    - name: Cache downloaded Polkadot binaries
+      id: cache-polkadot
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/polkadot-cache/polkadot
+          ~/polkadot-cache/polkadot-execute-worker
+          ~/polkadot-cache/polkadot-prepare-worker
+          ~/polkadot-cache/polkadot-parachain
+        key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
+
+    - name: Download Polkadot binaries on macOS
+      if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
+      run: |
+        mkdir -p ~/polkadot-cache
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
+        chmod +x ~/polkadot-cache/*
+
+    - name: Download Polkadot binaries on Ubuntu
+      if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
+      run: |
+        mkdir -p ~/polkadot-cache
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
+        curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
+        chmod +x ~/polkadot-cache/*
 
   ci:
     name: CI on ${{ matrix.os }}
@@ -86,15 +118,33 @@ jobs:
           ~/.cargo/bin/eth-rpc
         key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
 
+    - name: Restore downloaded Polkadot binaries from cache
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/polkadot-cache/polkadot
+          ~/polkadot-cache/polkadot-execute-worker
+          ~/polkadot-cache/polkadot-prepare-worker
+          ~/polkadot-cache/polkadot-parachain
+        key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
+
+    - name: Install Polkadot binaries
+      run: |
+        sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
+        sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
+        sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
+        sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
+        sudo chmod +x /usr/local/bin/polkadot*
+
     - name: Setup Rust toolchain
       uses: actions-rust-lang/setup-rust-toolchain@v1
       with:
         rustflags: ""
 
-    - name: Add wasm32 target
+    - name: Add wasm32 target and formatting
       run: |
         rustup target add wasm32-unknown-unknown
-        rustup component add rust-src
+        rustup component add rust-src rustfmt clippy
 
     - name: Install Geth on Ubuntu
       if: matrix.os == 'ubuntu-24.04'
@@ -141,6 +191,17 @@ jobs:
         chmod +x resolc
         sudo mv resolc /usr/local/bin
 
+    - name: Install Kurtosis on macOS
+      if: matrix.os == 'macos-14'
+      run: brew install kurtosis-tech/tap/kurtosis-cli
+
+    - name: Install Kurtosis on Ubuntu
+      if: matrix.os == 'ubuntu-24.04'
+      run: |
+        echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
+        sudo apt update
+        sudo apt install kurtosis-cli
+
     - name: Machete
       uses: bnjbvr/cargo-machete@v0.7.1
@@ -159,5 +220,34 @@ jobs:
     - name: Check resolc version
       run: resolc --version
 
-    - name: Test cargo workspace
-      run: make test
+    - name: Check polkadot version
+      run: polkadot --version
+
+    - name: Check polkadot-parachain version
+      run: polkadot-parachain --version
+
+    - name: Check polkadot-execute-worker version
+      run: polkadot-execute-worker --version
+
+    - name: Check polkadot-prepare-worker version
+      run: polkadot-prepare-worker --version
+
+    - name: Test Formatting
+      run: make format
+
+    - name: Test Clippy
+      run: make clippy
+
+    - name: Test Machete
+      run: make machete
+
+    - name: Unit Tests
+      if: matrix.os == 'ubuntu-24.04'
+      run: cargo test --workspace -- --nocapture
+
+    # We can't install docker in the MacOS image used in CI and therefore we need to skip the
+    # Kurtosis and lighthouse related tests when running the CI on MacOS.
+    - name: Unit Tests
+      if: matrix.os == 'macos-14'
+      run: |
+        cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
+2 -1
@@ -12,4 +12,5 @@ profile.json.gz
 resolc-compiler-tests
 workdir
 
-!/schema.json
+!/schema.json
+!/dev-genesis.json
Generated: +2410 -113 (file diff suppressed because it is too large)
+11 -4
@@ -22,9 +22,9 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }

alloy-primitives = "1.2.1"
alloy-sol-types = "1.2.1"
ansi_term = "0.12.1"
anyhow = "1.0"
async-stream = { version = "0.3.6" }
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
clap = { version = "4", features = ["derive"] }
@@ -45,7 +45,8 @@ serde_json = { version = "1.0", default-features = false, features = [
     "std",
     "unbounded_depth",
 ] }
-serde_with = { version = "3.14.0" }
+serde_with = { version = "3.14.0", features = ["hex"] }
 serde_yaml_ng = { version = "0.10.0" }
 sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
@@ -58,6 +59,7 @@ tokio = { version = "1.47.0", default-features = false, features = [
     "process",
     "rt",
 ] }
+tower = { version = "0.5.2", features = ["limit"] }
 uuid = { version = "1.8", features = ["v4"] }
 tracing = { version = "0.1.41" }
 tracing-appender = { version = "0.2.3" }
@@ -73,13 +75,17 @@ revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 
+zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
+
 [workspace.dependencies.alloy]
-version = "1.0.22"
+version = "1.0.37"
 default-features = false
 features = [
     "json-abi",
     "providers",
     "provider-ws",
     "provider-ipc",
     "provider-http",
     "provider-debug-api",
     "reqwest",
     "rpc-types",
@@ -89,6 +95,7 @@ features = [
     "serde",
     "rpc-types-eth",
     "genesis",
+    "sol-types",
 ]
 
 [profile.bench]
@@ -44,6 +44,7 @@ This section describes the required dependencies that this framework requires to
 - ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
 - Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
 - Resolc - This is required to compile the contracts to PolkaVM bytecode.
+- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
 
 All of the above need to be installed and available in the path in order for the tool to work.
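
A quick way to confirm that, assuming the binary names used throughout this README (`solc`, `resolc`, `geth`, `eth-rpc`, `kurtosis`, plus `docker` for Kurtosis), is a short shell check. This is an illustrative sketch, not part of the repository:

```bash
# Hypothetical sanity check: report which of the required binaries are on $PATH.
for bin in solc resolc geth eth-rpc kurtosis docker; do
  if command -v "$bin" >/dev/null 2>&1; then
    echo "ok:      $bin -> $(command -v "$bin")"
  else
    echo "MISSING: $bin"
  fi
done
```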
@@ -52,122 +53,152 @@ All of the above need to be installed and available in the path in order for the
 
 This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
 
 ```bash
-$ cargo run --release -- --help
-Usage: retester [OPTIONS]
+$ cargo run --release -- execute-tests --help
+Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently
+
+Usage: retester execute-tests [OPTIONS]
 
 Options:
-  -s, --solc <SOLC>
-          The `solc` version to use if the test didn't specify it explicitly
-
-          [default: 0.8.29]
-
-      --wasm
-          Use the Wasm compiler versions
-          [default: ]
-
-  -r, --resolc <RESOLC>
-          The path to the `resolc` executable to be tested.
-
-          By default it uses the `resolc` binary found in `$PATH`.
-
-          If `--wasm` is set, this should point to the resolc Wasm ile.
-
-          [default: resolc]
+  -w, --working-directory <WORKING_DIRECTORY>
+          The working directory that the program will use for all of the temporary artifacts needed at runtime.
+
+          If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.
+
+  -p, --platform <PLATFORMS>
+          The set of platforms that the differential tests should run on
+
+          [default: geth-evm-solc,revive-dev-node-polkavm-resolc]
+
+          Possible values:
+          - geth-evm-solc:                  The Go-ethereum reference full node EVM implementation with the solc compiler
+          - kitchensink-polkavm-resolc:     The kitchensink node with the PolkaVM backend with the resolc compiler
+          - kitchensink-revm-solc:          The kitchensink node with the REVM backend with the solc compiler
+          - revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
+          - revive-dev-node-revm-solc:      The revive dev node with the REVM backend with the solc compiler
 
   -c, --corpus <CORPUS>
           A list of test corpus JSON files to be tested
 
-  -w, --workdir <WORKING_DIRECTORY>
-          A place to store temporary artifacts during test execution.
-
-          Creates a temporary dir if not specified.
+  -h, --help
+          Print help (see a summary with '-h')
 
-  -g, --geth <GETH>
-          The path to the `geth` executable.
-
-          By default it uses `geth` binary found in `$PATH`.
+Solc Configuration:
+      --solc.version <VERSION>
+          Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases
+
+          [default: 0.8.29]
+
+Resolc Configuration:
+      --resolc.path <resolc.path>
+          Specifies the path of the resolc compiler to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.
+
+          [default: resolc]
+
+Geth Configuration:
+      --geth.path <geth.path>
+          Specifies the path of the geth node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.
+
+          [default: geth]
 
-      --geth-start-timeout <GETH_START_TIMEOUT>
-          The maximum time in milliseconds to wait for geth to start
+      --geth.start-timeout-ms <geth.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
 
           [default: 5000]
 
-      --genesis <GENESIS_FILE>
-          Configure nodes according to this genesis.json file
-
-          [default: genesis.json]
-
-  -a, --account <ACCOUNT>
-          The signing account private key
+Kitchensink Configuration:
+      --kitchensink.path <kitchensink.path>
+          Specifies the path of the kitchensink node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.
+
+          [default: substrate-node]
+
+      --kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+      --kitchensink.dont-use-dev-node
+          This configures the tool to use Kitchensink instead of using the revive-dev-node
+
+Revive Dev Node Configuration:
+      --revive-dev-node.path <revive-dev-node.path>
+          Specifies the path of the revive dev node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.
+
+          [default: revive-dev-node]
+
+      --revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Eth RPC Configuration:
+      --eth-rpc.path <eth-rpc.path>
+          Specifies the path of the ETH RPC to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.
+
+          [default: eth-rpc]
+
+      --eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Genesis Configuration:
+      --genesis.path <genesis.path>
+          Specifies the path of the genesis file to use for the nodes that are started.
+
+          This is expected to be the path of a JSON geth genesis file.
+
+Wallet Configuration:
+      --wallet.default-private-key <DEFAULT_KEY>
+          The private key of the default signer
 
           [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
 
-      --private-keys-count <PRIVATE_KEYS_TO_ADD>
+      --wallet.additional-keys <ADDITIONAL_KEYS>
           This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
 
           [default: 100000]
 
-  -l, --leader <LEADER>
-          The differential testing leader node implementation
-
-          [default: geth]
-
-          Possible values:
-          - geth:        The go-ethereum reference full node EVM implementation
-          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation
-
-  -f, --follower <FOLLOWER>
-          The differential testing follower node implementation
-
-          [default: kitchensink]
-
-          Possible values:
-          - geth:        The go-ethereum reference full node EVM implementation
-          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation
-
-      --compile-only <COMPILE_ONLY>
-          Only compile against this testing platform (doesn't execute the tests)
-
-          Possible values:
-          - geth:        The go-ethereum reference full node EVM implementation
-          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation
-
-      --number-of-nodes <NUMBER_OF_NODES>
+Concurrency Configuration:
+      --concurrency.number-of-nodes <NUMBER_OF_NODES>
           Determines the amount of nodes that will be spawned for each chain
 
-          [default: 1]
+          [default: 5]
 
-      --number-of-threads <NUMBER_OF_THREADS>
+      --concurrency.number-of-threads <NUMBER_OF_THREADS>
          Determines the amount of tokio worker threads that will will be used
 
           [default: 16]
 
-      --number-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
-          Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes
+      --concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
+          Determines the amount of concurrent tasks that will be spawned to run tests.
 
-  -e, --extract-problems
-          Extract problems back to the test corpus
+          Defaults to 10 x the number of nodes.
 
-  -k, --kitchensink <KITCHENSINK>
-          The path to the `kitchensink` executable.
+      --concurrency.ignore-concurrency-limit
+          Determines if the concurrency limit should be ignored or not
 
-          By default it uses `substrate-node` binary found in `$PATH`.
-
-          [default: substrate-node]
-
-  -p, --eth_proxy <ETH_PROXY>
-          The path to the `eth_proxy` executable.
-
-          By default it uses `eth-rpc` binary found in `$PATH`.
-
-          [default: eth-rpc]
-
-  -i, --invalidate-compilation-cache
+Compilation Configuration:
+      --compilation.invalidate-cache
           Controls if the compilation cache should be invalidated or not
 
-  -h, --help
-          Print help (see a summary with '-h')
+Report Configuration:
+      --report.include-compiler-input
+          Controls if the compiler input is included in the final report
+
+      --report.include-compiler-output
+          Controls if the compiler output is included in the final report
 ```
 
 To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
@@ -188,10 +219,11 @@ The simplest command to run this tool is the following:
 
 ```bash
 RUST_LOG="info" cargo run --release -- execute-tests \
-    --follower geth \
-    --corpus path_to_your_corpus_file.json \
-    --working-directory path_to_a_temporary_directory_to_cache_things_in \
+    --platform geth-evm-solc \
+    --corpus corp.json \
+    --working-directory workdir \
+    --concurrency.number-of-nodes 5 \
+    --concurrency.ignore-concurrency-limit \
     > logs.log \
     2> output.log
 ```
@@ -9,11 +9,16 @@ repository.workspace = true
rust-version.workspace = true

[dependencies]
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
once_cell = { workspace = true }
regex = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
strum = { workspace = true }
tokio = { workspace = true, default-features = false, features = ["time"] }

[lints]
@@ -0,0 +1,134 @@
use clap::ValueEnum;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};

/// An enum of the platform identifiers of all of the platforms supported by this framework. This
/// could be thought of like the target triple from Rust and LLVM where it specifies the platform
/// completely starting with the node, the vm, and finally the compiler used for this combination.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum PlatformIdentifier {
    /// The Go-ethereum reference full node EVM implementation with the solc compiler.
    GethEvmSolc,
    /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
    LighthouseGethEvmSolc,
    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
    KitchensinkPolkavmResolc,
    /// The kitchensink node with the REVM backend with the solc compiler.
    KitchensinkRevmSolc,
    /// The revive dev node with the PolkaVM backend with the resolc compiler.
    ReviveDevNodePolkavmResolc,
    /// The revive dev node with the REVM backend with the solc compiler.
    ReviveDevNodeRevmSolc,
    /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
    ZombienetPolkavmResolc,
    /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
    ZombienetRevmSolc,
}
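
Since `PlatformIdentifier` derives both the `strum` string traits and `clap::ValueEnum`, with kebab-case renaming applied for serde and strum alike, the variants round-trip to exactly the strings listed under `--platform` in the CLI help above. A minimal usage sketch under that assumption (the function name is hypothetical):

```rust
use std::str::FromStr;

// Hypothetical sketch: the kebab-case renames make Display/FromStr mirror the CLI strings.
fn platform_identifier_round_trip() {
    let id = PlatformIdentifier::GethEvmSolc;
    assert_eq!(id.to_string(), "geth-evm-solc"); // Display comes from strum
    let parsed = PlatformIdentifier::from_str("revive-dev-node-polkavm-resolc").unwrap();
    assert_eq!(parsed, PlatformIdentifier::ReviveDevNodePolkavmResolc);
}
```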

/// An enum of the identifiers of the compilers supported by this framework.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum CompilerIdentifier {
    /// The solc compiler.
    Solc,
    /// The resolc compiler.
    Resolc,
}

/// An enum representing the identifiers of the supported nodes.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum NodeIdentifier {
    /// The go-ethereum node implementation.
    Geth,
    /// The Lighthouse go-ethereum node implementation.
    LighthouseGeth,
    /// The Kitchensink node implementation.
    Kitchensink,
    /// The revive dev node implementation.
    ReviveDevNode,
    /// A zombienet spawned node.
    Zombienet,
}

/// An enum representing the identifiers of the supported VMs.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum VmIdentifier {
    /// The ethereum virtual machine.
    Evm,
    /// The EraVM virtual machine.
    EraVM,
    /// Polkadot's PolkaVM RISC-V based virtual machine.
    PolkaVM,
}
@@ -1,5 +1,11 @@
+mod identifiers;
 mod mode;
+mod private_key_allocator;
+mod round_robin_pool;
 mod version_or_requirement;
 
+pub use identifiers::*;
 pub use mode::*;
+pub use private_key_allocator::*;
+pub use round_robin_pool::*;
 pub use version_or_requirement::*;
@@ -1,6 +1,11 @@
use crate::iterators::EitherIter;
use crate::types::VersionOrRequirement;
use anyhow::{Context as _, bail};
use regex::Regex;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
@@ -33,6 +38,19 @@ impl Display for Mode {
     }
 }
 
+impl FromStr for Mode {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let parsed_mode = ParsedMode::from_str(s)?;
+        let mut iter = parsed_mode.to_modes();
+        let (Some(mode), None) = (iter.next(), iter.next()) else {
+            bail!("Failed to parse the mode")
+        };
+        Ok(mode)
+    }
+}
+
 impl Mode {
     /// Return all of the available mode combinations.
     pub fn all() -> impl Iterator<Item = &'static Mode> {
@@ -171,3 +189,250 @@ impl ModeOptimizerSetting {
        !matches!(self, ModeOptimizerSetting::M0)
    }
}

/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
                ^
                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)?  # Pipeline to use eg Y, E+, E-
                \s*
                (?P<optimize_setting>M[a-zA-Z0-9])?                 # Optimize setting eg M0, Ms, Mz
                \s*
                (?P<version>[>=<^]*\d+(?:\.\d+)*)?                  # Optional semver version eg >=0.8.0, 0.7, <0.8
                $
            ").unwrap()
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };

        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");

        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
@@ -0,0 +1,36 @@
use alloy::primitives::U256;
use alloy::signers::local::PrivateKeySigner;
use anyhow::{Context, Result, bail};

/// This is a sequential private key allocator. When instantiated, it allocates private keys
/// sequentially and in order until the maximum private key specified is reached.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PrivateKeyAllocator {
    /// The next private key to be returned by the allocator when requested.
    next_private_key: U256,

    /// The highest private key (inclusive) that can be returned by this allocator.
    highest_private_key_inclusive: U256,
}

impl PrivateKeyAllocator {
    /// Creates a new instance of the private key allocator.
    pub fn new(highest_private_key_inclusive: U256) -> Self {
        Self {
            next_private_key: U256::ONE,
            highest_private_key_inclusive,
        }
    }

    /// Allocates a new private key and errors out if the maximum private key has been reached.
    pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
        if self.next_private_key > self.highest_private_key_inclusive {
            bail!("Attempted to allocate a private key but failed since all have been allocated");
        };
        let private_key =
            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
                .context("Failed to convert the private key digits into a private key")?;
        self.next_private_key += U256::ONE;
        Ok(private_key)
    }
}
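
The allocator walks the scalar values 1, 2, 3, ... and turns each into a signing key, which is also why a `--wallet.additional-keys` value of N yields exactly the keys in the interval (0, N]. A usage sketch under those assumptions (the `demo` function is hypothetical):

```rust
use alloy::primitives::U256;

// Hypothetical sketch: hand out the first three sequential keys (0x..01, 0x..02, 0x..03),
// then observe that a fourth request fails because the inclusive maximum was reached.
fn demo() -> anyhow::Result<()> {
    let mut allocator = PrivateKeyAllocator::new(U256::from(3u64));
    for _ in 0..3 {
        let signer = allocator.allocate()?;
        println!("allocated signer for address {}", signer.address());
    }
    assert!(allocator.allocate().is_err());
    Ok(())
}
```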
@@ -0,0 +1,24 @@
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct RoundRobinPool<T> {
    next_index: AtomicUsize,
    items: Vec<T>,
}

impl<T> RoundRobinPool<T> {
    pub fn new(items: Vec<T>) -> Self {
        Self {
            next_index: Default::default(),
            items,
        }
    }

    pub fn round_robin(&self) -> &T {
        let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
        self.items.get(current).unwrap()
    }

    pub fn iter(&self) -> impl Iterator<Item = &T> {
        self.items.iter()
    }
}
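
`round_robin` hands out items via an atomic counter taken modulo the pool size, so concurrent callers each receive the next item without locking; note that, as written, it panics on an empty pool because the modulo divides by `items.len()`. A small sketch (the `demo` function is hypothetical):

```rust
// Hypothetical sketch: four requests against a three-item pool wrap back to the first item.
fn demo() {
    let pool = RoundRobinPool::new(vec!["node-a", "node-b", "node-c"]);
    let picks: Vec<_> = (0..4).map(|_| *pool.round_robin()).collect();
    assert_eq!(picks, vec!["node-a", "node-b", "node-c", "node-a"]);
}
```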
@@ -16,7 +16,6 @@ revive-dt-solc-binaries = { workspace = true }
 revive-common = { workspace = true }
 
 alloy = { workspace = true }
-alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
 dashmap = { workspace = true }
 foundry-compilers-artifacts = { workspace = true }
@@ -7,18 +7,17 @@ use std::{
     collections::HashMap,
     hash::Hash,
     path::{Path, PathBuf},
+    pin::Pin,
 };
 
 use alloy::json_abi::JsonAbi;
-use alloy_primitives::Address;
+use alloy::primitives::Address;
 use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
 
 use revive_common::EVMVersion;
 use revive_dt_common::cached_fs::read_to_string;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 
 // Re-export this as it's a part of the compiler interface.
 pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -28,19 +27,7 @@ pub mod revive_resolc;
 pub mod solc;
 
 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler: Sized {
-    /// Instantiates a new compiler object.
-    ///
-    /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
-    /// new compiler object. Certain implementations of this trait might choose to cache cache the
-    /// compiler objects and return the same ones over and over again.
-    fn new(
-        context: impl AsRef<SolcConfiguration>
-            + AsRef<ResolcConfiguration>
-            + AsRef<WorkingDirectoryConfiguration>,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> impl Future<Output = Result<Self>>;
-
+pub trait SolidityCompiler {
     /// Returns the version of the compiler.
     fn version(&self) -> &Version;
 
@@ -48,7 +35,10 @@ pub trait SolidityCompiler: Sized {
     fn path(&self) -> &Path;
 
     /// The low-level compiler interface.
-    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
+    fn build(
+        &self,
+        input: CompilerInput,
+    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;
 
     /// Does the compiler support the provided mode and version settings.
     fn supports_mode(
@@ -74,7 +64,7 @@ pub struct CompilerInput {
 /// The generic compilation output configuration.
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct CompilerOutput {
-    /// The compiled contracts. The bytecode of the contract is kept as a string incase linking is
+    /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is
     /// required and the compiled source has placeholders.
     pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 }
@@ -164,7 +154,7 @@ impl Compiler {
         callback(self)
     }
 
-    pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
+    pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
         compiler.build(self.input).await
     }
@@ -3,6 +3,7 @@
 
 use std::{
     path::PathBuf,
+    pin::Pin,
     process::Stdio,
     sync::{Arc, LazyLock},
 };
@@ -15,6 +16,7 @@ use revive_solc_json_interface::{
     SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
     SolcStandardJsonOutput,
 };
+use tracing::{Span, field::display};
 
 use crate::{
     CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
@@ -37,8 +39,8 @@ struct ResolcInner {
     resolc_path: PathBuf,
 }
 
-impl SolidityCompiler for Resolc {
-    async fn new(
+impl Resolc {
+    pub async fn new(
         context: impl AsRef<SolcConfiguration>
             + AsRef<ResolcConfiguration>
             + AsRef<WorkingDirectoryConfiguration>,
@@ -65,11 +67,13 @@ impl SolidityCompiler for Resolc {
             })
             .clone())
     }
+}
 
+impl SolidityCompiler for Resolc {
     fn version(&self) -> &Version {
         // We currently return the solc compiler version since we do not support multiple resolc
         // compiler versions.
-        self.0.solc.version()
+        SolidityCompiler::version(&self.0.solc)
     }
 
     fn path(&self) -> &std::path::Path {
@@ -77,7 +81,17 @@ impl SolidityCompiler for Resolc {
     }
 
-    #[tracing::instrument(level = "debug", ret)]
-    async fn build(
+    #[tracing::instrument(
+        level = "error",
+        skip_all,
+        fields(
+            resolc_version = %self.version(),
+            solc_version = %self.0.solc.version(),
+            json_in = tracing::field::Empty
+        ),
+        err(Debug)
+    )]
+    fn build(
         &self,
         CompilerInput {
             pipeline,
@@ -91,189 +105,199 @@ impl SolidityCompiler for Resolc {
             // resolc. So, we need to go back to this later once it's supported.
             revert_string_handling: _,
         }: CompilerInput,
-    ) -> Result<CompilerOutput> {
-        if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
-            anyhow::bail!(
-                "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
-            );
-        }
-
-        let input = SolcStandardJsonInput {
-            language: SolcStandardJsonInputLanguage::Solidity,
-            sources: sources
-                .into_iter()
-                .map(|(path, source)| (path.display().to_string(), source.into()))
-                .collect(),
-            settings: SolcStandardJsonInputSettings {
-                evm_version,
-                libraries: Some(
-                    libraries
-                        .into_iter()
-                        .map(|(source_code, libraries_map)| {
-                            (
-                                source_code.display().to_string(),
-                                libraries_map
-                                    .into_iter()
-                                    .map(|(library_ident, library_address)| {
-                                        (library_ident, library_address.to_string())
-                                    })
-                                    .collect(),
-                            )
-                        })
-                        .collect(),
-                ),
-                remappings: None,
-                output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
-                via_ir: Some(true),
-                optimizer: SolcStandardJsonInputSettingsOptimizer::new(
-                    optimization
-                        .unwrap_or(ModeOptimizerSetting::M0)
-                        .optimizations_enabled(),
-                    None,
-                    &Version::new(0, 0, 0),
-                    false,
-                ),
-                metadata: None,
-                polkavm: None,
-            },
-        };
-
-        let mut command = AsyncCommand::new(self.path());
-        command
-            .stdin(Stdio::piped())
-            .stdout(Stdio::piped())
-            .stderr(Stdio::piped())
-            .arg("--standard-json");
-
-        if let Some(ref base_path) = base_path {
-            command.arg("--base-path").arg(base_path);
-        }
-        if !allow_paths.is_empty() {
-            command.arg("--allow-paths").arg(
-                allow_paths
-                    .iter()
-                    .map(|path| path.display().to_string())
-                    .collect::<Vec<_>>()
-                    .join(","),
-            );
-        }
-        let mut child = command
-            .spawn()
-            .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
-
-        let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        let serialized_input = serde_json::to_vec(&input)
-            .context("Failed to serialize Standard JSON input for resolc")?;
-        stdin_pipe
-            .write_all(&serialized_input)
-            .await
-            .context("Failed to write Standard JSON to resolc stdin")?;
-
-        let output = child
-            .wait_with_output()
-            .await
-            .context("Failed while waiting for resolc process to finish")?;
-        let stdout = output.stdout;
-        let stderr = output.stderr;
-
-        if !output.status.success() {
-            let json_in = serde_json::to_string_pretty(&input)
-                .context("Failed to pretty-print Standard JSON input for logging")?;
-            let message = String::from_utf8_lossy(&stderr);
-            tracing::error!(
-                status = %output.status,
-                message = %message,
-                json_input = json_in,
-                "Compilation using resolc failed"
-            );
-            anyhow::bail!("Compilation failed with an error: {message}");
-        }
-
-        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
-            .map_err(|e| {
-                anyhow::anyhow!(
-                    "failed to parse resolc JSON output: {e}\nstderr: {}",
-                    String::from_utf8_lossy(&stderr)
-                )
-            })
-            .context("Failed to parse resolc standard JSON output")?;
-
-        tracing::debug!(
-            output = %serde_json::to_string(&parsed).unwrap(),
-            "Compiled successfully"
-        );
-
-        // Detecting if the compiler output contained errors and reporting them through logs and
-        // errors instead of returning the compiler output that might contain errors.
-        for error in parsed.errors.iter().flatten() {
-            if error.severity == "error" {
-                tracing::error!(
-                    ?error,
-                    ?input,
-                    output = %serde_json::to_string(&parsed).unwrap(),
-                    "Encountered an error in the compilation"
-                );
-                anyhow::bail!("Encountered an error in the compilation: {error}")
-            }
-        }
-
-        let Some(contracts) = parsed.contracts else {
-            anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
-        };
-
-        let mut compiler_output = CompilerOutput::default();
-        for (source_path, contracts) in contracts.into_iter() {
-            let src_for_msg = source_path.clone();
-            let source_path = PathBuf::from(source_path)
-                .canonicalize()
-                .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
-
-            let map = compiler_output.contracts.entry(source_path).or_default();
-            for (contract_name, contract_information) in contracts.into_iter() {
-                let bytecode = contract_information
-                    .evm
-                    .and_then(|evm| evm.bytecode.clone())
-                    .context("Unexpected - Contract compiled with resolc has no bytecode")?;
-                let abi = {
-                    let metadata = contract_information
-                        .metadata
-                        .as_ref()
-                        .context("No metadata found for the contract")?;
-                    let solc_metadata_str = match metadata {
-                        serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
-                        serde_json::Value::Object(metadata_object) => {
-                            let solc_metadata_value = metadata_object
-                                .get("solc_metadata")
-                                .context("Contract doesn't have a 'solc_metadata' field")?;
-                            solc_metadata_value
-                                .as_str()
-                                .context("The 'solc_metadata' field is not a string")?
-                        }
-                        serde_json::Value::Null
-                        | serde_json::Value::Bool(_)
-                        | serde_json::Value::Number(_)
-                        | serde_json::Value::Array(_) => {
-                            anyhow::bail!("Unsupported type of metadata {metadata:?}")
-                        }
-                    };
-                    let solc_metadata =
-                        serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
-                            "Failed to deserialize the solc_metadata as a serde_json generic value",
-                        )?;
-                    let output_value = solc_metadata
-                        .get("output")
-                        .context("solc_metadata doesn't have an output field")?;
-                    let abi_value = output_value
-                        .get("abi")
-                        .context("solc_metadata output doesn't contain an abi field")?;
-                    serde_json::from_value::<JsonAbi>(abi_value.clone())
-                        .context("ABI found in solc_metadata output is not valid ABI")?
-                };
-                map.insert(contract_name, (bytecode.object, abi));
-            }
-        }
-
-        Ok(compiler_output)
-    }
+    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
+        Box::pin(async move {
+            if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
+                anyhow::bail!(
+                    "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
+                );
+            }
+
+            let input = SolcStandardJsonInput {
+                language: SolcStandardJsonInputLanguage::Solidity,
+                sources: sources
+                    .into_iter()
+                    .map(|(path, source)| (path.display().to_string(), source.into()))
+                    .collect(),
+                settings: SolcStandardJsonInputSettings {
+                    evm_version,
+                    libraries: Some(
+                        libraries
+                            .into_iter()
+                            .map(|(source_code, libraries_map)| {
+                                (
+                                    source_code.display().to_string(),
+                                    libraries_map
+                                        .into_iter()
+                                        .map(|(library_ident, library_address)| {
+                                            (library_ident, library_address.to_string())
+                                        })
+                                        .collect(),
+                                )
+                            })
+                            .collect(),
+                    ),
+                    remappings: None,
+                    output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
+                    via_ir: Some(true),
+                    optimizer: SolcStandardJsonInputSettingsOptimizer::new(
+                        optimization
+                            .unwrap_or(ModeOptimizerSetting::M0)
+                            .optimizations_enabled(),
+                        None,
+                        &Version::new(0, 0, 0),
+                        false,
+                    ),
+                    metadata: None,
+                    polkavm: None,
+                },
+            };
+            Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
+
+            let path = &self.0.resolc_path;
+            let mut command = AsyncCommand::new(path);
+            command
+                .stdin(Stdio::piped())
+                .stdout(Stdio::piped())
+                .stderr(Stdio::piped())
+                .arg("--solc")
+                .arg(self.0.solc.path())
+                .arg("--standard-json");
+
+            if let Some(ref base_path) = base_path {
+                command.arg("--base-path").arg(base_path);
+            }
+            if !allow_paths.is_empty() {
+                command.arg("--allow-paths").arg(
+                    allow_paths
+                        .iter()
+                        .map(|path| path.display().to_string())
+                        .collect::<Vec<_>>()
+                        .join(","),
+                );
+            }
+            let mut child = command
+                .spawn()
+                .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
+
+            let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
+            let serialized_input = serde_json::to_vec(&input)
+                .context("Failed to serialize Standard JSON input for resolc")?;
+            stdin_pipe
+                .write_all(&serialized_input)
+                .await
+                .context("Failed to write Standard JSON to resolc stdin")?;
+
+            let output = child
+                .wait_with_output()
+                .await
+                .context("Failed while waiting for resolc process to finish")?;
+            let stdout = output.stdout;
+            let stderr = output.stderr;
+
+            if !output.status.success() {
+                let json_in = serde_json::to_string_pretty(&input)
+                    .context("Failed to pretty-print Standard JSON input for logging")?;
+                let message = String::from_utf8_lossy(&stderr);
+                tracing::error!(
+                    status = %output.status,
+                    message = %message,
+                    json_input = json_in,
+                    "Compilation using resolc failed"
+                );
+                anyhow::bail!("Compilation failed with an error: {message}");
+            }
+
+            let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
+                .map_err(|e| {
+                    anyhow::anyhow!(
+                        "failed to parse resolc JSON output: {e}\nstderr: {}",
+                        String::from_utf8_lossy(&stderr)
+                    )
+                })
+                .context("Failed to parse resolc standard JSON output")?;
+
+            tracing::debug!(
+                output = %serde_json::to_string(&parsed).unwrap(),
+                "Compiled successfully"
+            );
+
+            // Detecting if the compiler output contained errors and reporting them through logs and
+            // errors instead of returning the compiler output that might contain errors.
+            for error in parsed.errors.iter().flatten() {
+                if error.severity == "error" {
+                    tracing::error!(
+                        ?error,
+                        ?input,
+                        output = %serde_json::to_string(&parsed).unwrap(),
+                        "Encountered an error in the compilation"
+                    );
+                    anyhow::bail!("Encountered an error in the compilation: {error}")
+                }
+            }
+
+            let Some(contracts) = parsed.contracts else {
+                anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
+            };
+
+            let mut compiler_output = CompilerOutput::default();
+            for (source_path, contracts) in contracts.into_iter() {
+                let src_for_msg = source_path.clone();
+                let source_path = PathBuf::from(source_path)
+                    .canonicalize()
+                    .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
+
+                let map = compiler_output.contracts.entry(source_path).or_default();
+                for (contract_name, contract_information) in contracts.into_iter() {
+                    let bytecode = contract_information
+                        .evm
+                        .and_then(|evm| evm.bytecode.clone())
+                        .context("Unexpected - Contract compiled with resolc has no bytecode")?;
+                    let abi = {
+                        let metadata = contract_information
+                            .metadata
+                            .as_ref()
+                            .context("No metadata found for the contract")?;
+                        let solc_metadata_str = match metadata {
+                            serde_json::Value::String(solc_metadata_str) => {
+                                solc_metadata_str.as_str()
+                            }
+                            serde_json::Value::Object(metadata_object) => {
+                                let solc_metadata_value = metadata_object
+                                    .get("solc_metadata")
+                                    .context("Contract doesn't have a 'solc_metadata' field")?;
+                                solc_metadata_value
+                                    .as_str()
+                                    .context("The 'solc_metadata' field is not a string")?
+                            }
+                            serde_json::Value::Null
+                            | serde_json::Value::Bool(_)
+                            | serde_json::Value::Number(_)
+                            | serde_json::Value::Array(_) => {
+                                anyhow::bail!("Unsupported type of metadata {metadata:?}")
+                            }
+                        };
+                        let solc_metadata = serde_json::from_str::<serde_json::Value>(
+                            solc_metadata_str,
+                        )
+                        .context(
+                            "Failed to deserialize the solc_metadata as a serde_json generic value",
+                        )?;
+                        let output_value = solc_metadata
+                            .get("output")
+                            .context("solc_metadata doesn't have an output field")?;
+                        let abi_value = output_value
+                            .get("abi")
+                            .context("solc_metadata output doesn't contain an abi field")?;
+                        serde_json::from_value::<JsonAbi>(abi_value.clone())
+                            .context("ABI found in solc_metadata output is not valid ABI")?
+                    };
+                    map.insert(contract_name, (bytecode.object, abi));
+                }
+            }
+
+            Ok(compiler_output)
+        })
+    }
 
     fn supports_mode(
@@ -281,6 +305,7 @@ impl SolidityCompiler for Resolc {
         optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
-        pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
+        pipeline == ModePipeline::ViaYulIR
+            && SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
     }
 }
+177
-161
@@ -3,14 +3,16 @@

use std::{
    path::PathBuf,
    pin::Pin,
    process::Stdio,
    sync::{Arc, LazyLock},
};

use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_config::{SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc;
use tracing::{Span, field::display, info};

use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};

@@ -36,11 +38,9 @@ struct SolcInner {
    solc_version: Version,
}

impl SolidityCompiler for Solc {
    async fn new(
        context: impl AsRef<SolcConfiguration>
            + AsRef<ResolcConfiguration>
            + AsRef<WorkingDirectoryConfiguration>,
impl Solc {
    pub async fn new(
        context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
        version: impl Into<Option<VersionOrRequirement>>,
    ) -> Result<Self> {
        // This is a cache for the compiler objects so that whenever the same compiler version is
@@ -68,6 +68,11 @@ impl SolidityCompiler for Solc {
        Ok(COMPILERS_CACHE
            .entry((path.clone(), version.clone()))
            .or_insert_with(|| {
                info!(
                    solc_path = %path.display(),
                    solc_version = %version,
                    "Created a new solc compiler object"
                );
                Self(Arc::new(SolcInner {
                    solc_path: path,
                    solc_version: version,
@@ -75,7 +80,9 @@ impl SolidityCompiler for Solc {
            })
            .clone())
    }
}

impl SolidityCompiler for Solc {
    fn version(&self) -> &Version {
        &self.0.solc_version
    }
@@ -85,7 +92,13 @@ impl SolidityCompiler for Solc {
    }

    #[tracing::instrument(level = "debug", ret)]
    async fn build(
    #[tracing::instrument(
        level = "error",
        skip_all,
        fields(json_in = tracing::field::Empty),
        err(Debug)
    )]
    fn build(
        &self,
        CompilerInput {
            pipeline,
@@ -97,170 +110,173 @@ impl SolidityCompiler for Solc {
            libraries,
            revert_string_handling,
        }: CompilerInput,
    ) -> Result<CompilerOutput> {
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
        Box::pin(async move {
            // Be careful to entirely omit the viaIR field if the compiler does not support it,
            // as it will error if you provide fields it does not know about. Because
            // `supports_mode` is called prior to instantiating a compiler, we should never
            // ask for something which is invalid.
            let via_ir = match (pipeline, self.compiler_supports_yul()) {
                (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
                (_pipeline, false) => None,
            };

            let input = SolcInput {
                language: SolcLanguage::Solidity,
                sources: Sources(
                    sources
                        .into_iter()
                        .map(|(source_path, source_code)| (source_path, Source::new(source_code)))
                        .collect(),
                ),
                settings: Settings {
                    optimizer: Optimizer {
                        enabled: optimization.map(|o| o.optimizations_enabled()),
                        details: Some(Default::default()),
                        ..Default::default()
                    },
                    output_selection: OutputSelection::common_output_selection(
                        [
                            ContractOutputSelection::Abi,
                            ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
                                BytecodeOutputSelection::Object,
                            )),
                        ]
                        .into_iter()
                        .map(|item| item.to_string()),
                    ),
                    evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
                    via_ir,
                    libraries: Libraries {
                        libs: libraries
                            .into_iter()
                            .map(|(file_path, libraries)| {
                                (
                                    file_path,
                                    libraries
                                        .into_iter()
                                        .map(|(library_name, library_address)| {
                                            (library_name, library_address.to_string())
                                        })
                                        .collect(),
                                )
                            })
                            .collect(),
                    },
                    debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
                        revert_strings: match revert_string_handling {
                            crate::RevertString::Default => Some(RevertStrings::Default),
                            crate::RevertString::Debug => Some(RevertStrings::Debug),
                            crate::RevertString::Strip => Some(RevertStrings::Strip),
                            crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
                        },
                        debug_info: Default::default(),
                    }),
                    ..Default::default()
                },
            };

            let mut command = AsyncCommand::new(self.path());
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .arg("--standard-json");
            Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
            let path = &self.0.solc_path;
            let mut command = AsyncCommand::new(path);
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::null())
                .arg("--standard-json");

            if let Some(ref base_path) = base_path {
                command.arg("--base-path").arg(base_path);
            }
            if !allow_paths.is_empty() {
                command.arg("--allow-paths").arg(
                    allow_paths
                        .iter()
                        .map(|path| path.display().to_string())
                        .collect::<Vec<_>>()
                        .join(","),
                );
            }
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn solc at {}", path.display()))?;

            let stdin = child.stdin.as_mut().expect("should be piped");
            let serialized_input = serde_json::to_vec(&input)
                .context("Failed to serialize Standard JSON input for solc")?;
            stdin
                .write_all(&serialized_input)
                .await
                .context("Failed to write Standard JSON to solc stdin")?;
            let output = child
                .wait_with_output()
                .await
                .context("Failed while waiting for solc process to finish")?;

            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                let message = String::from_utf8_lossy(&output.stderr);
                tracing::error!(
                    status = %output.status,
                    message = %message,
                    json_input = json_in,
                    "Compilation using solc failed"
                );
                anyhow::bail!("Compilation failed with an error: {message}");
            }
            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                tracing::error!(
                    status = %output.status,
                    json_input = json_in,
                    "Compilation using solc failed"
                );
                anyhow::bail!("Compilation failed");
            }

            let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse resolc JSON output: {e}\nstderr: {}",
                        String::from_utf8_lossy(&output.stdout)
                    )
                })
                .context("Failed to parse solc standard JSON output")?;
            let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse resolc JSON output: {e}\nstdout: {}",
                        String::from_utf8_lossy(&output.stdout)
                    )
                })
                .context("Failed to parse solc standard JSON output")?;

            // Detecting if the compiler output contained errors and reporting them through logs and
            // errors instead of returning the compiler output that might contain errors.
            for error in parsed.errors.iter() {
                if error.severity == Severity::Error {
                    tracing::error!(?error, ?input, "Encountered an error in the compilation");
                    anyhow::bail!("Encountered an error in the compilation: {error}")
                }
            }

            tracing::debug!(
                output = %String::from_utf8_lossy(&output.stdout).to_string(),
                "Compiled successfully"
            );

            let mut compiler_output = CompilerOutput::default();
            for (contract_path, contracts) in parsed.contracts {
                let map = compiler_output
                    .contracts
                    .entry(contract_path.canonicalize().with_context(|| {
                        format!(
                            "Failed to canonicalize contract path {}",
                            contract_path.display()
                        )
                    })?)
                    .or_default();
                for (contract_name, contract_info) in contracts.into_iter() {
                    let source_code = contract_info
                        .evm
                        .and_then(|evm| evm.bytecode)
                        .map(|bytecode| match bytecode.object {
                            BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
                            BytecodeObject::Unlinked(unlinked) => unlinked,
                        })
                        .context("Unexpected - contract compiled with solc has no source code")?;
                    let abi = contract_info
                        .abi
                        .context("Unexpected - contract compiled with solc as no ABI")?;
                    map.insert(contract_name, (source_code, abi));
                }
            }

            Ok(compiler_output)
        })
    }

    fn supports_mode(
@@ -278,6 +294,6 @@ impl SolidityCompiler for Solc {
impl Solc {
    fn compiler_supports_yul(&self) -> bool {
        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
        self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
        SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
    }
}

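The signature change above replaces `async fn build` with a plain `fn build` returning `Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>`, and wraps the body in `Box::pin(async move { ... })`. Returning a boxed future keeps the trait usable behind `dyn SolidityCompiler` while the body stays ordinary async code. A self-contained sketch of the same pattern, assuming a tokio runtime; the `Compile`/`Echo` names are illustrative, not the crate's actual types:

use std::future::Future;
use std::pin::Pin;

trait Compile {
    // A boxed future keeps the trait dyn-compatible, which a plain
    // `async fn` in a trait would not be.
    fn build(&self, source: String) -> Pin<Box<dyn Future<Output = String> + '_>>;
}

struct Echo;

impl Compile for Echo {
    fn build(&self, source: String) -> Pin<Box<dyn Future<Output = String> + '_>> {
        Box::pin(async move {
            // Arbitrary async work (spawning solc, awaiting output) goes here.
            source
        })
    }
}

#[tokio::main]
async fn main() {
    let compiler: Box<dyn Compile> = Box::new(Echo);
    assert_eq!(compiler.build("contract".into()).await, "contract");
}
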
@@ -7,7 +7,10 @@ pragma solidity >=0.6.9;
import "./callable.sol";

contract Main {
    function main(uint[1] calldata p1, Callable callable) public returns(uint) {
    function main(
        uint[1] calldata p1,
        Callable callable
    ) public pure returns (uint) {
        return callable.f(p1);
    }
}

@@ -1,14 +1,14 @@
use std::path::PathBuf;

use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::ExecutionContext;
use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::TestExecutionContext;
use semver::Version;

#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
    // Arrange
    let args = ExecutionContext::default();
    let args = TestExecutionContext::default();
    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

@@ -49,7 +49,7 @@ async fn contracts_can_be_compiled_with_solc() {
#[tokio::test]
async fn contracts_can_be_compiled_with_resolc() {
    // Arrange
    let args = ExecutionContext::default();
    let args = TestExecutionContext::default();
    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

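Both tests pin an exact compiler with `VersionOrRequirement::Version(Version::new(0, 8, 30))`. For context on what the "requirement" half of that name generalizes over, here is how the `semver` crate (already a dependency of these tests) distinguishes an exact version from a matching range; this sketch uses only `semver` and does not touch the crate-local `VersionOrRequirement` type:

use semver::{Version, VersionReq};

fn main() {
    let exact = Version::new(0, 8, 30);
    // A requirement can match many versions, which is the usual reason a
    // parameter accepts "a version or a requirement".
    let req = VersionReq::parse(">=0.8.13, <0.9").unwrap();
    assert!(req.matches(&exact));
}
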
@@ -9,6 +9,8 @@ repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }

+528
-75
@@ -12,22 +12,26 @@ use std::{

use alloy::{
    genesis::Genesis,
    hex::ToHexExt,
    network::EthereumWallet,
    primitives::{FixedBytes, U256},
    primitives::{B256, FixedBytes, U256},
    signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use revive_dt_common::types::PlatformIdentifier;
use semver::Version;
use serde::{Serialize, Serializer};
use serde::{Deserialize, Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir;

#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
#[command(name = "retester")]
pub enum Context {
    /// Executes tests in the MatterLabs format differentially against a leader and a follower.
    ExecuteTests(Box<ExecutionContext>),
    /// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
    Test(Box<TestExecutionContext>),

    /// Executes differential benchmarks on various platforms.
    Benchmark(Box<BenchmarkingContext>),

    /// Exports the JSON schema of the MatterLabs test format used by the tool.
    ExportJsonSchema,
}
@@ -45,8 +49,142 @@ impl Context {
impl AsRef<WorkingDirectoryConfiguration> for Context {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        match self {
            Context::ExecuteTests(execution_context) => &execution_context.working_directory,
            Context::ExportJsonSchema => unreachable!(),
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<CorpusConfiguration> for Context {
    fn as_ref(&self) -> &CorpusConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<SolcConfiguration> for Context {
    fn as_ref(&self) -> &SolcConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ResolcConfiguration> for Context {
    fn as_ref(&self) -> &ResolcConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<GethConfiguration> for Context {
    fn as_ref(&self) -> &GethConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<KurtosisConfiguration> for Context {
    fn as_ref(&self) -> &KurtosisConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<PolkadotParachainConfiguration> for Context {
    fn as_ref(&self) -> &PolkadotParachainConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<KitchensinkConfiguration> for Context {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ReviveDevNodeConfiguration> for Context {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<EthRpcConfiguration> for Context {
    fn as_ref(&self) -> &EthRpcConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<GenesisConfiguration> for Context {
    fn as_ref(&self) -> &GenesisConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(..) => {
                static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
                &GENESIS
            }
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<WalletConfiguration> for Context {
    fn as_ref(&self) -> &WalletConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ConcurrencyConfiguration> for Context {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<CompilationConfiguration> for Context {
    fn as_ref(&self) -> &CompilationConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}
@@ -54,14 +192,37 @@ impl AsRef<WorkingDirectoryConfiguration> for Context {
impl AsRef<ReportConfiguration> for Context {
    fn as_ref(&self) -> &ReportConfiguration {
        match self {
            Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
            Context::ExportJsonSchema => unreachable!(),
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

#[derive(Clone, Debug, Parser, Serialize)]
pub struct ExecutionContext {
impl AsRef<IgnoreSuccessConfiguration> for Context {
    fn as_ref(&self) -> &IgnoreSuccessConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(..) => unreachable!(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

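Every configuration type above gets the same three-arm delegation from `Context` to the inner boxed context. If one wanted to compress that boilerplate, a small declarative macro could stamp out the impls; the sketch below is only an illustration of the idea, with simplified stand-ins for the real `Context`, `Test`, and `Benchmark` shapes:

struct TestCtx {
    name: String,
}

struct BenchCtx {
    name: String,
}

enum Context {
    Test(Box<TestCtx>),
    Benchmark(Box<BenchCtx>),
}

// Generates `impl AsRef<$target> for Context` with the delegation arms
// used throughout the diff above.
macro_rules! delegate_as_ref {
    ($target:ty) => {
        impl AsRef<$target> for Context {
            fn as_ref(&self) -> &$target {
                match self {
                    Context::Test(context) => context.as_ref().as_ref(),
                    Context::Benchmark(context) => context.as_ref().as_ref(),
                }
            }
        }
    };
}

impl AsRef<String> for TestCtx {
    fn as_ref(&self) -> &String {
        &self.name
    }
}

impl AsRef<String> for BenchCtx {
    fn as_ref(&self) -> &String {
        &self.name
    }
}

delegate_as_ref!(String);

fn main() {
    let ctx = Context::Test(Box::new(TestCtx { name: "demo".into() }));
    let name: &String = ctx.as_ref();
    assert_eq!(name, "demo");
}
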
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct TestExecutionContext {
    /// The set of platforms that the differential tests should run on.
    #[arg(
        short = 'p',
        long = "platform",
        default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
    )]
    pub platforms: Vec<PlatformIdentifier>,

    /// The output format to use for the tool's output.
    #[arg(short, long, default_value_t = OutputFormat::CargoTestLike)]
    pub output_format: OutputFormat,

    /// The working directory that the program will use for all of the temporary artifacts needed at
    /// runtime.
    ///
@@ -75,17 +236,9 @@ pub struct ExecutionContext {
    )]
    pub working_directory: WorkingDirectoryConfiguration,

    /// The differential testing leader node implementation.
    #[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
    pub leader: TestingPlatform,

    /// The differential testing follower node implementation.
    #[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
    pub follower: TestingPlatform,

    /// A list of test corpus JSON files to be tested.
    #[arg(long = "corpus", short)]
    pub corpus: Vec<PathBuf>,
    /// Configuration parameters for the corpus files to use.
    #[clap(flatten, next_help_heading = "Corpus Configuration")]
    pub corpus_configuration: CorpusConfiguration,

    /// Configuration parameters for the solc compiler.
    #[clap(flatten, next_help_heading = "Solc Configuration")]
@@ -95,10 +248,18 @@ pub struct ExecutionContext {
    #[clap(flatten, next_help_heading = "Resolc Configuration")]
    pub resolc_configuration: ResolcConfiguration,

    /// Configuration parameters for the Polkadot Parachain.
    #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
    pub polkadot_parachain_configuration: PolkadotParachainConfiguration,

    /// Configuration parameters for the geth node.
    #[clap(flatten, next_help_heading = "Geth Configuration")]
    pub geth_configuration: GethConfiguration,

    /// Configuration parameters for the lighthouse node.
    #[clap(flatten, next_help_heading = "Lighthouse Configuration")]
    pub lighthouse_configuration: KurtosisConfiguration,

    /// Configuration parameters for the Kitchensink.
    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
    pub kitchensink_configuration: KitchensinkConfiguration,
@@ -130,88 +291,295 @@ pub struct ExecutionContext {
    /// Configuration parameters for the report.
    #[clap(flatten, next_help_heading = "Report Configuration")]
    pub report_configuration: ReportConfiguration,

    /// Configuration parameters for ignoring certain test cases based on the report
    #[clap(flatten, next_help_heading = "Ignore Success Configuration")]
    pub ignore_success_configuration: IgnoreSuccessConfiguration,
}

impl Default for ExecutionContext {
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct BenchmarkingContext {
    /// The working directory that the program will use for all of the temporary artifacts needed at
    /// runtime.
    ///
    /// If not specified, then a temporary directory will be created and used by the program for all
    /// temporary artifacts.
    #[clap(
        short,
        long,
        default_value = "",
        value_hint = ValueHint::DirPath,
    )]
    pub working_directory: WorkingDirectoryConfiguration,

    /// The set of platforms that the differential tests should run on.
    #[arg(
        short = 'p',
        long = "platform",
        default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
    )]
    pub platforms: Vec<PlatformIdentifier>,

    /// The default repetition count for any workload specified but that doesn't contain a repeat
    /// step.
    #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
    pub default_repetition_count: usize,

    /// Configuration parameters for the corpus files to use.
    #[clap(flatten, next_help_heading = "Corpus Configuration")]
    pub corpus_configuration: CorpusConfiguration,

    /// Configuration parameters for the solc compiler.
    #[clap(flatten, next_help_heading = "Solc Configuration")]
    pub solc_configuration: SolcConfiguration,

    /// Configuration parameters for the resolc compiler.
    #[clap(flatten, next_help_heading = "Resolc Configuration")]
    pub resolc_configuration: ResolcConfiguration,

    /// Configuration parameters for the geth node.
    #[clap(flatten, next_help_heading = "Geth Configuration")]
    pub geth_configuration: GethConfiguration,

    /// Configuration parameters for the lighthouse node.
    #[clap(flatten, next_help_heading = "Lighthouse Configuration")]
    pub lighthouse_configuration: KurtosisConfiguration,

    /// Configuration parameters for the Kitchensink.
    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
    pub kitchensink_configuration: KitchensinkConfiguration,

    /// Configuration parameters for the Polkadot Parachain.
    #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
    pub polkadot_parachain_configuration: PolkadotParachainConfiguration,

    /// Configuration parameters for the Revive Dev Node.
    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

    /// Configuration parameters for the Eth Rpc.
    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
    pub eth_rpc_configuration: EthRpcConfiguration,

    /// Configuration parameters for the wallet.
    #[clap(flatten, next_help_heading = "Wallet Configuration")]
    pub wallet_configuration: WalletConfiguration,

    /// Configuration parameters for concurrency.
    #[clap(flatten, next_help_heading = "Concurrency Configuration")]
    pub concurrency_configuration: ConcurrencyConfiguration,

    /// Configuration parameters for the compilers and compilation.
    #[clap(flatten, next_help_heading = "Compilation Configuration")]
    pub compilation_configuration: CompilationConfiguration,

    /// Configuration parameters for the report.
    #[clap(flatten, next_help_heading = "Report Configuration")]
    pub report_configuration: ReportConfiguration,
}

impl Default for TestExecutionContext {
    fn default() -> Self {
        Self::parse_from(["execution-context"])
    }
}

impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
impl AsRef<WorkingDirectoryConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.working_directory
    }
}

impl AsRef<SolcConfiguration> for ExecutionContext {
impl AsRef<CorpusConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &CorpusConfiguration {
        &self.corpus_configuration
    }
}

impl AsRef<SolcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc_configuration
    }
}

impl AsRef<ResolcConfiguration> for ExecutionContext {
impl AsRef<ResolcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ResolcConfiguration {
        &self.resolc_configuration
    }
}

impl AsRef<GethConfiguration> for ExecutionContext {
impl AsRef<GethConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &GethConfiguration {
        &self.geth_configuration
    }
}

impl AsRef<KitchensinkConfiguration> for ExecutionContext {
impl AsRef<PolkadotParachainConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &PolkadotParachainConfiguration {
        &self.polkadot_parachain_configuration
    }
}

impl AsRef<KurtosisConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &KurtosisConfiguration {
        &self.lighthouse_configuration
    }
}

impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        &self.kitchensink_configuration
    }
}

impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        &self.revive_dev_node_configuration
    }
}

impl AsRef<EthRpcConfiguration> for ExecutionContext {
impl AsRef<EthRpcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration
    }
}

impl AsRef<GenesisConfiguration> for ExecutionContext {
impl AsRef<GenesisConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &GenesisConfiguration {
        &self.genesis_configuration
    }
}

impl AsRef<WalletConfiguration> for ExecutionContext {
impl AsRef<WalletConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet_configuration
    }
}

impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
impl AsRef<ConcurrencyConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        &self.concurrency_configuration
    }
}

impl AsRef<CompilationConfiguration> for ExecutionContext {
impl AsRef<CompilationConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &CompilationConfiguration {
        &self.compilation_configuration
    }
}

impl AsRef<ReportConfiguration> for ExecutionContext {
impl AsRef<ReportConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ReportConfiguration {
        &self.report_configuration
    }
}

impl AsRef<IgnoreSuccessConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &IgnoreSuccessConfiguration {
        &self.ignore_success_configuration
    }
}

impl Default for BenchmarkingContext {
    fn default() -> Self {
        Self::parse_from(["execution-context"])
    }
}

impl AsRef<WorkingDirectoryConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.working_directory
    }
}

impl AsRef<CorpusConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &CorpusConfiguration {
        &self.corpus_configuration
    }
}

impl AsRef<SolcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc_configuration
    }
}

impl AsRef<ResolcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ResolcConfiguration {
        &self.resolc_configuration
    }
}

impl AsRef<GethConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &GethConfiguration {
        &self.geth_configuration
    }
}

impl AsRef<KurtosisConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &KurtosisConfiguration {
        &self.lighthouse_configuration
    }
}

impl AsRef<PolkadotParachainConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &PolkadotParachainConfiguration {
        &self.polkadot_parachain_configuration
    }
}

impl AsRef<KitchensinkConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        &self.kitchensink_configuration
    }
}

impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        &self.revive_dev_node_configuration
    }
}

impl AsRef<EthRpcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration
    }
}

impl AsRef<WalletConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet_configuration
    }
}

impl AsRef<ConcurrencyConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        &self.concurrency_configuration
    }
}

impl AsRef<CompilationConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &CompilationConfiguration {
        &self.compilation_configuration
    }
}

impl AsRef<ReportConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ReportConfiguration {
        &self.report_configuration
    }
}

/// A set of configuration parameters for the corpus files to use for the execution.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CorpusConfiguration {
    /// A list of test corpus JSON files to be tested.
    #[arg(short = 'c', long = "corpus")]
    pub paths: Vec<PathBuf>,
}

/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct SolcConfiguration {
    /// Specifies the default version of the Solc compiler that should be used if there is no
    /// override specified by one of the test cases.
@@ -220,7 +588,7 @@ pub struct SolcConfiguration {
}

/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ResolcConfiguration {
    /// Specifies the path of the resolc compiler to be used by the tool.
    ///
@@ -230,8 +598,32 @@ pub struct ResolcConfiguration {
    pub path: PathBuf,
}

/// A set of configuration parameters for Polkadot Parachain.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct PolkadotParachainConfiguration {
    /// Specifies the path of the polkadot-parachain node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary
    /// that's provided in the user's $PATH.
    #[clap(
        id = "polkadot-parachain.path",
        long = "polkadot-parachain.path",
        default_value = "polkadot-parachain"
    )]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "polkadot-parachain.start-timeout-ms",
        long = "polkadot-parachain.start-timeout-ms",
        default_value = "5000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct GethConfiguration {
    /// Specifies the path of the geth node to be used by the tool.
    ///
@@ -244,14 +636,29 @@ pub struct GethConfiguration {
    #[clap(
        id = "geth.start-timeout-ms",
        long = "geth.start-timeout-ms",
        default_value = "5000",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for kurtosis.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KurtosisConfiguration {
    /// Specifies the path of the kurtosis node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the kurtosis binary that's
    /// provided in the user's $PATH.
    #[clap(
        id = "kurtosis.path",
        long = "kurtosis.path",
        default_value = "kurtosis"
    )]
    pub path: PathBuf,
}

/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KitchensinkConfiguration {
    /// Specifies the path of the kitchensink node to be used by the tool.
    ///
@@ -268,18 +675,14 @@ pub struct KitchensinkConfiguration {
    #[clap(
        id = "kitchensink.start-timeout-ms",
        long = "kitchensink.start-timeout-ms",
        default_value = "5000",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,

    /// This configures the tool to use Kitchensink instead of using the revive-dev-node.
    #[clap(long = "kitchensink.dont-use-dev-node")]
    pub use_kitchensink: bool,
}

/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReviveDevNodeConfiguration {
    /// Specifies the path of the revive dev node to be used by the tool.
    ///
@@ -296,14 +699,22 @@ pub struct ReviveDevNodeConfiguration {
    #[clap(
        id = "revive-dev-node.start-timeout-ms",
        long = "revive-dev-node.start-timeout-ms",
        default_value = "5000",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,

    /// The consensus to use for the spawned revive-dev-node.
    #[clap(
        id = "revive-dev-node.consensus",
        long = "revive-dev-node.consensus",
        default_value = "instant-seal"
    )]
    pub consensus: String,
}

/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct EthRpcConfiguration {
    /// Specifies the path of the ETH RPC to be used by the tool.
    ///
@@ -316,14 +727,14 @@ pub struct EthRpcConfiguration {
    #[clap(
        id = "eth-rpc.start-timeout-ms",
        long = "eth-rpc.start-timeout-ms",
        default_value = "5000",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Default, Parser, Serialize, Deserialize)]
pub struct GenesisConfiguration {
    /// Specifies the path of the genesis file to use for the nodes that are started.
    ///
@@ -340,7 +751,7 @@ pub struct GenesisConfiguration {
impl GenesisConfiguration {
    pub fn genesis(&self) -> anyhow::Result<&Genesis> {
        static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
            let genesis = include_str!("../../../genesis.json");
            let genesis = include_str!("../../../assets/dev-genesis.json");
            serde_json::from_str(genesis).unwrap()
        });

@@ -361,21 +772,20 @@ impl GenesisConfiguration {
}

/// A set of configuration parameters for the wallet.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct WalletConfiguration {
    /// The private key of the default signer.
    #[clap(
        long = "wallet.default-private-key",
        default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
    )]
    #[serde(serialize_with = "serialize_private_key")]
    default_key: PrivateKeySigner,
    default_key: B256,

    /// This argument controls which private keys the nodes should have access to and be added to
    /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
    /// of the node.
    #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
    additional_keys: usize,
    pub additional_keys: usize,

    /// The wallet object that will be used.
    #[clap(skip)]
@@ -387,7 +797,8 @@ impl WalletConfiguration {
    pub fn wallet(&self) -> Arc<EthereumWallet> {
        self.wallet
            .get_or_init(|| {
                let mut wallet = EthereumWallet::new(self.default_key.clone());
                let mut wallet =
                    EthereumWallet::new(PrivateKeySigner::from_bytes(&self.default_key).unwrap());
                for signer in (1..=self.additional_keys)
                    .map(|id| U256::from(id))
                    .map(|id| id.to_be_bytes::<32>())
@@ -399,17 +810,14 @@ impl WalletConfiguration {
            })
            .clone()
    }
}

fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    value.to_bytes().encode_hex().serialize(serializer)
    pub fn highest_private_key_exclusive(&self) -> U256 {
        U256::try_from(self.additional_keys).unwrap()
    }
}

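The reworked wallet stores the default key as a raw `B256` and derives the additional signers from the integers 1..=N, treating each index as a 32-byte big-endian private key. A standalone sketch of that derivation, assuming the `alloy` crate as imported in this file:

use alloy::primitives::{B256, U256};
use alloy::signers::local::PrivateKeySigner;

fn main() {
    // Index 1 becomes the 32-byte big-endian key 0x00..01, matching the
    // `U256::from(id).to_be_bytes::<32>()` chain in the diff above.
    let id = U256::from(1u64);
    let bytes: [u8; 32] = id.to_be_bytes::<32>();
    let key = B256::from(bytes);
    let signer = PrivateKeySigner::from_bytes(&key).expect("1 is a valid secp256k1 key");
    println!("derived signer address: {}", signer.address());
}
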
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ConcurrencyConfiguration {
    /// Determines the amount of nodes that will be spawned for each chain.
    #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
@@ -447,14 +855,14 @@ impl ConcurrencyConfiguration {
    }
}

#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CompilationConfiguration {
    /// Controls if the compilation cache should be invalidated or not.
    #[arg(long = "compilation.invalidate-cache")]
    pub invalidate_compilation_cache: bool,
}

#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReportConfiguration {
    /// Controls if the compiler input is included in the final report.
    #[clap(long = "report.include-compiler-input")]
@@ -465,6 +873,13 @@ pub struct ReportConfiguration {
    pub include_compiler_output: bool,
}

#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct IgnoreSuccessConfiguration {
    /// The path of the report generated by the tool to use to ignore the cases that succeeded.
    #[clap(long = "ignore-success.report-path")]
    pub path: Option<PathBuf>,
}

/// Represents the working directory that the program uses.
#[derive(Debug, Clone)]
pub enum WorkingDirectoryConfiguration {
@@ -474,6 +889,24 @@ pub enum WorkingDirectoryConfiguration {
    Path(PathBuf),
}

impl Serialize for WorkingDirectoryConfiguration {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.as_path().serialize(serializer)
    }
}

impl<'a> Deserialize<'a> for WorkingDirectoryConfiguration {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        PathBuf::deserialize(deserializer).map(Self::Path)
    }
}

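The manual `Serialize`/`Deserialize` pair above flattens the enum to a plain path on the wire: whatever variant is held serializes as its current path, and anything read back is treated as an explicit `Path`. A simplified round-trip illustrating that asymmetry, mirroring (not reusing) the real type and assuming `serde_json` for the demonstration:

use std::path::PathBuf;

use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[derive(Debug)]
enum WorkingDir {
    // The real type also has a temp-dir variant; both variants serialize
    // to just a path string.
    Path(PathBuf),
}

impl WorkingDir {
    fn as_path(&self) -> &PathBuf {
        match self {
            WorkingDir::Path(path) => path,
        }
    }
}

impl Serialize for WorkingDir {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.as_path().serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for WorkingDir {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        PathBuf::deserialize(deserializer).map(WorkingDir::Path)
    }
}

fn main() {
    let dir = WorkingDir::Path(PathBuf::from("/tmp/retester"));
    let json = serde_json::to_string(&dir).unwrap();
    assert_eq!(json, "\"/tmp/retester\"");
    let back: WorkingDir = serde_json::from_str(&json).unwrap();
    assert_eq!(back.as_path(), &PathBuf::from("/tmp/retester"));
}
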
impl WorkingDirectoryConfiguration {
    pub fn as_path(&self) -> &Path {
        self.as_ref()
@@ -523,15 +956,6 @@ impl Display for WorkingDirectoryConfiguration {
    }
}

impl Serialize for WorkingDirectoryConfiguration {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.as_path().serialize(serializer)
    }
}

fn parse_duration(s: &str) -> anyhow::Result<Duration> {
    u64::from_str(s)
        .map(Duration::from_millis)
@@ -563,4 +987,33 @@ pub enum TestingPlatform {
    Geth,
    /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
    Kitchensink,
    /// A polkadot/Substrate based network
    Zombienet,
}

/// The output format to use for the test execution output.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum OutputFormat {
    /// The legacy format that was used in the past for the output.
    Legacy,

    /// An output format that heavily resembles the output from `cargo test`.
    CargoTestLike,
}

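`OutputFormat` derives both clap's `ValueEnum` and strum's string conversions under `serialize_all = "kebab-case"`, so the flag value on the command line, `Display`, and `FromStr` all agree on the same spelling. A small round-trip sketch using only the strum derives shown above:

use std::str::FromStr;

use strum::{Display, EnumString};

#[derive(Debug, PartialEq, Display, EnumString)]
#[strum(serialize_all = "kebab-case")]
enum OutputFormat {
    Legacy,
    CargoTestLike,
}

fn main() {
    // `Display` renders the kebab-case name used on the command line...
    assert_eq!(OutputFormat::CargoTestLike.to_string(), "cargo-test-like");
    // ...and `FromStr` (via EnumString) parses it back.
    assert_eq!(
        OutputFormat::from_str("cargo-test-like").unwrap(),
        OutputFormat::CargoTestLike
    );
}
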
@@ -21,6 +21,7 @@ revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

ansi_term = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }

@@ -0,0 +1,758 @@
use std::{
    collections::HashMap,
    ops::ControlFlow,
    sync::{
        Arc,
        atomic::{AtomicUsize, Ordering},
    },
    time::Duration,
};

use alloy::{
    hex,
    json_abi::JsonAbi,
    network::{Ethereum, TransactionBuilder},
    primitives::{Address, TxHash, U256},
    rpc::types::{
        TransactionReceipt, TransactionRequest,
        trace::geth::{
            CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
            GethDebugTracingOptions,
        },
    },
};
use anyhow::{Context as _, Result, bail};
use futures::TryFutureExt;
use indexmap::IndexMap;
use revive_dt_common::{
    futures::{PollingWaitBehavior, poll},
    types::PrivateKeyAllocator,
};
use revive_dt_format::{
    metadata::{ContractInstance, ContractPathAndIdent},
    steps::{
        AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method,
        RepeatStep, Step, StepAddress, StepIdx, StepPath, StorageEmptyAssertionStep,
    },
    traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};

use crate::{
    differential_benchmarks::{ExecutionState, WatcherEvent},
    helpers::{CachedCompiler, TestDefinition, TestPlatformInformation},
};

static DRIVER_COUNT: AtomicUsize = AtomicUsize::new(0);

/// The differential tests driver for a single platform.
pub struct Driver<'a, I> {
    /// The id of the driver.
    driver_id: usize,

    /// The information of the platform that this driver is for.
    platform_information: &'a TestPlatformInformation<'a>,

    /// The resolver of the platform.
    resolver: Arc<dyn ResolverApi + 'a>,

    /// The definition of the test that the driver is instructed to execute.
    test_definition: &'a TestDefinition<'a>,

    /// The private key allocator used by this driver and other drivers when account allocations are
    /// needed.
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,

    /// The execution state associated with the platform.
    execution_state: ExecutionState,

    /// The send side of the watcher's unbounded channel associated with this driver.
    watcher_tx: UnboundedSender<WatcherEvent>,

    /// The number of steps that were executed on the driver.
    steps_executed: usize,

    /// This is the queue of steps that are to be executed by the driver for this test case. Each
    /// time `execute_step` is called one of the steps is executed.
    steps_iterator: I,
}

impl<'a, I> Driver<'a, I>
where
    I: Iterator<Item = (StepPath, Step)>,
{
    // region:Constructors & Initialization
    pub async fn new(
        platform_information: &'a TestPlatformInformation<'a>,
        test_definition: &'a TestDefinition<'a>,
        private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
        cached_compiler: &CachedCompiler<'a>,
        watcher_tx: UnboundedSender<WatcherEvent>,
        steps: I,
    ) -> Result<Self> {
        let mut this = Driver {
            driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
            platform_information,
            resolver: platform_information
                .node
                .resolver()
                .await
                .context("Failed to create resolver")?,
            test_definition,
            private_key_allocator,
            execution_state: ExecutionState::empty(),
            steps_executed: 0,
            steps_iterator: steps,
            watcher_tx,
        };
        this.init_execution_state(cached_compiler)
            .await
            .context("Failed to initialize the execution state of the platform")?;
        Ok(this)
    }

    async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> {
        let compiler_output = cached_compiler
            .compile_contracts(
                self.test_definition.metadata,
                self.test_definition.metadata_file_path,
                self.test_definition.mode.clone(),
                None,
                self.platform_information.compiler.as_ref(),
                self.platform_information.platform,
                &self.platform_information.reporter,
            )
            .await
            .inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
            .context("Failed to produce the pre-linking compiled contracts")?;

        let mut deployed_libraries = None::<HashMap<_, _>>;
        let mut contract_sources = self
            .test_definition
            .metadata
            .contract_sources()
            .inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
            .context("Failed to get the contract instances from the metadata file")?;
        for library_instance in self
            .test_definition
            .metadata
            .libraries
            .iter()
            .flatten()
            .flat_map(|(_, map)| map.values())
        {
            debug!(%library_instance, "Deploying Library Instance");

            let ContractPathAndIdent {
                contract_source_path: library_source_path,
                contract_ident: library_ident,
            } = contract_sources
                .remove(library_instance)
                .context("Failed to get the contract sources of the contract instance")?;

            let (code, abi) = compiler_output
                .contracts
                .get(&library_source_path)
                .and_then(|contracts| contracts.get(library_ident.as_str()))
                .context("Failed to get the code and abi for the instance")?;

            let code = alloy::hex::decode(code)?;

            // Getting the deployer address from the cases themselves. This is to ensure
            // that we're doing the deployments from different accounts and therefore we're
            // not slowed down by the nonce.
            let deployer_address = self
                .test_definition
                .case
                .steps
                .iter()
                .filter_map(|step| match step {
                    Step::FunctionCall(input) => input.caller.as_address().copied(),
                    Step::BalanceAssertion(..) => None,
                    Step::StorageEmptyAssertion(..) => None,
                    Step::Repeat(..) => None,
                    Step::AllocateAccount(..) => None,
                })
                .next()
                .unwrap_or(FunctionCallStep::default_caller_address());
            let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
                TransactionRequest::default().from(deployer_address),
                code,
            );
            let receipt = self
                .execute_transaction(tx)
                .and_then(|(_, receipt_fut)| receipt_fut)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %library_instance,
                        "Failed to deploy the library"
                    )
                })?;

            debug!(?library_instance, "Deployed library");

            let library_address = receipt
                .contract_address
                .expect("Failed to deploy the library");

            deployed_libraries.get_or_insert_default().insert(
                library_instance.clone(),
                (library_ident.clone(), library_address, abi.clone()),
            );
        }

        let compiler_output = cached_compiler
            .compile_contracts(
                self.test_definition.metadata,
                self.test_definition.metadata_file_path,
                self.test_definition.mode.clone(),
                deployed_libraries.as_ref(),
                self.platform_information.compiler.as_ref(),
                self.platform_information.platform,
                &self.platform_information.reporter,
            )
            .await
            .inspect_err(|err| error!(?err, "Post-linking compilation failed"))
            .context("Failed to compile the post-link contracts")?;

        self.execution_state = ExecutionState::new(
            compiler_output.contracts,
            deployed_libraries.unwrap_or_default(),
        );

        Ok(())
    }
    // endregion:Constructors & Initialization

    // region:Step Handling
    pub async fn execute_all(mut self) -> Result<usize> {
        while let Some(result) = self.execute_next_step().await {
            result?
        }
        Ok(self.steps_executed)
    }

    pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
        let (step_path, step) = self.steps_iterator.next()?;
        info!(%step_path, "Executing Step");
        Some(
            self.execute_step(&step_path, &step)
                .await
                .inspect(|_| info!(%step_path, "Step execution succeeded"))
                .inspect_err(|err| error!(%step_path, ?err, "Step execution failed")),
        )
    }

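`execute_all` is a thin loop over `execute_next_step`, which lets a caller either drain the whole step queue or interleave its own bookkeeping between steps. A minimal mirror of that split (the `StepRunner` below is illustrative, not the crate's `Driver`):

struct StepRunner<I> {
    steps: I,
    executed: usize,
}

impl<I: Iterator<Item = &'static str>> StepRunner<I> {
    // Returns None when the queue is exhausted, mirroring `execute_next_step`.
    fn execute_next_step(&mut self) -> Option<Result<(), String>> {
        let step = self.steps.next()?;
        // Real step execution happens here; we just count and log.
        self.executed += 1;
        println!("executed step {step}");
        Some(Ok(()))
    }

    fn execute_all(mut self) -> Result<usize, String> {
        while let Some(result) = self.execute_next_step() {
            result?;
        }
        Ok(self.executed)
    }
}

fn main() {
    let runner = StepRunner {
        steps: ["deploy", "call", "assert"].into_iter(),
        executed: 0,
    };
    assert_eq!(runner.execute_all().unwrap(), 3);
}
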
    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            %step_path,
        ),
        err(Debug),
    )]
    async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> {
        let steps_executed = match step {
            Step::FunctionCall(step) => self
                .execute_function_call(step_path, step.as_ref())
                .await
                .context("Function call step Failed"),
            Step::Repeat(step) => self
                .execute_repeat_step(step_path, step.as_ref())
                .await
                .context("Repetition Step Failed"),
            Step::AllocateAccount(step) => self
                .execute_account_allocation(step_path, step.as_ref())
                .await
                .context("Account Allocation Step Failed"),
            // The following steps are disabled in the benchmarking driver.
            Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) => Ok(0),
        }?;
        self.steps_executed += steps_executed;
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
    pub async fn execute_function_call(
        &mut self,
        _: &StepPath,
        step: &FunctionCallStep,
    ) -> Result<usize> {
        let deployment_receipts = self
            .handle_function_call_contract_deployment(step)
            .await
            .context("Failed to deploy contracts for the function call step")?;
        let transaction_hash = self
            .handle_function_call_execution(step, deployment_receipts)
            .await
            .context("Failed to handle the function call execution")?;
        self.handle_function_call_variable_assignment(step, transaction_hash)
            .await
            .context("Failed to handle function call variable assignment")?;
        Ok(1)
    }

    async fn handle_function_call_contract_deployment(
        &mut self,
        step: &FunctionCallStep,
    ) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
        let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
        for instance in step.find_all_contract_instances().into_iter() {
            if !self
                .execution_state
                .deployed_contracts
                .contains_key(&instance)
            {
                instances_we_must_deploy.entry(instance).or_insert(false);
            }
        }
        if let Method::Deployer = step.method {
            instances_we_must_deploy.swap_remove(&step.instance);
            instances_we_must_deploy.insert(step.instance.clone(), true);
        }

        let mut receipts = HashMap::new();
        for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
            let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
            let value = deploy_with_constructor_arguments
                .then_some(step.value)
                .flatten();

            let caller = {
                let context = self.default_resolution_context();
                step.caller
                    .resolve_address(self.resolver.as_ref(), context)
                    .await?
            };
            if let (_, _, Some(receipt)) = self
                .get_or_deploy_contract_instance(&instance, caller, calldata, value)
                .await
                .context("Failed to get or deploy contract instance during input execution")?
            {
                receipts.insert(instance.clone(), receipt);
            }
        }

        Ok(receipts)
    }

    async fn handle_function_call_execution(
        &mut self,
        step: &FunctionCallStep,
        mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
    ) -> Result<TxHash> {
        match step.method {
            // This step was already executed when `handle_step` was called. We just need to
            // lookup the transaction receipt in this case and continue on.
            Method::Deployer => deployment_receipts
                .remove(&step.instance)
                .context("Failed to find deployment receipt for constructor call")
                .map(|receipt| receipt.transaction_hash),
            Method::Fallback | Method::FunctionName(_) => {
                let tx = step
                    .as_transaction(self.resolver.as_ref(), self.default_resolution_context())
                    .await?;
                Ok(self.execute_transaction(tx).await?.0)
            }
        }
    }

    async fn handle_function_call_call_frame_tracing(
        &mut self,
        tx_hash: TxHash,
    ) -> Result<CallFrame> {
        self.platform_information
            .node
            .trace_transaction(
                tx_hash,
                GethDebugTracingOptions {
                    tracer: Some(GethDebugTracerType::BuiltInTracer(
                        GethDebugBuiltInTracerType::CallTracer,
                    )),
                    tracer_config: GethDebugTracerConfig(serde_json::json! {{
                        "onlyTopCall": true,
                        "withLog": false,
                        "withStorage": false,
                        "withMemory": false,
                        "withStack": false,
                        "withReturnData": true
                    }}),
                    ..Default::default()
                },
            )
            .await
            .map(|trace| {
                trace
                    .try_into_call_frame()
                    .expect("Impossible - we requested a callframe trace so we must get it back")
|
||||
})
|
||||
}
|
||||
|
||||
async fn handle_function_call_variable_assignment(
|
||||
&mut self,
|
||||
step: &FunctionCallStep,
|
||||
tx_hash: TxHash,
|
||||
) -> Result<()> {
|
||||
let Some(ref assignments) = step.variable_assignments else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
// Handling the return data variable assignments.
|
||||
let callframe = OnceCell::new();
|
||||
for (variable_name, output_word) in assignments.return_data.iter().zip(
|
||||
callframe
|
||||
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
|
||||
.await
|
||||
.context("Failed to get the callframe trace for transaction")?
|
||||
.output
|
||||
.as_ref()
|
||||
.unwrap_or_default()
|
||||
.to_vec()
|
||||
.chunks(32),
|
||||
) {
|
||||
let value = U256::from_be_slice(output_word);
|
||||
self.execution_state
|
||||
.variables
|
||||
.insert(variable_name.clone(), value);
|
||||
tracing::info!(
|
||||
variable_name,
|
||||
variable_value = hex::encode(value.to_be_bytes::<32>()),
|
||||
"Assigned variable"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
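
    // Note: the `OnceCell` used above gives us at-most-once call-frame tracing: the first
    // variable assignment triggers `handle_function_call_call_frame_tracing`, and subsequent
    // iterations reuse the cached `CallFrame`. A minimal sketch of the same pattern, assuming
    // `tokio::sync::OnceCell` (the `fetch` helper below is hypothetical):
    //
    //     use tokio::sync::OnceCell;
    //
    //     async fn fetch() -> anyhow::Result<String> {
    //         Ok("expensive result".to_string())
    //     }
    //
    //     async fn demo() -> anyhow::Result<()> {
    //         let cell: OnceCell<String> = OnceCell::new();
    //         for _ in 0..3 {
    //             // `fetch` only runs on the first pass; later passes hit the cache.
    //             let value = cell.get_or_try_init(fetch).await?;
    //             println!("{value}");
    //         }
    //         Ok(())
    //     }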

    #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
    pub async fn execute_balance_assertion(
        &mut self,
        _: &StepPath,
        _: &BalanceAssertionStep,
    ) -> anyhow::Result<usize> {
        // Kept empty intentionally for the benchmark driver.
        Ok(1)
    }

    #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
    async fn execute_storage_empty_assertion_step(
        &mut self,
        _: &StepPath,
        _: &StorageEmptyAssertionStep,
    ) -> Result<usize> {
        // Kept empty intentionally for the benchmark driver.
        Ok(1)
    }

    #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
    async fn execute_repeat_step(
        &mut self,
        step_path: &StepPath,
        step: &RepeatStep,
    ) -> Result<usize> {
        let tasks = (0..step.repeat)
            .map(|_| Driver {
                driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
                platform_information: self.platform_information,
                resolver: self.resolver.clone(),
                test_definition: self.test_definition,
                private_key_allocator: self.private_key_allocator.clone(),
                execution_state: self.execution_state.clone(),
                steps_executed: 0,
                steps_iterator: {
                    let steps = step
                        .steps
                        .iter()
                        .cloned()
                        .enumerate()
                        .map(|(step_idx, step)| {
                            let step_idx = StepIdx::new(step_idx);
                            let step_path = step_path.append(step_idx);
                            (step_path, step)
                        })
                        .collect::<Vec<_>>();
                    steps.into_iter()
                },
                watcher_tx: self.watcher_tx.clone(),
            })
            .map(|driver| driver.execute_all());

        // TODO: Determine how we want to know the `ignore_block_before` and if it's through the
        // receipt and how this would impact the architecture and the possibility of us not waiting
        // for receipts in the future.
        self.watcher_tx
            .send(WatcherEvent::RepetitionStartEvent {
                ignore_block_before: 0,
            })
            .context("Failed to send message on the watcher's tx")?;

        let res = futures::future::try_join_all(tasks)
            .await
            .context("Repetition execution failed")?;
        Ok(res.into_iter().sum())
    }

    #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))]
    pub async fn execute_account_allocation(
        &mut self,
        _: &StepPath,
        step: &AllocateAccountStep,
    ) -> Result<usize> {
        let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else {
            bail!("Account allocation must start with $VARIABLE:");
        };

        let private_key = self
            .private_key_allocator
            .lock()
            .await
            .allocate()
            .context("Account allocation through the private key allocator failed")?;
        let account = private_key.address();
        let variable = U256::from_be_slice(account.0.as_slice());

        self.execution_state
            .variables
            .insert(variable_name.to_string(), variable);

        Ok(1)
    }
    // endregion:Step Handling

    // region:Contract Deployment
    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            %contract_instance,
            %deployer
        ),
        err(Debug),
    )]
    async fn get_or_deploy_contract_instance(
        &mut self,
        contract_instance: &ContractInstance,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
    ) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
        if let Some((_, address, abi)) = self
            .execution_state
            .deployed_contracts
            .get(contract_instance)
        {
            info!(
                %address,
                "Contract instance already deployed."
            );
            Ok((*address, abi.clone(), None))
        } else {
            info!("Contract instance requires deployment.");
            let (address, abi, receipt) = self
                .deploy_contract(contract_instance, deployer, calldata, value)
                .await
                .context("Failed to deploy contract")?;
            info!(
                %address,
                "Contract instance has been deployed."
            );
            Ok((address, abi, Some(receipt)))
        }
    }

    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            %contract_instance,
            %deployer
        ),
        err(Debug),
    )]
    async fn deploy_contract(
        &mut self,
        contract_instance: &ContractInstance,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
    ) -> Result<(Address, JsonAbi, TransactionReceipt)> {
        let Some(ContractPathAndIdent {
            contract_source_path,
            contract_ident,
        }) = self
            .test_definition
            .metadata
            .contract_sources()?
            .remove(contract_instance)
        else {
            anyhow::bail!(
                "Contract source not found for instance {:?}",
                contract_instance
            )
        };

        let Some((code, abi)) = self
            .execution_state
            .compiled_contracts
            .get(&contract_source_path)
            .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
            .cloned()
        else {
            anyhow::bail!(
                "Failed to find information for contract {:?}",
                contract_instance
            )
        };

        let mut code = match alloy::hex::decode(&code) {
            Ok(code) => code,
            Err(error) => {
                tracing::error!(
                    ?error,
                    contract_source_path = contract_source_path.display().to_string(),
                    contract_ident = contract_ident.as_ref(),
                    "Failed to hex-decode the bytecode - this could mean that the bytecode still requires linking"
                );
                anyhow::bail!("Failed to hex-decode the bytecode: {}", error)
            }
        };

        if let Some(calldata) = calldata {
            let calldata = calldata
                .calldata(self.resolver.as_ref(), self.default_resolution_context())
                .await?;
            code.extend(calldata);
        }

        let tx = {
            let tx = TransactionRequest::default().from(deployer);
            let tx = match value {
                Some(ref value) => tx.value(value.into_inner()),
                _ => tx,
            };
            TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
        };

        let receipt = match self
            .execute_transaction(tx)
            .and_then(|(_, receipt_fut)| receipt_fut)
            .await
        {
            Ok(receipt) => receipt,
            Err(error) => {
                tracing::error!(?error, "Contract deployment transaction failed.");
                return Err(error);
            }
        };

        let Some(address) = receipt.contract_address else {
            anyhow::bail!("Contract deployment didn't return an address");
        };
        tracing::info!(
            instance_name = ?contract_instance,
            instance_address = ?address,
            "Deployed contract"
        );
        self.platform_information
            .reporter
            .report_contract_deployed_event(contract_instance.clone(), address)?;

        self.execution_state.deployed_contracts.insert(
            contract_instance.clone(),
            (contract_ident, address, abi.clone()),
        );

        Ok((address, abi, receipt))
    }

    #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)]
    async fn step_address_auto_deployment(
        &mut self,
        step_address: &StepAddress,
    ) -> Result<Address> {
        match step_address {
            StepAddress::Address(address) => Ok(*address),
            StepAddress::ResolvableAddress(resolvable) => {
                let Some(instance) = resolvable
                    .strip_suffix(".address")
                    .map(ContractInstance::new)
                else {
                    bail!("Not an address variable");
                };

                self.get_or_deploy_contract_instance(
                    &instance,
                    FunctionCallStep::default_caller_address(),
                    None,
                    None,
                )
                .await
                .map(|v| v.0)
            }
        }
    }
    // endregion:Contract Deployment

    // region:Resolution & Resolver
    fn default_resolution_context(&self) -> ResolutionContext<'_> {
        ResolutionContext::default()
            .with_deployed_contracts(&self.execution_state.deployed_contracts)
            .with_variables(&self.execution_state.variables)
    }
    // endregion:Resolution & Resolver

    // region:Transaction Execution
    /// Executes the transaction on the driver's node, with custom waiting logic for the receipt.
    #[instrument(
        level = "info",
        skip_all,
        fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
    )]
    async fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
        let node = self.platform_information.node;
        let transaction_hash = node
            .submit_transaction(transaction)
            .await
            .context("Failed to submit transaction")?;
        Span::current().record("transaction_hash", display(transaction_hash));

        info!("Submitted transaction");
        self.watcher_tx
            .send(WatcherEvent::SubmittedTransaction { transaction_hash })
            .context("Failed to send the transaction hash to the watcher")?;

        Ok((transaction_hash, async move {
            info!("Starting to poll for transaction receipt");
            poll(
                Duration::from_secs(30 * 60),
                PollingWaitBehavior::Constant(Duration::from_secs(1)),
                || {
                    async move {
                        match node.get_receipt(transaction_hash).await {
                            Ok(receipt) => {
                                info!("Polling succeeded, receipt found");
                                Ok(ControlFlow::Break(receipt))
                            }
                            Err(_) => Ok(ControlFlow::Continue(())),
                        }
                    }
                    .instrument(info_span!("Polling for receipt"))
                },
            )
            .instrument(info_span!("Polling for receipt", %transaction_hash))
            .await
            .inspect(|_| info!("Found the transaction receipt"))
        }))
    }
    // endregion:Transaction Execution
}
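
For reference, the receipt future returned by `execute_transaction` is built on the project's `poll` helper with a 30-minute timeout and a constant one-second wait between attempts. A minimal, self-contained sketch of that polling pattern (plain Tokio; `poll_constant` below is a stand-in for the project's `poll` and `PollingWaitBehavior::Constant` helpers, whose exact signatures are not shown here):

use std::{ops::ControlFlow, time::Duration};

/// A stand-in for the project's `poll` helper: retry `f` at a fixed interval until it
/// breaks with a value or the overall timeout elapses.
async fn poll_constant<T, F, Fut>(timeout: Duration, wait: Duration, mut f: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = anyhow::Result<ControlFlow<T, ()>>>,
{
    let deadline = tokio::time::Instant::now() + timeout;
    loop {
        match f().await? {
            ControlFlow::Break(value) => return Ok(value),
            ControlFlow::Continue(()) => {
                if tokio::time::Instant::now() >= deadline {
                    anyhow::bail!("timed out waiting for the receipt");
                }
                tokio::time::sleep(wait).await;
            }
        }
    }
}

Returning `ControlFlow::Break` ends the loop with a value, while `ControlFlow::Continue` retries after the fixed wait, which matches the shape of the receipt-polling closure above.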
@@ -0,0 +1,181 @@
//! The main entry point for differential benchmarking.

use std::{collections::BTreeMap, sync::Arc};

use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::steps::{Step, StepIdx, StepPath};
use tokio::sync::Mutex;
use tracing::{Instrument, error, info, info_span, instrument, warn};

use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;

use crate::{
    differential_benchmarks::{Driver, Watcher, WatcherEvent},
    helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};

/// Handles the differential benchmarking, executing it according to the information defined in
/// the context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_benchmarks(
    mut context: BenchmarkingContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    // A bit of a hack, but we need to override the number of nodes specified through the CLI
    // since benchmarks can only be run on a single node. Perhaps in the future we'll have a
    // cleaner way to do this, but for the time being we need to override the CLI arguments.
    if context.concurrency_configuration.number_of_nodes != 1 {
        warn!(
            specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
            updated_number_of_nodes = 1,
            "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
        );
        context.concurrency_configuration.number_of_nodes = 1;
    }
    let full_context = Context::Benchmark(Box::new(context.clone()));

    // Discover all of the metadata files that are defined in the context.
    let metadata_files = collect_metadata_files(&context)
        .context("Failed to collect metadata files for differential testing")?;
    info!(len = metadata_files.len(), "Discovered metadata files");

    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    // Start the nodes of the various platforms specified in the context. Note that we use the
    // node pool since it contains all of the code needed to spawn nodes from A to Z, which makes
    // it the preferred way to start nodes even when we're starting just a single node. The added
    // performance overhead is quite small since the pool is only involved when we're creating the
    // test definitions, but it might carry some maintenance overhead as it obscures the fact that
    // only a single node is spawned.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();

        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();

            let node_pool = NodePool::new(full_context.clone(), *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;

            map.insert(platform_identifier, (*platform, node_pool));
        }

        map
    };
    info!("Spawned the platform nodes");

    // Prepare the test definitions for execution.
    let test_definitions = create_test_definitions_stream(
        &full_context,
        metadata_files.iter(),
        &platforms_and_nodes,
        None,
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");

    // Create the objects that will be shared between the various runs. The cached compiler is
    // currently the only one that is safe to share between runs.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;

    // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we
    // run all of the workloads for one platform and then move on to the next sequentially, so
    // that the effect of concurrency on the benchmark measurements is minimized.
    for platform in platforms.iter() {
        let platform_identifier = platform.platform_identifier();

        let span = info_span!("Benchmarking for the platform", %platform_identifier);
        let _guard = span.enter();

        for test_definition in test_definitions.iter() {
            let platform_information = &test_definition.platforms[&platform_identifier];

            let span = info_span!(
                "Executing workload",
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %test_definition.mode,
            );
            let _guard = span.enter();

            // Initialize all of the components required to execute this particular workload.
            let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                context.wallet_configuration.highest_private_key_exclusive(),
            )));
            let (watcher, watcher_tx) = Watcher::new(
                platform_identifier,
                platform_information
                    .node
                    .subscribe_to_full_blocks_information()
                    .await
                    .context("Failed to subscribe to full blocks information from the node")?,
            );
            let driver = Driver::new(
                platform_information,
                test_definition,
                private_key_allocator,
                cached_compiler.as_ref(),
                watcher_tx.clone(),
                test_definition
                    .case
                    .steps_iterator_for_benchmarks(context.default_repetition_count)
                    .enumerate()
                    .map(|(step_idx, step)| -> (StepPath, Step) {
                        (StepPath::new(vec![StepIdx::new(step_idx)]), step)
                    }),
            )
            .await
            .context("Failed to create the benchmarks driver")?;

            futures::future::try_join(
                watcher.run(),
                driver
                    .execute_all()
                    .instrument(info_span!("Executing Benchmarks", %platform_identifier))
                    .inspect(|_| {
                        info!("All transactions submitted - driver completed execution");
                        watcher_tx
                            .send(WatcherEvent::AllTransactionsSubmitted)
                            .unwrap()
                    }),
            )
            .await
            .context("Failed to run the driver and executor")
            .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
            .inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
        }
    }

    Ok(())
}
@@ -0,0 +1,43 @@
use std::{collections::HashMap, path::PathBuf};

use alloy::{
    json_abi::JsonAbi,
    primitives::{Address, U256},
};

use revive_dt_format::metadata::{ContractIdent, ContractInstance};

/// The state associated with the test execution of one of the workloads.
#[derive(Clone)]
pub struct ExecutionState {
    /// The compiled contracts. These contracts have been compiled and have had the libraries
    /// linked against them, so they're ready to be deployed on demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    pub variables: HashMap<String, U256>,
}

impl ExecutionState {
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
        }
    }

    pub fn empty() -> Self {
        Self {
            compiled_contracts: Default::default(),
            deployed_contracts: Default::default(),
            variables: Default::default(),
        }
    }
}
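
As a usage illustration, a variable written by one step is visible to every later step that shares the same `ExecutionState` (a minimal sketch; the variable name and value are arbitrary):

use alloy::primitives::U256;

fn demo() {
    let mut state = ExecutionState::empty();

    // An account-allocation step stores a value under a variable name...
    state.variables.insert("owner".to_string(), U256::from(1u64));

    // ...and a later function-call step can resolve it from the same state.
    assert_eq!(state.variables.get("owner"), Some(&U256::from(1u64)));
}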
@@ -0,0 +1,9 @@
mod driver;
mod entry_point;
mod execution_state;
mod watcher;

pub use driver::*;
pub use entry_point::*;
pub use execution_state::*;
pub use watcher::*;
@@ -0,0 +1,195 @@
use std::{collections::HashSet, pin::Pin, sync::Arc};

use alloy::primitives::{BlockNumber, TxHash};
use anyhow::Result;
use futures::{Stream, StreamExt};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_node_interaction::MinedBlockInformation;
use tokio::sync::{
    RwLock,
    mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::{info, instrument};

/// This struct defines the watcher used in the benchmarks. A watcher is only valid for a single
/// workload and MUST NOT be reused between workloads since it holds internal state that is
/// specific to a given workload.
pub struct Watcher {
    /// The identifier of the platform that this watcher is for.
    platform_identifier: PlatformIdentifier,

    /// The receive side of the channel on which the drivers and various other parts of the code
    /// send events to the watcher.
    rx: UnboundedReceiver<WatcherEvent>,

    /// A stream of the blocks that were mined by the node. This is for a single platform and a
    /// single node from that platform.
    blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
}

impl Watcher {
    pub fn new(
        platform_identifier: PlatformIdentifier,
        blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
    ) -> (Self, UnboundedSender<WatcherEvent>) {
        let (tx, rx) = unbounded_channel::<WatcherEvent>();
        (
            Self {
                platform_identifier,
                rx,
                blocks_stream,
            },
            tx,
        )
    }

    #[instrument(level = "info", skip_all)]
    pub async fn run(mut self) -> Result<()> {
        // The first event that the watcher receives must be a `RepetitionStartEvent` that informs
        // the watcher of the last block number that it should ignore and, implicitly, of the
        // first important block that it should look for.
        let ignore_block_before = loop {
            let Some(WatcherEvent::RepetitionStartEvent {
                ignore_block_before,
            }) = self.rx.recv().await
            else {
                continue;
            };
            break ignore_block_before;
        };

        // This is the set of the transaction hashes that the watcher should be looking for in the
        // mined blocks. The watcher keeps watching blocks until all of the transactions that it
        // was watching for have been seen in the mined blocks.
        let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));

        // A boolean that keeps track of whether all of the transactions were submitted or if more
        // txs are expected to come through the receive side of the channel. We do not want to
        // rely on the channel closing alone for the watcher to know that all of the transactions
        // were submitted; instead, an explicit event sent by the core orchestrator informs the
        // watcher that no further transactions are to be expected and that it can safely ignore
        // the channel.
        let all_transactions_submitted = Arc::new(RwLock::new(false));

        let watcher_event_watching_task = {
            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
            let all_transactions_submitted = all_transactions_submitted.clone();
            async move {
                while let Some(watcher_event) = self.rx.recv().await {
                    match watcher_event {
                        // Subsequent repetition starts are ignored since certain workloads can
                        // contain nested repetitions, and no action is needed when the
                        // repetitions are nested.
                        WatcherEvent::RepetitionStartEvent { .. } => {}
                        WatcherEvent::SubmittedTransaction { transaction_hash } => {
                            watch_for_transaction_hashes
                                .write()
                                .await
                                .insert(transaction_hash);
                        }
                        WatcherEvent::AllTransactionsSubmitted => {
                            *all_transactions_submitted.write().await = true;
                            self.rx.close();
                            info!("Watcher's Events Watching Task Finished");
                            break;
                        }
                    }
                }
            }
        };
        let block_information_watching_task = {
            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
            let all_transactions_submitted = all_transactions_submitted.clone();
            let mut blocks_information_stream = self.blocks_stream;
            async move {
                let mut mined_blocks_information = Vec::new();

                // region:TEMPORARY
                eprintln!("Watcher information for {}", self.platform_identifier);
                eprintln!("block_number,block_timestamp,mined_gas,block_gas_limit,tx_count");
                // endregion:TEMPORARY
                while let Some(block) = blocks_information_stream.next().await {
                    // If the block number is equal to or less than the last block before the
                    // repetition, then we ignore it and continue on to the next block.
                    if block.block_number <= ignore_block_before {
                        continue;
                    }

                    if *all_transactions_submitted.read().await
                        && watch_for_transaction_hashes.read().await.is_empty()
                    {
                        break;
                    }

                    info!(
                        block_number = block.block_number,
                        block_tx_count = block.transaction_hashes.len(),
                        remaining_transactions = watch_for_transaction_hashes.read().await.len(),
                        "Observed a block"
                    );

                    // Remove all of the transaction hashes observed in this block from the txs we
                    // are currently watching for.
                    let mut watch_for_transaction_hashes =
                        watch_for_transaction_hashes.write().await;
                    for tx_hash in block.transaction_hashes.iter() {
                        watch_for_transaction_hashes.remove(tx_hash);
                    }

                    // region:TEMPORARY
                    // TODO: The following code is TEMPORARY and will be removed once we have
                    // proper reporting in place. It serves as a way of doing some very simple
                    // reporting for the time being.
                    eprintln!(
                        "\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"",
                        block.block_number,
                        block.block_timestamp,
                        block.mined_gas,
                        block.block_gas_limit,
                        block.transaction_hashes.len()
                    );
                    // endregion:TEMPORARY

                    mined_blocks_information.push(block);
                }

                info!("Watcher's Block Watching Task Finished");
                mined_blocks_information
            }
        };

        let (_, _) =
            futures::future::join(watcher_event_watching_task, block_information_watching_task)
                .await;

        Ok(())
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum WatcherEvent {
    /// Informs the watcher that it should begin watching for the blocks mined by the platforms.
    /// Before the watcher receives this event it will not be watching for the mined blocks. The
    /// reason behind this is that we do not want the initialization transactions (e.g., contract
    /// deployments) to be included in the overall TPS and GPS measurements; those blocks will
    /// most likely contain only a single transaction since they're just being used for
    /// initialization.
    RepetitionStartEvent {
        /// This is the block number of the last block seen before the repetition started. It is
        /// used to instruct the watcher to ignore all blocks prior to this one when it starts
        /// streaming the blocks.
        ignore_block_before: BlockNumber,
    },

    /// Informs the watcher that a transaction was submitted and that the watcher should watch for
    /// a transaction with this hash in the blocks that it observes.
    SubmittedTransaction {
        /// The hash of the submitted transaction.
        transaction_hash: TxHash,
    },

    /// Informs the watcher that all of the transactions of this benchmark have been submitted,
    /// that no further transaction hashes are to be expected, and that it no longer needs to
    /// watch the channel.
    AllTransactionsSubmitted,
}
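
The protocol the watcher expects on its channel is ordered: one `RepetitionStartEvent` first, then any number of `SubmittedTransaction` events, and finally a single `AllTransactionsSubmitted`. A minimal sketch of a producer honoring that ordering (the channel type matches what `Watcher::new` hands back; the hash value is a placeholder):

use alloy::primitives::TxHash;
use tokio::sync::mpsc::unbounded_channel;

fn demo() {
    let (tx, _rx) = unbounded_channel::<WatcherEvent>();

    // 1. Tell the watcher which blocks to ignore before the repetition starts.
    tx.send(WatcherEvent::RepetitionStartEvent {
        ignore_block_before: 0,
    })
    .unwrap();

    // 2. Report every submitted transaction hash.
    tx.send(WatcherEvent::SubmittedTransaction {
        transaction_hash: TxHash::ZERO,
    })
    .unwrap();

    // 3. Signal that no further hashes will arrive.
    tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap();
}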
File diff suppressed because it is too large
@@ -0,0 +1,358 @@
//! The main entry point into differential testing.

use std::{
    collections::{BTreeMap, BTreeSet},
    io::{BufWriter, Write, stderr},
    sync::Arc,
    time::{Duration, Instant},
};

use ansi_term::{ANSIStrings, Color};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
use revive_dt_core::Platform;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};

use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};

use crate::{
    differential_tests::Driver,
    helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};

/// Handles the differential testing, executing it according to the information defined in the
/// context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_tests(
    context: TestExecutionContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    let reporter_clone = reporter.clone();

    // Discover all of the metadata files that are defined in the context.
    let metadata_files = collect_metadata_files(&context)
        .context("Failed to collect metadata files for differential testing")?;
    info!(len = metadata_files.len(), "Discovered metadata files");

    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    // Start the nodes of the various platforms specified in the context.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();

        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();

            let context = Context::Test(Box::new(context.clone()));
            let node_pool = NodePool::new(context, *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;

            map.insert(platform_identifier, (*platform, node_pool));
        }

        map
    };
    info!("Spawned the platform nodes");

    // Prepare the test definitions.
    let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
        Some(path) => {
            let report = read_to_string(path)
                .context("Failed to read the report file to ignore the succeeding test cases")?;
            Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
        }
        None => None,
    };
    let full_context = Context::Test(Box::new(context.clone()));
    let test_definitions = create_test_definitions_stream(
        &full_context,
        metadata_files.iter(),
        &platforms_and_nodes,
        only_execute_failed_tests.as_ref(),
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");

    // Create everything else required for the driver to run.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;
    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        context.wallet_configuration.highest_private_key_exclusive(),
    )));

    // Create the driver and execute all of the steps.
    let semaphore = context
        .concurrency_configuration
        .concurrency_limit()
        .map(Semaphore::new)
        .map(Arc::new);
    let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
    let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
        |(test_id, test_definition)| {
            let running_task_list = running_task_list.clone();
            let semaphore = semaphore.clone();

            let private_key_allocator = private_key_allocator.clone();
            let cached_compiler = cached_compiler.clone();
            let mode = test_definition.mode.clone();
            let span = info_span!(
                "Executing Test Case",
                test_id,
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %mode,
            );
            async move {
                let permit = match semaphore.as_ref() {
                    Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
                    None => None,
                };

                running_task_list.write().await.insert(test_id);
                let driver = match Driver::new_root(
                    test_definition,
                    private_key_allocator,
                    &cached_compiler,
                )
                .await
                {
                    Ok(driver) => driver,
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                        drop(permit);
                        running_task_list.write().await.remove(&test_id);
                        return;
                    }
                };
                info!("Created the driver for the test case");

                match driver.execute_all().await {
                    Ok(steps_executed) => test_definition
                        .reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                    }
                };
                info!("Finished the execution of the test case");
                drop(permit);
                running_task_list.write().await.remove(&test_id);
            }
            .instrument(span)
        },
    ))
    .inspect(|_| {
        info!("Finished executing all test cases");
        reporter_clone
            .report_completion_event()
            .expect("Can't fail")
    });
    let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);

    tokio::task::spawn(async move {
        loop {
            let remaining_tasks = running_task_list.read().await;
            info!(
                count = remaining_tasks.len(),
                ?remaining_tasks,
                "Remaining Tests"
            );
            tokio::time::sleep(Duration::from_secs(10)).await
        }
    });

    futures::future::join(driver_task, cli_reporting_task).await;

    Ok(())
}

#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    drop(reporter);

    let start = Instant::now();

    let mut global_success_count = 0;
    let mut global_failure_count = 0;
    let mut global_ignore_count = 0;

    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };

        match output_format {
            OutputFormat::Legacy => {
                let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
                for (case_idx, case_status) in case_status.into_iter() {
                    let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
                    let _ = match case_status {
                        TestCaseStatus::Succeeded { steps_executed } => {
                            global_success_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Green.bold().paint("Case Succeeded"),
                                    Color::Green
                                        .paint(format!(" - Steps Executed: {steps_executed}")),
                                ])
                            )
                        }
                        TestCaseStatus::Failed { reason } => {
                            global_failure_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Red.bold().paint("Case Failed"),
                                    Color::Red.paint(format!(" - Reason: {}", reason.trim())),
                                ])
                            )
                        }
                        TestCaseStatus::Ignored { reason, .. } => {
                            global_ignore_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Yellow.bold().paint("Case Ignored"),
                                    Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
                                ])
                            )
                        }
                    };
                }
                let _ = writeln!(buf);
            }
            OutputFormat::CargoTestLike => {
                writeln!(
                    buf,
                    "\t{} {} - {}\n",
                    Color::Green.paint("Running"),
                    metadata_file_path.display(),
                    mode
                )
                .unwrap();

                let mut success_count = 0;
                let mut failure_count = 0;
                let mut ignored_count = 0;
                writeln!(buf, "running {} tests", case_status.len()).unwrap();
                for (case_idx, case_result) in case_status.iter() {
                    let status = match case_result {
                        TestCaseStatus::Succeeded { .. } => {
                            success_count += 1;
                            global_success_count += 1;
                            Color::Green.paint("ok")
                        }
                        TestCaseStatus::Failed { reason } => {
                            failure_count += 1;
                            global_failure_count += 1;
                            Color::Red.paint(format!("FAILED, {reason}"))
                        }
                        TestCaseStatus::Ignored { reason, .. } => {
                            ignored_count += 1;
                            global_ignore_count += 1;
                            Color::Yellow.paint(format!("ignored, {reason:?}"))
                        }
                    };
                    writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
                }
                writeln!(buf).unwrap();

                let status = if failure_count > 0 {
                    Color::Red.paint("FAILED")
                } else {
                    Color::Green.paint("ok")
                };
                writeln!(
                    buf,
                    "test result: {}. {} passed; {} failed; {} ignored",
                    status, success_count, failure_count, ignored_count,
                )
                .unwrap();
                writeln!(buf).unwrap();

                buf = tokio::task::spawn_blocking(move || {
                    buf.flush().unwrap();
                    buf
                })
                .await
                .unwrap();
            }
        }
    }

    // Summary at the end.
    match output_format {
        OutputFormat::Legacy => {
            writeln!(
                buf,
                "{} cases: {} cases succeeded, {} cases failed in {} seconds",
                global_success_count + global_failure_count + global_ignore_count,
                Color::Green.paint(global_success_count.to_string()),
                Color::Red.paint(global_failure_count.to_string()),
                start.elapsed().as_secs()
            )
            .unwrap();
        }
        OutputFormat::CargoTestLike => {
            writeln!(
                buf,
                "run finished. {} passed; {} failed; {} ignored; finished in {}s",
                global_success_count,
                global_failure_count,
                global_ignore_count,
                start.elapsed().as_secs()
            )
            .unwrap();
        }
    }
}
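
The optional `Semaphore` above is what bounds how many test cases run at once: when a concurrency limit is configured, each case holds one permit for its entire run. A minimal sketch of the same gating pattern in isolation (the limit of 4 and the task count are arbitrary):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn run_gated() {
    let semaphore = Arc::new(Semaphore::new(4));
    let tasks = (0..16).map(|i| {
        let semaphore = semaphore.clone();
        async move {
            // At most four of these bodies run concurrently; the permit is
            // released when `_permit` is dropped at the end of the task.
            let _permit = semaphore.acquire().await.expect("semaphore never closes");
            println!("running test case {i}");
        }
    });
    futures::future::join_all(tasks).await;
}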
|
||||
@@ -0,0 +1,35 @@
|
||||
use std::{collections::HashMap, path::PathBuf};
|
||||
|
||||
use alloy::{
|
||||
json_abi::JsonAbi,
|
||||
primitives::{Address, U256},
|
||||
};
|
||||
|
||||
use revive_dt_format::metadata::{ContractIdent, ContractInstance};
|
||||
|
||||
#[derive(Clone)]
|
||||
/// The state associated with the test execution of one of the tests.
|
||||
pub struct ExecutionState {
|
||||
/// The compiled contracts, these contracts have been compiled and have had the libraries linked
|
||||
/// against them and therefore they're ready to be deployed on-demand.
|
||||
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||
|
||||
/// A map of all of the deployed contracts and information about them.
|
||||
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||
|
||||
/// This map stores the variables used for each one of the cases contained in the metadata file.
|
||||
pub variables: HashMap<String, U256>,
|
||||
}
|
||||
|
||||
impl ExecutionState {
|
||||
pub fn new(
|
||||
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||
) -> Self {
|
||||
Self {
|
||||
compiled_contracts,
|
||||
deployed_contracts,
|
||||
variables: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
//! This module contains all of the code responsible for performing differential tests including the
|
||||
//! driver implementation, state implementation, and the core logic that allows for tests to be
|
||||
//! executed.
|
||||
|
||||
mod driver;
|
||||
mod entry_point;
|
||||
mod execution_state;
|
||||
|
||||
pub use driver::*;
|
||||
pub use entry_point::*;
|
||||
pub use execution_state::*;
|
||||
@@ -1,856 +0,0 @@
|
||||
//! The test driver handles the compilation and execution of the test cases.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::marker::PhantomData;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use alloy::consensus::EMPTY_ROOT_HASH;
|
||||
use alloy::hex;
|
||||
use alloy::json_abi::JsonAbi;
|
||||
use alloy::network::{Ethereum, TransactionBuilder};
|
||||
use alloy::primitives::U256;
|
||||
use alloy::rpc::types::TransactionReceipt;
|
||||
use alloy::rpc::types::trace::geth::{
|
||||
CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
|
||||
GethDebugTracingOptions, GethTrace, PreStateConfig,
|
||||
};
|
||||
use alloy::{
|
||||
primitives::Address,
|
||||
rpc::types::{TransactionRequest, trace::geth::DiffMode},
|
||||
};
|
||||
use anyhow::Context as _;
|
||||
use futures::TryStreamExt;
|
||||
use indexmap::IndexMap;
|
||||
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
|
||||
use revive_dt_report::ExecutionSpecificReporter;
|
||||
use semver::Version;
|
||||
|
||||
use revive_dt_format::case::Case;
|
||||
use revive_dt_format::input::{
|
||||
BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, StepIdx,
|
||||
StorageEmptyAssertion,
|
||||
};
|
||||
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
|
||||
use revive_dt_format::{input::Step, metadata::Metadata};
|
||||
use revive_dt_node_interaction::EthereumNode;
|
||||
use tokio::try_join;
|
||||
use tracing::{Instrument, info, info_span, instrument};
|
||||
|
||||
use crate::Platform;
|
||||
|
||||
pub struct CaseState<T: Platform> {
|
||||
/// A map of all of the compiled contracts for the given metadata file.
|
||||
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||
|
||||
/// This map stores the contracts deployments for this case.
|
||||
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||
|
||||
/// This map stores the variables used for each one of the cases contained in the metadata
|
||||
/// file.
|
||||
variables: HashMap<String, U256>,
|
||||
|
||||
/// Stores the version used for the current case.
|
||||
compiler_version: Version,
|
||||
|
||||
/// The execution reporter.
|
||||
execution_reporter: ExecutionSpecificReporter,
|
||||
|
||||
phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> CaseState<T>
|
||||
where
|
||||
T: Platform,
|
||||
{
|
||||
pub fn new(
|
||||
compiler_version: Version,
|
||||
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||
execution_reporter: ExecutionSpecificReporter,
|
||||
) -> Self {
|
||||
Self {
|
||||
compiled_contracts,
|
||||
deployed_contracts,
|
||||
variables: Default::default(),
|
||||
compiler_version,
|
||||
execution_reporter,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_step(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
step: &Step,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<StepOutput> {
|
||||
match step {
|
||||
Step::FunctionCall(input) => {
|
||||
let (receipt, geth_trace, diff_mode) = self
|
||||
.handle_input(metadata, input, node)
|
||||
.await
|
||||
.context("Failed to handle function call step")?;
|
||||
Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
|
||||
}
|
||||
Step::BalanceAssertion(balance_assertion) => {
|
||||
self.handle_balance_assertion(metadata, balance_assertion, node)
|
||||
.await
|
||||
.context("Failed to handle balance assertion step")?;
|
||||
Ok(StepOutput::BalanceAssertion)
|
||||
}
|
||||
Step::StorageEmptyAssertion(storage_empty) => {
|
||||
self.handle_storage_empty(metadata, storage_empty, node)
|
||||
.await
|
||||
.context("Failed to handle storage empty assertion step")?;
|
||||
Ok(StepOutput::StorageEmptyAssertion)
|
||||
}
|
||||
}
|
||||
.inspect(|_| info!("Step Succeeded"))
|
||||
}
|
||||
|
||||
#[instrument(level = "info", name = "Handling Input", skip_all)]
|
||||
pub async fn handle_input(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
input: &Input,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
|
||||
let deployment_receipts = self
|
||||
.handle_input_contract_deployment(metadata, input, node)
|
||||
.await
|
||||
.context("Failed during contract deployment phase of input handling")?;
|
||||
let execution_receipt = self
|
||||
.handle_input_execution(input, deployment_receipts, node)
|
||||
.await
|
||||
.context("Failed during transaction execution phase of input handling")?;
|
||||
let tracing_result = self
|
||||
.handle_input_call_frame_tracing(&execution_receipt, node)
|
||||
.await
|
||||
.context("Failed during callframe tracing phase of input handling")?;
|
||||
self.handle_input_variable_assignment(input, &tracing_result)
|
||||
.context("Failed to assign variables from callframe output")?;
|
||||
let (_, (geth_trace, diff_mode)) = try_join!(
|
||||
self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
|
||||
self.handle_input_diff(&execution_receipt, node)
|
||||
)
|
||||
.context("Failed while evaluating expectations and diffs in parallel")?;
|
||||
Ok((execution_receipt, geth_trace, diff_mode))
|
||||
}
|
||||
|
||||
#[instrument(level = "info", name = "Handling Balance Assertion", skip_all)]
|
||||
pub async fn handle_balance_assertion(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
balance_assertion: &BalanceAssertion,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<()> {
|
||||
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
|
||||
.await
|
||||
.context("Failed to deploy contract for balance assertion")?;
|
||||
self.handle_balance_assertion_execution(balance_assertion, node)
|
||||
.await
|
||||
.context("Failed to execute balance assertion")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "info", name = "Handling Storage Assertion", skip_all)]
|
||||
pub async fn handle_storage_empty(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
storage_empty: &StorageEmptyAssertion,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<()> {
|
||||
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
|
||||
.await
|
||||
.context("Failed to deploy contract for storage empty assertion")?;
|
||||
self.handle_storage_empty_assertion_execution(storage_empty, node)
|
||||
.await
|
||||
.context("Failed to execute storage empty assertion")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles the contract deployment for a given input performing it if it needs to be performed.
|
||||
#[instrument(level = "info", skip_all)]
|
||||
async fn handle_input_contract_deployment(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
input: &Input,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
|
||||
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
||||
for instance in input.find_all_contract_instances().into_iter() {
|
||||
if !self.deployed_contracts.contains_key(&instance) {
|
||||
instances_we_must_deploy.entry(instance).or_insert(false);
|
||||
}
|
||||
}
|
||||
if let Method::Deployer = input.method {
|
||||
instances_we_must_deploy.swap_remove(&input.instance);
|
||||
instances_we_must_deploy.insert(input.instance.clone(), true);
|
||||
}
|
||||
|
||||
let mut receipts = HashMap::new();
|
||||
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
|
||||
let calldata = deploy_with_constructor_arguments.then_some(&input.calldata);
|
||||
let value = deploy_with_constructor_arguments
|
||||
.then_some(input.value)
|
||||
.flatten();
|
||||
|
||||
if let (_, _, Some(receipt)) = self
|
||||
.get_or_deploy_contract_instance(
|
||||
&instance,
|
||||
metadata,
|
||||
input.caller,
|
||||
calldata,
|
||||
value,
|
||||
node,
|
||||
)
|
||||
.await
|
||||
.context("Failed to get or deploy contract instance during input execution")?
|
||||
{
|
||||
receipts.insert(instance.clone(), receipt);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(receipts)
|
||||
}
|
||||
|
||||
/// Handles the execution of the input in terms of the calls that need to be made.
|
||||
#[instrument(level = "info", skip_all)]
|
||||
async fn handle_input_execution(
|
||||
&mut self,
|
||||
input: &Input,
|
||||
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<TransactionReceipt> {
|
||||
match input.method {
|
||||
// This input was already executed when `handle_input` was called. We just need to
|
||||
// lookup the transaction receipt in this case and continue on.
|
||||
Method::Deployer => deployment_receipts
|
||||
.remove(&input.instance)
|
||||
.context("Failed to find deployment receipt for constructor call"),
|
||||
Method::Fallback | Method::FunctionName(_) => {
|
||||
let tx = match input
|
||||
.legacy_transaction(node, self.default_resolution_context())
|
||||
.await
|
||||
{
|
||||
Ok(tx) => tx,
|
||||
Err(err) => {
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
match node.execute_transaction(tx).await {
|
||||
Ok(receipt) => Ok(receipt),
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all)]
|
||||
async fn handle_input_call_frame_tracing(
|
||||
&self,
|
||||
execution_receipt: &TransactionReceipt,
|
||||
node: &T::Blockchain,
|
||||
) -> anyhow::Result<CallFrame> {
|
||||
node.trace_transaction(
|
||||
execution_receipt,
|
||||
GethDebugTracingOptions {
|
||||
tracer: Some(GethDebugTracerType::BuiltInTracer(
|
||||
GethDebugBuiltInTracerType::CallTracer,
|
||||
)),
|
||||
tracer_config: GethDebugTracerConfig(serde_json::json! {{
|
||||
"onlyTopCall": true,
|
||||
"withLog": false,
|
||||
"withStorage": false,
|
||||
"withMemory": false,
|
||||
"withStack": false,
|
||||
"withReturnData": true
|
||||
}}),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map(|trace| {
|
||||
trace
|
||||
.try_into_call_frame()
|
||||
.expect("Impossible - we requested a callframe trace so we must get it back")
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all)]
|
||||
fn handle_input_variable_assignment(
|
||||
&mut self,
|
||||
input: &Input,
|
||||
tracing_result: &CallFrame,
|
||||
) -> anyhow::Result<()> {
|
||||
let Some(ref assignments) = input.variable_assignments else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
// Handling the return data variable assignments.
        for (variable_name, output_word) in assignments.return_data.iter().zip(
            tracing_result
                .output
                .as_ref()
                .unwrap_or_default()
                .to_vec()
                .chunks(32),
        ) {
            let value = U256::from_be_slice(output_word);
            self.variables.insert(variable_name.clone(), value);
            tracing::info!(
                variable_name,
                variable_value = hex::encode(value.to_be_bytes::<32>()),
                "Assigned variable"
            );
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectations(
        &self,
        input: &Input,
        execution_receipt: &TransactionReceipt,
        resolver: &impl ResolverApi,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        // Resolving the `input.expected` into a series of expectations that we can then assert on.
        let mut expectations = match input {
            Input {
                expected: Some(Expected::Calldata(calldata)),
                ..
            } => vec![ExpectedOutput::new().with_calldata(calldata.clone())],
            Input {
                expected: Some(Expected::Expected(expected)),
                ..
            } => vec![expected.clone()],
            Input {
                expected: Some(Expected::ExpectedMany(expected)),
                ..
            } => expected.clone(),
            Input { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
        };

        // This is a bit of a special case that we have to support separately on its own. If it's
        // a call to the deployer method, then the tests will assert that it "returns" the address
        // of the contract. Deployments do not return the address of the contract but rather its
        // runtime code, so this assertion would always fail. We therefore replace it with a plain
        // "did it succeed" assertion.
        if let Method::Deployer = &input.method {
            for expectation in expectations.iter_mut() {
                expectation.return_data = None;
            }
        }

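        // `None` places no bound on the concurrency: every expectation for this input is checked
        // against the same receipt and trace at the same time, and the first failed assertion
        // short-circuits the rest.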
        futures::stream::iter(expectations.into_iter().map(Ok))
            .try_for_each_concurrent(None, |expectation| async move {
                self.handle_input_expectation_item(
                    execution_receipt,
                    resolver,
                    expectation,
                    tracing_result,
                )
                .await
            })
            .await
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectation_item(
        &self,
        execution_receipt: &TransactionReceipt,
        resolver: &impl ResolverApi,
        expectation: ExpectedOutput,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        if let Some(ref version_requirement) = expectation.compiler_version {
            if !version_requirement.matches(&self.compiler_version) {
                return Ok(());
            }
        }

        let resolution_context = self
            .default_resolution_context()
            .with_block_number(execution_receipt.block_number.as_ref())
            .with_transaction_hash(&execution_receipt.transaction_hash);

        // Handling the receipt state assertion.
        let expected = !expectation.exception;
        let actual = execution_receipt.status();
        if actual != expected {
            tracing::error!(
                expected,
                actual,
                ?execution_receipt,
                ?tracing_result,
                "Transaction status assertion failed"
            );
            anyhow::bail!(
                "Transaction status assertion failed - Expected {expected} but got {actual}",
            );
        }

        // Handling the calldata assertion.
        if let Some(ref expected_calldata) = expectation.return_data {
            let expected = expected_calldata;
            let actual = &tracing_result.output.as_ref().unwrap_or_default();
            if !expected
                .is_equivalent(actual, resolver, resolution_context)
                .await
                .context("Failed to resolve calldata equivalence for return data assertion")?
            {
                tracing::error!(
                    ?execution_receipt,
                    ?expected,
                    %actual,
                    "Calldata assertion failed"
                );
                anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",);
            }
        }

        // Handling the events assertion.
        if let Some(ref expected_events) = expectation.events {
            // Handling the events length assertion.
            let expected = expected_events.len();
            let actual = execution_receipt.logs().len();
            if actual != expected {
                tracing::error!(expected, actual, "Event count assertion failed",);
                anyhow::bail!(
                    "Event count assertion failed - Expected {expected} but got {actual}",
                );
            }

            // Handling the per-event assertions.
            for (event_idx, (expected_event, actual_event)) in expected_events
                .iter()
                .zip(execution_receipt.logs())
                .enumerate()
            {
                // Handling the emitter assertion.
                if let Some(ref expected_address) = expected_event.address {
                    let expected = Address::from_slice(
                        Calldata::new_compound([expected_address])
                            .calldata(resolver, resolution_context)
                            .await?
                            .get(12..32)
                            .expect("Can't fail"),
                    );
                    let actual = actual_event.address();
                    if actual != expected {
                        tracing::error!(
                            event_idx,
                            %expected,
                            %actual,
                            "Event emitter assertion failed",
                        );
                        anyhow::bail!(
                            "Event emitter assertion failed - Expected {expected} but got {actual}",
                        );
                    }
                }

                // Handling the topics assertion.
                for (expected, actual) in expected_event
                    .topics
                    .as_slice()
                    .iter()
                    .zip(actual_event.topics())
                {
                    let expected = Calldata::new_compound([expected]);
                    if !expected
                        .is_equivalent(&actual.0, resolver, resolution_context)
                        .await
                        .context("Failed to resolve event topic equivalence")?
                    {
                        tracing::error!(
                            event_idx,
                            ?execution_receipt,
                            ?expected,
                            ?actual,
                            "Event topics assertion failed",
                        );
                        anyhow::bail!(
                            "Event topics assertion failed - Expected {expected:?} but got {actual:?}",
                        );
                    }
                }

                // Handling the values assertion.
                let expected = &expected_event.values;
                let actual = &actual_event.data().data;
                if !expected
                    .is_equivalent(&actual.0, resolver, resolution_context)
                    .await
                    .context("Failed to resolve event value equivalence")?
                {
                    tracing::error!(
                        event_idx,
                        ?execution_receipt,
                        ?expected,
                        ?actual,
                        "Event value assertion failed",
                    );
                    anyhow::bail!(
                        "Event value assertion failed - Expected {expected:?} but got {actual:?}",
                    );
                }
            }
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_diff(
        &self,
        execution_receipt: &TransactionReceipt,
        node: &T::Blockchain,
    ) -> anyhow::Result<(GethTrace, DiffMode)> {
        let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
            diff_mode: Some(true),
            disable_code: None,
            disable_storage: None,
        });

        let trace = node
            .trace_transaction(execution_receipt, trace_options)
            .await
            .context("Failed to obtain geth prestate tracer output")?;
        let diff = node
            .state_diff(execution_receipt)
            .await
            .context("Failed to obtain state diff for transaction")?;

        Ok((trace, diff))
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_contract_deployment(
        &mut self,
        metadata: &Metadata,
        balance_assertion: &BalanceAssertion,
        node: &T::Blockchain,
    ) -> anyhow::Result<()> {
        let Some(instance) = balance_assertion
            .address
            .strip_suffix(".address")
            .map(ContractInstance::new)
        else {
            return Ok(());
        };
        self.get_or_deploy_contract_instance(
            &instance,
            metadata,
            Input::default_caller(),
            None,
            None,
            node,
        )
        .await?;
        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_execution(
        &mut self,
        BalanceAssertion {
            address: address_string,
            expected_balance: amount,
            ..
        }: &BalanceAssertion,
        node: &T::Blockchain,
    ) -> anyhow::Result<()> {
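        // The address string is resolved through the calldata machinery, which encodes it as a
        // single 32-byte word; the low 20 bytes of that word are the address itself.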
        let address = Address::from_slice(
            Calldata::new_compound([address_string])
                .calldata(node, self.default_resolution_context())
                .await?
                .get(12..32)
                .expect("Can't fail"),
        );

        let balance = node.balance_of(address).await?;

        let expected = *amount;
        let actual = balance;
        if expected != actual {
            tracing::error!(%expected, %actual, %address, "Balance assertion failed");
            anyhow::bail!(
                "Balance assertion failed - Expected {} but got {} for {} resolved to {}",
                expected,
                actual,
                address_string,
                address,
            )
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_storage_empty_assertion_contract_deployment(
        &mut self,
        metadata: &Metadata,
        storage_empty_assertion: &StorageEmptyAssertion,
        node: &T::Blockchain,
    ) -> anyhow::Result<()> {
        let Some(instance) = storage_empty_assertion
            .address
            .strip_suffix(".address")
            .map(ContractInstance::new)
        else {
            return Ok(());
        };
        self.get_or_deploy_contract_instance(
            &instance,
            metadata,
            Input::default_caller(),
            None,
            None,
            node,
        )
        .await?;
        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_storage_empty_assertion_execution(
        &mut self,
        StorageEmptyAssertion {
            address: address_string,
            is_storage_empty,
            ..
        }: &StorageEmptyAssertion,
        node: &T::Blockchain,
    ) -> anyhow::Result<()> {
        let address = Address::from_slice(
            Calldata::new_compound([address_string])
                .calldata(node, self.default_resolution_context())
                .await?
                .get(12..32)
                .expect("Can't fail"),
        );

        let storage = node.latest_state_proof(address, Default::default()).await?;
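        // The state proof carries the account's storage root; an account with no storage slots
        // set reports the empty trie root, so comparing against `EMPTY_ROOT_HASH` is a direct
        // emptiness check.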
        let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;

        let expected = is_storage_empty;
        let actual = is_empty;

        if *expected != actual {
            tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed");
            anyhow::bail!(
                "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
                expected,
                actual,
                address_string,
                address,
            )
        };

        Ok(())
    }

    /// Gets the information of a deployed contract or library from the state. If it's found to
    /// not be deployed then it will be deployed.
    ///
    /// If a [`CaseIdx`] is not specified then this contract instance address will be stored in
    /// the cross-case deployed contracts address mapping.
    #[allow(clippy::too_many_arguments)]
    pub async fn get_or_deploy_contract_instance(
        &mut self,
        contract_instance: &ContractInstance,
        metadata: &Metadata,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
        node: &T::Blockchain,
    ) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
        if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
            return Ok((*address, abi.clone(), None));
        }

        let Some(ContractPathAndIdent {
            contract_source_path,
            contract_ident,
        }) = metadata.contract_sources()?.remove(contract_instance)
        else {
            anyhow::bail!(
                "Contract source not found for instance {:?}",
                contract_instance
            )
        };

        let Some((code, abi)) = self
            .compiled_contracts
            .get(&contract_source_path)
            .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
            .cloned()
        else {
            anyhow::bail!(
                "Failed to find information for contract {:?}",
                contract_instance
            )
        };

        let mut code = match alloy::hex::decode(&code) {
            Ok(code) => code,
            Err(error) => {
                tracing::error!(
                    ?error,
                    contract_source_path = contract_source_path.display().to_string(),
                    contract_ident = contract_ident.as_ref(),
                    "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
                );
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };

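        // Constructor arguments, when present, are ABI-encoded and appended to the init code;
        // this is how EVM-style deployment transactions pass constructor parameters.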
        if let Some(calldata) = calldata {
            let calldata = calldata
                .calldata(node, self.default_resolution_context())
                .await?;
            code.extend(calldata);
        }

        let tx = {
            let tx = TransactionRequest::default().from(deployer);
            let tx = match value {
                Some(ref value) => tx.value(value.into_inner()),
                _ => tx,
            };
            TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
        };

        let receipt = match node.execute_transaction(tx).await {
            Ok(receipt) => receipt,
            Err(error) => {
                tracing::error!(
                    node = std::any::type_name::<T>(),
                    ?error,
                    "Contract deployment transaction failed."
                );
                return Err(error);
            }
        };

        let Some(address) = receipt.contract_address else {
            anyhow::bail!("Contract deployment didn't return an address");
        };
        tracing::info!(
            instance_name = ?contract_instance,
            instance_address = ?address,
            "Deployed contract"
        );
        self.execution_reporter
            .report_contract_deployed_event(contract_instance.clone(), address)?;

        self.deployed_contracts.insert(
            contract_instance.clone(),
            (contract_ident, address, abi.clone()),
        );

        Ok((address, abi, Some(receipt)))
    }

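    /// Builds the base [`ResolutionContext`] used throughout this state: variable references and
    /// deployed contract instance references resolve against this state's maps.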
    fn default_resolution_context(&self) -> ResolutionContext<'_> {
        ResolutionContext::default()
            .with_deployed_contracts(&self.deployed_contracts)
            .with_variables(&self.variables)
    }
}

pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> {
    metadata: &'a Metadata,
    case: &'a Case,
    leader_node: &'a Leader::Blockchain,
    follower_node: &'a Follower::Blockchain,
    leader_state: CaseState<Leader>,
    follower_state: CaseState<Follower>,
}

impl<'a, L, F> CaseDriver<'a, L, F>
where
    L: Platform,
    F: Platform,
{
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        metadata: &'a Metadata,
        case: &'a Case,
        leader_node: &'a L::Blockchain,
        follower_node: &'a F::Blockchain,
        leader_state: CaseState<L>,
        follower_state: CaseState<F>,
    ) -> CaseDriver<'a, L, F> {
        Self {
            metadata,
            case,
            leader_node,
            follower_node,
            leader_state,
            follower_state,
        }
    }

    #[instrument(level = "info", name = "Executing Case", skip_all)]
    pub async fn execute(&mut self) -> anyhow::Result<usize> {
        let mut steps_executed = 0;
        for (step_idx, step) in self
            .case
            .steps_iterator()
            .enumerate()
            .map(|(idx, v)| (StepIdx::new(idx), v))
        {
            let (leader_step_output, follower_step_output) = try_join!(
                self.leader_state
                    .handle_step(self.metadata, &step, self.leader_node)
                    .instrument(info_span!(
                        "Handling Step",
                        %step_idx,
                        target = "Leader",
                    )),
                self.follower_state
                    .handle_step(self.metadata, &step, self.follower_node)
                    .instrument(info_span!(
                        "Handling Step",
                        %step_idx,
                        target = "Follower",
                    ))
            )?;

            match (leader_step_output, follower_step_output) {
                (StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => {
                    // TODO: We still need to work out how (or whether) we will compare the diff
                    // between the leader and the follower. The two diffs are almost guaranteed
                    // to differ, so without an actual strategy this comparison is guaranteed to
                    // fail. Even a simple call to some contract produces two non-equal diffs:
                    // on the leader the contract has address X and on the follower it has
                    // address Y, and leader contract X contains address A in its state where
                    // the follower's contains address B. This is not a straightforward thing to
                    // do and it may not even be possible. Once we have an actual strategy for
                    // comparing the diffs we will implement it here. Until then, this remains
                    // empty.
                }
                (StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {}
                (StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {}
                _ => unreachable!("The two step outputs cannot be of different kinds"),
            }

            steps_executed += 1;
        }

        Ok(steps_executed)
    }
}

#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
pub enum StepOutput {
    FunctionCall(TransactionReceipt, GethTrace, DiffMode),
    BalanceAssertion,
    StorageEmptyAssertion,
}

@@ -5,13 +5,13 @@ use std::{
    borrow::Cow,
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
    sync::{Arc, LazyLock},
};

use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_config::TestingPlatform;
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};

use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -19,11 +19,9 @@ use anyhow::{Context as _, Error, Result};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, debug, debug_span, instrument};

use crate::Platform;

pub struct CachedCompiler<'a> {
    /// The cache that stores the compiled contracts.
    artifacts_cache: ArtifactsCache,
@@ -57,21 +55,22 @@ impl<'a> CachedCompiler<'a> {
        fields(
            metadata_file_path = %metadata_file_path.display(),
            %mode,
            platform = P::config_id().to_string()
            platform = %platform.platform_identifier()
        ),
        err
    )]
    pub async fn compile_contracts<P: Platform>(
    pub async fn compile_contracts(
        &self,
        metadata: &'a Metadata,
        metadata_file_path: &'a Path,
        mode: Cow<'a, Mode>,
        deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        compiler: &P::Compiler,
        compiler: &dyn SolidityCompiler,
        platform: &dyn Platform,
        reporter: &ExecutionSpecificReporter,
    ) -> Result<CompilerOutput> {
        let cache_key = CacheKey {
            platform_key: P::config_id(),
            compiler_identifier: platform.compiler_identifier(),
            compiler_version: compiler.version().clone(),
            metadata_file_path,
            solc_mode: mode.clone(),
@@ -79,7 +78,7 @@ impl<'a> CachedCompiler<'a> {

        let compilation_callback = || {
            async move {
                compile_contracts::<P>(
                compile_contracts(
                    metadata
                        .directory()
                        .context("Failed to get metadata directory while preparing compilation")?,
@@ -96,7 +95,7 @@ impl<'a> CachedCompiler<'a> {
            }
            .instrument(debug_span!(
                "Running compilation for the cache key",
                cache_key.platform_key = %cache_key.platform_key,
                cache_key.compiler_identifier = %cache_key.compiler_identifier,
                cache_key.compiler_version = %cache_key.compiler_version,
                cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
                cache_key.solc_mode = %cache_key.solc_mode,
@@ -166,10 +165,22 @@ impl<'a> CachedCompiler<'a> {
                cache_value.compiler_output
            }
            None => {
                compilation_callback()
                let compiler_output = compilation_callback()
                    .await
                    .context("Compilation callback failed (cache miss path)")?
                    .compiler_output
                    .compiler_output;
                self.artifacts_cache
                    .insert(
                        &cache_key,
                        &CacheValue {
                            compiler_output: compiler_output.clone(),
                        },
                    )
                    .await
                    .context(
                        "Failed to write the cached value of the compilation artifacts",
                    )?;
                compiler_output
            }
        }
    }
@@ -179,14 +190,20 @@ impl<'a> CachedCompiler<'a> {
    }
}

async fn compile_contracts<P: Platform>(
async fn compile_contracts(
    metadata_directory: impl AsRef<Path>,
    mut files_to_compile: impl Iterator<Item = PathBuf>,
    mode: &Mode,
    deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
    compiler: &P::Compiler,
    compiler: &dyn SolidityCompiler,
    reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
    // Puts a limit on how many compilations we can perform at any given instant, which helps
    // with some of the errors we have been seeing under high concurrency on macOS (we have not
    // tried it on Linux, so we don't know whether the same issues occur there).
    static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
    let _permit = SPAWN_GATE.acquire().await?;

    let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
@@ -332,9 +349,8 @@ impl ArtifactsCache {

#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
    /// The platform name that this artifact was compiled for. For example, this could be EVM or
    /// PVM.
    platform_key: &'a TestingPlatform,
    /// The identifier of the compiler used.
    compiler_identifier: CompilerIdentifier,

    /// The version of the compiler that was used to compile the artifacts.
    compiler_version: Version,
@@ -0,0 +1,33 @@
use revive_dt_config::CorpusConfiguration;
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
use tracing::{info, info_span, instrument};

/// Given an object that implements [`AsRef<CorpusConfiguration>`], this function finds all of the
/// corpus files and produces a list containing all of the [`MetadataFile`]s discovered.
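///
/// A minimal usage sketch (assumes a `context` value that exposes a [`CorpusConfiguration`]):
///
/// ```ignore
/// let metadata_files = collect_metadata_files(&context)?;
/// for file in &metadata_files {
///     println!("{}", file.metadata_file_path.display());
/// }
/// ```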
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
pub fn collect_metadata_files(
    context: impl AsRef<CorpusConfiguration>,
) -> anyhow::Result<Vec<MetadataFile>> {
    let mut metadata_files = Vec::new();

    let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
    for path in &corpus_configuration.paths {
        let span = info_span!("Processing corpus file", path = %path.display());
        let _guard = span.enter();

        let corpus = Corpus::try_from_path(path)?;
        info!(
            name = corpus.name(),
            number_of_contained_paths = corpus.path_count(),
            "Deserialized corpus file"
        );
        metadata_files.extend(corpus.enumerate_tests());
    }

    // Several paths may lead to the same metadata files, so it's important that we sort and
    // then deduplicate them.
    metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
    metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);

    Ok(metadata_files)
}
@@ -0,0 +1,9 @@
mod cached_compiler;
mod metadata;
mod pool;
mod test;

pub use cached_compiler::*;
pub use metadata::*;
pub use pool::*;
pub use test::*;
@@ -0,0 +1,59 @@
//! This crate implements concurrent handling of testing nodes.

use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode;

/// The node pool starts one or more [Node]s, which can then be accessed
/// in a round-robin fashion.
pub struct NodePool {
    next: AtomicUsize,
    nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
}

impl NodePool {
    /// Create a new pool. This will start as many nodes as there are workers in `config`.
    pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let nodes = concurrency_configuration.number_of_nodes;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            handles.push(platform.new_node(context)?);
        }

        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .context("Node failed to spawn")?,
            );
        }

        let pre_transactions_tasks = nodes
            .iter_mut()
            .map(|node| node.pre_transactions())
            .collect::<Vec<_>>();
        futures::future::try_join_all(pre_transactions_tasks)
            .await
            .context("Failed to run the pre-transactions task")?;

        Ok(Self {
            nodes,
            next: Default::default(),
        })
    }

    /// Get a handle to the next node.
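    ///
    /// A minimal usage sketch (assumes a pool that was already constructed via
    /// [`NodePool::new`]):
    ///
    /// ```ignore
    /// let pool = NodePool::new(context, platform).await?;
    /// let node = pool.round_robbin();
    /// tracing::info!(id = node.id(), "Picked a node");
    /// ```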
    pub fn round_robbin(&self) -> &dyn EthereumNode {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap().as_ref()
    }
}
@@ -0,0 +1,359 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{borrow::Cow, path::Path};

use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{ParsedMode, PlatformIdentifier};
use revive_dt_config::Context;
use serde_json::{Value, json};

use revive_dt_compiler::Mode;
use revive_dt_compiler::SolidityCompiler;
use revive_dt_format::{
    case::{Case, CaseIdx},
    metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};

use crate::Platform;
use crate::helpers::NodePool;

pub async fn create_test_definitions_stream<'a>(
    // This is only required for creating the compiler objects and is not used anywhere else in
    // the function.
    context: &Context,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
    platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
    only_execute_failed_tests: Option<&Report>,
    reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
    stream::iter(
        metadata_files
            .into_iter()
            // Flatten over the cases.
            .flat_map(|metadata_file| {
                metadata_file
                    .cases
                    .iter()
                    .enumerate()
                    .map(move |(case_idx, case)| (metadata_file, case_idx, case))
            })
            // Flatten over the modes, preferring the case modes over the metadata file modes.
            .flat_map(move |(metadata_file, case_idx, case)| {
                let reporter = reporter.clone();

                let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
                let modes = match modes {
                    Some(modes) => EitherIter::A(
                        ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                    ),
                    None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
                };

                modes.into_iter().map(move |mode| {
                    (
                        metadata_file,
                        case_idx,
                        case,
                        mode.clone(),
                        reporter.test_specific_reporter(Arc::new(TestSpecifier {
                            solc_mode: mode.as_ref().clone(),
                            metadata_file_path: metadata_file.metadata_file_path.clone(),
                            case_idx: CaseIdx::new(case_idx),
                        })),
                    )
                })
            })
            // Inform the reporter of each test case that was discovered and that we expect to
            // run.
            .inspect(|(_, _, _, _, reporter)| {
                reporter
                    .report_test_case_discovery_event()
                    .expect("Can't fail");
            }),
    )
    // Creating the Test Definition objects from all of the various objects we have and creating
    // their required dependencies (e.g., compiler).
    .filter_map(
        move |(metadata_file, case_idx, case, mode, reporter)| async move {
            let mut platforms = BTreeMap::new();
            for (platform, node_pool) in platforms_and_nodes.values() {
                let node = node_pool.round_robbin();
                let compiler = platform
                    .new_compiler(context.clone(), mode.version.clone().map(Into::into))
                    .await
                    .inspect_err(|err| {
                        error!(
                            ?err,
                            platform_identifier = %platform.platform_identifier(),
                            "Failed to instantiate the compiler"
                        )
                    })
                    .ok()?;

                reporter
                    .report_node_assigned_event(
                        node.id(),
                        platform.platform_identifier(),
                        node.connection_string(),
                    )
                    .expect("Can't fail");

                let reporter =
                    reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

                platforms.insert(
                    platform.platform_identifier(),
                    TestPlatformInformation {
                        platform: *platform,
                        node,
                        compiler,
                        reporter,
                    },
                );
            }

            Some(TestDefinition {
                /* Metadata file information */
                metadata: metadata_file,
                metadata_file_path: metadata_file.metadata_file_path.as_path(),

                /* Mode Information */
                mode: mode.clone(),

                /* Case Information */
                case_idx: CaseIdx::new(case_idx),
                case,

                /* Platform and Node Assignment Information */
                platforms,

                /* Reporter */
                reporter,
            })
        },
    )
    // Filter out the test cases which are incompatible or that can't run in the current setup.
    .filter_map(move |test| async move {
        match test.check_compatibility(only_execute_failed_tests) {
            Ok(()) => Some(test),
            Err((reason, additional_information)) => {
                debug!(
                    metadata_file_path = %test.metadata.metadata_file_path.display(),
                    case_idx = %test.case_idx,
                    mode = %test.mode,
                    reason,
                    additional_information =
                        serde_json::to_string(&additional_information).unwrap(),
                    "Ignoring Test Case"
                );
                test.reporter
                    .report_test_ignored_event(
                        reason.to_string(),
                        additional_information
                            .into_iter()
                            .map(|(k, v)| (k.into(), v))
                            .collect::<IndexMap<_, _>>(),
                    )
                    .expect("Can't fail");
                None
            }
        }
    })
    .inspect(|test| {
        info!(
            metadata_file_path = %test.metadata_file_path.display(),
            case_idx = %test.case_idx,
            mode = %test.mode,
            "Created a test case definition"
        );
    })
}

/// A full description of a differential test to run: the full metadata file, the specific case
/// to be tested, the platforms the test should run on, the specific nodes of those platforms
/// that it should run on, the compilers to use, and everything else needed to make it a complete
/// description.
pub struct TestDefinition<'a> {
    /* Metadata file information */
    pub metadata: &'a MetadataFile,
    pub metadata_file_path: &'a Path,

    /* Mode Information */
    pub mode: Cow<'a, Mode>,

    /* Case Information */
    pub case_idx: CaseIdx,
    pub case: &'a Case,

    /* Platform and Node Assignment Information */
    pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,

    /* Reporter */
    pub reporter: TestSpecificReporter,
}

impl<'a> TestDefinition<'a> {
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(
        &self,
        only_execute_failed_tests: Option<&Report>,
    ) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        self.check_ignore_succeeded(only_execute_failed_tests)?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(required_vm_identifiers) => {
                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
                }
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms does not support the targets required by the test.",
                error_map,
            ))
        }
    }

    /// Checks the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };

        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform =
                evm_version_requirement.matches(&platform_information.node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible with the platforms specified",
                error_map,
            ))
        }
    }

    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = platform_information
                .compiler
                .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "The compilers for the provided platforms do not support this mode.",
                error_map,
            ))
        }
    }

    /// Checks if the test case should be executed or not, based on the passed report and whether
    /// the user has instructed the tool to ignore the already succeeding test cases.
    fn check_ignore_succeeded(
        &self,
        only_execute_failed_tests: Option<&Report>,
    ) -> TestCheckFunctionResult {
        let Some(report) = only_execute_failed_tests else {
            return Ok(());
        };

        let test_case_status = report
            .test_case_information
            .get(&(self.metadata_file_path.to_path_buf().into()))
            .and_then(|obj| obj.get(&self.mode))
            .and_then(|obj| obj.get(&self.case_idx))
            .and_then(|obj| obj.status.as_ref());

        match test_case_status {
            Some(TestCaseStatus::Failed { .. }) => Ok(()),
            Some(TestCaseStatus::Ignored { .. }) => Err((
                "Ignored since it was ignored in a previous run",
                indexmap! {},
            )),
            Some(TestCaseStatus::Succeeded { .. }) => {
                Err(("Ignored since it succeeded in a prior run", indexmap! {}))
            }
            None => Ok(()),
        }
    }
}

pub struct TestPlatformInformation<'a> {
    pub platform: &'a dyn Platform,
    pub node: &'a dyn EthereumNode,
    pub compiler: Box<dyn SolidityCompiler>,
    pub reporter: ExecutionSpecificReporter,
}

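/// The result of a single compatibility check: `Ok(())` when the test can run, otherwise a
/// static human-readable reason paired with structured details that end up in the ignore report.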
type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
+517 -27
@@ -3,45 +3,535 @@
//! This crate defines the testing configuration and
//! provides a helper utility to execute tests.

use revive_dt_compiler::{SolidityCompiler, revive_resolc, solc};
use revive_dt_config::TestingPlatform;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node::{Node, geth, kitchensink::KitchensinkNode};
use std::{
    pin::Pin,
    thread::{self, JoinHandle},
};

use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*;
use revive_dt_node::{
    Node, node_implementations::geth::GethNode,
    node_implementations::lighthouse_geth::LighthouseGethNode,
    node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;

pub mod driver;

/// One platform can be tested differentially against another.
///
/// For this we need a blockchain node implementation and a compiler.
/// A trait that describes the interface for the platforms that are supported by the tool.
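///
/// A minimal consumption sketch (hypothetical caller code; assumes a `Context` built from the
/// tool's configuration):
///
/// ```ignore
/// let platform: &dyn Platform = PlatformIdentifier::GethEvmSolc.into();
/// let compiler = platform.new_compiler(context.clone(), None).await?;
/// let node_handle = platform.new_node(context)?;
/// ```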
#[allow(clippy::type_complexity)]
pub trait Platform {
    type Blockchain: EthereumNode + Node + ResolverApi;
    type Compiler: SolidityCompiler;
    /// Returns the identifier of this platform. This is a combination of the node and the compiler
    /// used.
    fn platform_identifier(&self) -> PlatformIdentifier;

    /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
    fn config_id() -> &'static TestingPlatform;
    /// Returns a full identifier for the platform.
    fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
        (
            self.node_identifier(),
            self.vm_identifier(),
            self.compiler_identifier(),
        )
    }

    /// Returns the identifier of the node used.
    fn node_identifier(&self) -> NodeIdentifier;

    /// Returns the identifier of the vm used.
    fn vm_identifier(&self) -> VmIdentifier;

    /// Returns the identifier of the compiler used.
    fn compiler_identifier(&self) -> CompilerIdentifier;

    /// Creates a new node for the platform by spawning a new thread, creating the node object,
    /// initializing it, spawning it, and waiting for it to start up.
    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;

    /// Creates a new compiler for the provided platform
    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
}

#[derive(Default)]
pub struct Geth;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct GethEvmSolcPlatform;

impl Platform for Geth {
    type Blockchain = geth::GethNode;
    type Compiler = solc::Solc;
impl Platform for GethEvmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::GethEvmSolc
    }

    fn config_id() -> &'static TestingPlatform {
        &TestingPlatform::Geth
    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Geth
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = GethNode::new(context);
            let node = spawn_node::<GethNode>(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Default)]
pub struct Kitchensink;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct LighthouseGethEvmSolcPlatform;

impl Platform for Kitchensink {
    type Blockchain = KitchensinkNode;
    type Compiler = revive_resolc::Resolc;
impl Platform for LighthouseGethEvmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::LighthouseGethEvmSolc
    }

    fn config_id() -> &'static TestingPlatform {
        &TestingPlatform::Kitchensink
    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::LighthouseGeth
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = LighthouseGethNode::new(context);
            let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkPolkavmResolcPlatform;

impl Platform for KitchensinkPolkavmResolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::KitchensinkPolkavmResolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Kitchensink
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::PolkaVM
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Resolc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                kitchensink_path,
                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                None,
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Resolc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkRevmSolcPlatform;

impl Platform for KitchensinkRevmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::KitchensinkRevmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Kitchensink
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                kitchensink_path,
                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                None,
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodePolkavmResolcPlatform;

impl Platform for ReviveDevNodePolkavmResolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ReviveDevNodePolkavmResolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::ReviveDevNode
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::PolkaVM
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Resolc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);

        let revive_dev_node_path = revive_dev_node_configuration.path.clone();
        let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();

        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                revive_dev_node_path,
                SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
                Some(revive_dev_node_consensus),
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Resolc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodeRevmSolcPlatform;

impl Platform for ReviveDevNodeRevmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ReviveDevNodeRevmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::ReviveDevNode
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);

        let revive_dev_node_path = revive_dev_node_configuration.path.clone();
        let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();

        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                revive_dev_node_path,
                SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
                Some(revive_dev_node_consensus),
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetPolkavmResolcPlatform;

impl Platform for ZombienetPolkavmResolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ZombienetPolkavmResolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Zombienet
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::PolkaVM
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Resolc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = ZombienetNode::new(polkadot_parachain_path, context);
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Resolc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetRevmSolcPlatform;

impl Platform for ZombienetRevmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ZombienetRevmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Zombienet
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = ZombienetNode::new(polkadot_parachain_path, context);
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

impl From<PlatformIdentifier> for Box<dyn Platform> {
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
            PlatformIdentifier::LighthouseGethEvmSolc => {
                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
            }
            PlatformIdentifier::KitchensinkPolkavmResolc => {
                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
            }
            PlatformIdentifier::KitchensinkRevmSolc => {
                Box::new(KitchensinkRevmSolcPlatform) as Box<_>
            }
            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
            }
            PlatformIdentifier::ReviveDevNodeRevmSolc => {
                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
            }
            PlatformIdentifier::ZombienetPolkavmResolc => {
                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
            }
            PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
        }
    }
}

impl From<PlatformIdentifier> for &dyn Platform {
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
            PlatformIdentifier::LighthouseGethEvmSolc => {
                &LighthouseGethEvmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::KitchensinkPolkavmResolc => {
                &KitchensinkPolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::KitchensinkRevmSolc => {
                &KitchensinkRevmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ReviveDevNodeRevmSolc => {
                &ReviveDevNodeRevmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ZombienetPolkavmResolc => {
                &ZombienetPolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
        }
    }
}

fn spawn_node<T: Node + EthereumNode + Send + Sync>(
|
||||
mut node: T,
|
||||
genesis: Genesis,
|
||||
) -> anyhow::Result<T> {
|
||||
info!(
|
||||
id = node.id(),
|
||||
connection_string = node.connection_string(),
|
||||
"Spawning node"
|
||||
);
|
||||
node.spawn(genesis)
|
||||
.context("Failed to spawn node process")?;
|
||||
info!(
|
||||
id = node.id(),
|
||||
connection_string = node.connection_string(),
|
||||
"Spawned node"
|
||||
);
|
||||
Ok(node)
|
||||
}
|
||||
|
||||
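// Usage sketch (illustrative, not part of the repository): going from a
// CLI-level `PlatformIdentifier` to a running node composes the `From` impls
// above with `Platform::new_node`. `context` is assumed to be a fully built
// `Context`.
fn spawn_platform_node(
    identifier: PlatformIdentifier,
    context: Context,
) -> anyhow::Result<Box<dyn EthereumNode + Send + Sync>> {
    // Dispatch from the identifier to a concrete platform behind a trait object.
    let platform: &dyn Platform = identifier.into();
    // `new_node` spawns the node on a background thread; joining the handle
    // yields the node once it is up (or its startup error).
    platform
        .new_node(context)?
        .join()
        .expect("node thread panicked")
}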
+42 -780
@@ -1,51 +1,21 @@
mod cached_compiler;
mod differential_benchmarks;
mod differential_tests;
mod helpers;

use std::{
    borrow::Cow,
    collections::{BTreeMap, HashMap},
    io::{BufWriter, Write, stderr},
    path::Path,
    sync::Arc,
    time::Instant,
};

use alloy::{
    network::{Ethereum, TransactionBuilder},
    rpc::types::TransactionRequest,
};
use anyhow::Context as _;
use clap::Parser;
use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::{IndexMap, indexmap};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
    NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
    TestSpecificReporter, TestSpecifier,
};
use revive_dt_report::ReportAggregator;
use schemars::schema_for;
use serde_json::{Value, json};
use tokio::try_join;
use tracing::{debug, error, info, info_span, instrument};
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};

use revive_dt_common::{iterators::EitherIter, types::Mode};
use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_config::{Context, *};
use revive_dt_core::{
    Geth, Kitchensink, Platform,
    driver::{CaseDriver, CaseState},
};
use revive_dt_format::{
    case::{Case, CaseIdx},
    corpus::Corpus,
    input::{Input, Step},
    metadata::{ContractPathAndIdent, Metadata, MetadataFile},
    mode::ParsedMode,
};
use revive_dt_node::{Node, pool::NodePool};
use revive_dt_config::Context;
use revive_dt_core::Platform;
use revive_dt_format::metadata::Metadata;

use crate::cached_compiler::CachedCompiler;
use crate::{
    differential_benchmarks::handle_differential_benchmarks,
    differential_tests::handle_differential_tests,
};

fn main() -> anyhow::Result<()> {
    let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
@@ -71,37 +41,37 @@ fn main() -> anyhow::Result<()> {
    let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();

    match context {
        Context::ExecuteTests(context) => {
            let tests = collect_corpora(&context)
                .context("Failed to collect corpus files from provided arguments")?
                .into_iter()
                .inspect(|(corpus, _)| {
                    reporter
                        .report_corpus_file_discovery_event(corpus.clone())
                        .expect("Can't fail")
                })
                .flat_map(|(_, files)| files.into_iter())
                .inspect(|metadata_file| {
                    reporter
                        .report_metadata_file_discovery_event(
                            metadata_file.metadata_file_path.clone(),
                            metadata_file.content.clone(),
                        )
                        .expect("Can't fail")
                })
                .collect::<Vec<_>>();
        Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_tests_handling_task =
                    handle_differential_tests(*context, reporter);

            tokio::runtime::Builder::new_multi_thread()
                .worker_threads(context.concurrency_configuration.number_of_threads)
                .enable_all()
                .build()
                .expect("Failed building the Runtime")
                .block_on(async move {
                    execute_corpus(*context, &tests, reporter, report_aggregator_task)
                        .await
                        .context("Failed to execute corpus")
                })
        }
                futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
                    .await?;

                Ok(())
            }),
        Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_benchmarks_handling_task =
                    handle_differential_benchmarks(*context, reporter);

                futures::future::try_join(
                    differential_benchmarks_handling_task,
                    report_aggregator_task,
                )
                .await?;

                Ok(())
            }),
        Context::ExportJsonSchema => {
            let schema = schema_for!(Metadata);
            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
@@ -109,711 +79,3 @@ fn main() -> anyhow::Result<()> {
        }
    }
}

#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(
    context: &ExecutionContext,
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
    let mut corpora = HashMap::new();

    for path in &context.corpus {
        let span = info_span!("Processing corpus file", path = %path.display());
        let _guard = span.enter();

        let corpus = Corpus::try_from_path(path)?;
        info!(
            name = corpus.name(),
            number_of_contained_paths = corpus.path_count(),
            "Deserialized corpus file"
        );
        let tests = corpus.enumerate_tests();
        corpora.insert(corpus, tests);
    }

    Ok(corpora)
}

async fn run_driver<L, F>(
    context: ExecutionContext,
    metadata_files: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()>
where
    L: Platform,
    F: Platform,
    L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
    let leader_nodes = NodePool::<L::Blockchain>::new(context.clone())
        .context("Failed to initialize leader node pool")?;
    let follower_nodes = NodePool::<F::Blockchain>::new(context.clone())
        .context("Failed to initialize follower node pool")?;

    let tests_stream = tests_stream(
        &context,
        metadata_files.iter(),
        &leader_nodes,
        &follower_nodes,
        reporter.clone(),
    )
    .await;
    let driver_task = start_driver_task::<L, F>(&context, tests_stream)
        .await
        .context("Failed to start driver task")?;
    let cli_reporting_task = start_cli_reporting_task(reporter);

    let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
    rtn?;

    Ok(())
}

async fn tests_stream<'a, L, F>(
    args: &ExecutionContext,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
    leader_node_pool: &'a NodePool<L::Blockchain>,
    follower_node_pool: &'a NodePool<F::Blockchain>,
    reporter: Reporter,
) -> impl Stream<Item = Test<'a, L, F>>
where
    L: Platform,
    F: Platform,
    L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
    let tests = metadata_files
        .into_iter()
        .flat_map(|metadata_file| {
            metadata_file
                .cases
                .iter()
                .enumerate()
                .map(move |(case_idx, case)| (metadata_file, case_idx, case))
        })
        // Flatten over the modes, prefer the case modes over the metadata file modes.
        .flat_map(|(metadata_file, case_idx, case)| {
            let reporter = reporter.clone();

            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
            let modes = match modes {
                Some(modes) => EitherIter::A(
                    ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                ),
                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
            };

            modes.into_iter().map(move |mode| {
                (
                    metadata_file,
                    case_idx,
                    case,
                    mode.clone(),
                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
                        solc_mode: mode.as_ref().clone(),
                        metadata_file_path: metadata_file.metadata_file_path.clone(),
                        case_idx: CaseIdx::new(case_idx),
                    })),
                )
            })
        })
        .collect::<Vec<_>>();

    // Note: before we do any kind of filtering or process the iterator in any way, we need to
    // inform the report aggregator of all of the cases that were found as it keeps a state of the
    // test cases for its internal use.
    for (_, _, _, _, reporter) in tests.iter() {
        reporter
            .report_test_case_discovery_event()
            .expect("Can't fail")
    }

    stream::iter(tests.into_iter())
        .filter_map(
            move |(metadata_file, case_idx, case, mode, reporter)| async move {
                let leader_compiler = <L::Compiler as SolidityCompiler>::new(
                    args,
                    mode.version.clone().map(Into::into),
                )
                .await
                .inspect_err(|err| error!(?err, "Failed to instantiate the leader compiler"))
                .ok()?;

                let follower_compiler = <F::Compiler as SolidityCompiler>::new(
                    args,
                    mode.version.clone().map(Into::into),
                )
                .await
                .inspect_err(|err| error!(?err, "Failed to instantiate the follower compiler"))
                .ok()?;

                let leader_node = leader_node_pool.round_robbin();
                let follower_node = follower_node_pool.round_robbin();

                Some(Test::<L, F> {
                    metadata: metadata_file,
                    metadata_file_path: metadata_file.metadata_file_path.as_path(),
                    mode: mode.clone(),
                    case_idx: CaseIdx::new(case_idx),
                    case,
                    leader_node,
                    follower_node,
                    leader_compiler,
                    follower_compiler,
                    reporter,
                })
            },
        )
        .filter_map(move |test| async move {
            match test.check_compatibility() {
                Ok(()) => Some(test),
                Err((reason, additional_information)) => {
                    debug!(
                        metadata_file_path = %test.metadata.metadata_file_path.display(),
                        case_idx = %test.case_idx,
                        mode = %test.mode,
                        reason,
                        additional_information =
                            serde_json::to_string(&additional_information).unwrap(),
                        "Ignoring Test Case"
                    );
                    test.reporter
                        .report_test_ignored_event(
                            reason.to_string(),
                            additional_information
                                .into_iter()
                                .map(|(k, v)| (k.into(), v))
                                .collect::<IndexMap<_, _>>(),
                        )
                        .expect("Can't fail");
                    None
                }
            }
        })
}

async fn start_driver_task<'a, L, F>(
    context: &ExecutionContext,
    tests: impl Stream<Item = Test<'a, L, F>>,
) -> anyhow::Result<impl Future<Output = ()>>
where
    L: Platform,
    F: Platform,
    L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    L::Compiler: 'a,
    F::Compiler: 'a,
{
    info!("Starting driver task");

    let cached_compiler = Arc::new(
        CachedCompiler::new(
            context
                .working_directory
                .as_path()
                .join("compilation_cache"),
            context
                .compilation_configuration
                .invalidate_compilation_cache,
        )
        .await
        .context("Failed to initialize cached compiler")?,
    );

    Ok(tests.for_each_concurrent(
        context.concurrency_configuration.concurrency_limit(),
        move |test| {
            let cached_compiler = cached_compiler.clone();

            async move {
                test.reporter
                    .report_leader_node_assigned_event(
                        test.leader_node.id(),
                        *L::config_id(),
                        test.leader_node.connection_string(),
                    )
                    .expect("Can't fail");
                test.reporter
                    .report_follower_node_assigned_event(
                        test.follower_node.id(),
                        *F::config_id(),
                        test.follower_node.connection_string(),
                    )
                    .expect("Can't fail");

                let reporter = test.reporter.clone();
                let result = handle_case_driver::<L, F>(test, cached_compiler).await;

                match result {
                    Ok(steps_executed) => reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => reporter
                        .report_test_failed_event(format!("{error:#}"))
                        .expect("Can't fail"),
                }
            }
        },
    ))
}

#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    drop(reporter);

    let start = Instant::now();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const GREY: &str = "\x1B[90m";
    const COLOR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    let mut number_of_successes = 0;
    let mut number_of_failures = 0;

    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };

        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
        for (case_idx, case_status) in case_status.into_iter() {
            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
            let _ = match case_status {
                TestCaseStatus::Succeeded { steps_executed } => {
                    number_of_successes += 1;
                    writeln!(
                        buf,
                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
                    )
                }
                TestCaseStatus::Failed { reason } => {
                    number_of_failures += 1;
                    writeln!(
                        buf,
                        "{}{}Case Failed{} - Reason: {}{}",
                        RED,
                        BOLD,
                        BOLD_RESET,
                        reason.trim(),
                        COLOR_RESET,
                    )
                }
                TestCaseStatus::Ignored { reason, .. } => writeln!(
                    buf,
                    "{}{}Case Ignored{} - Reason: {}{}",
                    GREY,
                    BOLD,
                    BOLD_RESET,
                    reason.trim(),
                    COLOR_RESET,
                ),
            };
        }
        let _ = writeln!(buf);
    }

    // Summary at the end.
    let _ = writeln!(
        buf,
        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
        number_of_successes + number_of_failures,
        GREEN,
        number_of_successes,
        COLOR_RESET,
        RED,
        number_of_failures,
        COLOR_RESET,
        start.elapsed().as_secs()
    );
}
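// Editorial note (inferred from the function below, not repository text): the
// case driver compiles twice. The first, pre-link compilation produces the
// bytecode of any declared libraries; those libraries are deployed to both the
// leader and the follower nodes, and the recorded addresses feed a second,
// post-link compilation before the case itself is driven against both nodes.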
#[allow(clippy::too_many_arguments)]
#[instrument(
    level = "info",
    name = "Handling Case",
    skip_all,
    fields(
        metadata_file_path = %test.metadata.relative_path().display(),
        mode = %test.mode,
        case_idx = %test.case_idx,
        case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
        leader_node = test.leader_node.id(),
        follower_node = test.follower_node.id(),
    )
)]
async fn handle_case_driver<'a, L, F>(
    test: Test<'a, L, F>,
    cached_compiler: Arc<CachedCompiler<'a>>,
) -> anyhow::Result<usize>
where
    L: Platform,
    F: Platform,
    L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
    L::Compiler: 'a,
    F::Compiler: 'a,
{
    let leader_reporter = test
        .reporter
        .execution_specific_reporter(test.leader_node.id(), NodeDesignation::Leader);
    let follower_reporter = test
        .reporter
        .execution_specific_reporter(test.follower_node.id(), NodeDesignation::Follower);

    let (
        CompilerOutput {
            contracts: leader_pre_link_contracts,
        },
        CompilerOutput {
            contracts: follower_pre_link_contracts,
        },
    ) = try_join!(
        cached_compiler.compile_contracts::<L>(
            test.metadata,
            test.metadata_file_path,
            test.mode.clone(),
            None,
            &test.leader_compiler,
            &leader_reporter,
        ),
        cached_compiler.compile_contracts::<F>(
            test.metadata,
            test.metadata_file_path,
            test.mode.clone(),
            None,
            &test.follower_compiler,
            &follower_reporter
        )
    )
    .context("Failed to compile pre-link contracts for leader/follower in parallel")?;

    let mut leader_deployed_libraries = None::<HashMap<_, _>>;
    let mut follower_deployed_libraries = None::<HashMap<_, _>>;
    let mut contract_sources = test
        .metadata
        .contract_sources()
        .context("Failed to retrieve contract sources from metadata")?;
    for library_instance in test
        .metadata
        .libraries
        .iter()
        .flatten()
        .flat_map(|(_, map)| map.values())
    {
        debug!(%library_instance, "Deploying Library Instance");

        let ContractPathAndIdent {
            contract_source_path: library_source_path,
            contract_ident: library_ident,
        } = contract_sources
            .remove(library_instance)
            .context("Failed to find the contract source")?;

        let (leader_code, leader_abi) = leader_pre_link_contracts
            .get(&library_source_path)
            .and_then(|contracts| contracts.get(library_ident.as_str()))
            .context("Declared library was not compiled")?;
        let (follower_code, follower_abi) = follower_pre_link_contracts
            .get(&library_source_path)
            .and_then(|contracts| contracts.get(library_ident.as_str()))
            .context("Declared library was not compiled")?;

        let leader_code = match alloy::hex::decode(leader_code) {
            Ok(code) => code,
            Err(error) => {
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };
        let follower_code = match alloy::hex::decode(follower_code) {
            Ok(code) => code,
            Err(error) => {
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };

        // Getting the deployer address from the cases themselves. This is to ensure that we're
        // doing the deployments from different accounts and therefore we're not slowed down by
        // the nonce.
        let deployer_address = test
            .case
            .steps
            .iter()
            .filter_map(|step| match step {
                Step::FunctionCall(input) => Some(input.caller),
                Step::BalanceAssertion(..) => None,
                Step::StorageEmptyAssertion(..) => None,
            })
            .next()
            .unwrap_or(Input::default_caller());
        let leader_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
            TransactionRequest::default().from(deployer_address),
            leader_code,
        );
        let follower_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
            TransactionRequest::default().from(deployer_address),
            follower_code,
        );

        let (leader_receipt, follower_receipt) = try_join!(
            test.leader_node.execute_transaction(leader_tx),
            test.follower_node.execute_transaction(follower_tx)
        )?;

        debug!(
            ?library_instance,
            library_address = ?leader_receipt.contract_address,
            "Deployed library to leader"
        );
        debug!(
            ?library_instance,
            library_address = ?follower_receipt.contract_address,
            "Deployed library to follower"
        );

        let leader_library_address = leader_receipt
            .contract_address
            .context("Contract deployment didn't return an address")?;
        let follower_library_address = follower_receipt
            .contract_address
            .context("Contract deployment didn't return an address")?;

        leader_deployed_libraries.get_or_insert_default().insert(
            library_instance.clone(),
            (
                library_ident.clone(),
                leader_library_address,
                leader_abi.clone(),
            ),
        );
        follower_deployed_libraries.get_or_insert_default().insert(
            library_instance.clone(),
            (
                library_ident,
                follower_library_address,
                follower_abi.clone(),
            ),
        );
    }
    if let Some(ref leader_deployed_libraries) = leader_deployed_libraries {
        leader_reporter.report_libraries_deployed_event(
            leader_deployed_libraries
                .clone()
                .into_iter()
                .map(|(key, (_, address, _))| (key, address))
                .collect::<BTreeMap<_, _>>(),
        )?;
    }
    if let Some(ref follower_deployed_libraries) = follower_deployed_libraries {
        follower_reporter.report_libraries_deployed_event(
            follower_deployed_libraries
                .clone()
                .into_iter()
                .map(|(key, (_, address, _))| (key, address))
                .collect::<BTreeMap<_, _>>(),
        )?;
    }

    let (
        CompilerOutput {
            contracts: leader_post_link_contracts,
        },
        CompilerOutput {
            contracts: follower_post_link_contracts,
        },
    ) = try_join!(
        cached_compiler.compile_contracts::<L>(
            test.metadata,
            test.metadata_file_path,
            test.mode.clone(),
            leader_deployed_libraries.as_ref(),
            &test.leader_compiler,
            &leader_reporter,
        ),
        cached_compiler.compile_contracts::<F>(
            test.metadata,
            test.metadata_file_path,
            test.mode.clone(),
            follower_deployed_libraries.as_ref(),
            &test.follower_compiler,
            &follower_reporter
        )
    )
    .context("Failed to compile post-link contracts for leader/follower in parallel")?;

    let leader_state = CaseState::<L>::new(
        test.leader_compiler.version().clone(),
        leader_post_link_contracts,
        leader_deployed_libraries.unwrap_or_default(),
        leader_reporter,
    );
    let follower_state = CaseState::<F>::new(
        test.follower_compiler.version().clone(),
        follower_post_link_contracts,
        follower_deployed_libraries.unwrap_or_default(),
        follower_reporter,
    );

    let mut driver = CaseDriver::<L, F>::new(
        test.metadata,
        test.case,
        test.leader_node,
        test.follower_node,
        leader_state,
        follower_state,
    );
    driver
        .execute()
        .await
        .inspect(|steps_executed| info!(steps_executed, "Case succeeded"))
}

async fn execute_corpus(
    context: ExecutionContext,
    tests: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
    match (&context.leader, &context.follower) {
        (TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
            run_driver::<Geth, Kitchensink>(context, tests, reporter, report_aggregator_task)
                .await?
        }
        (TestingPlatform::Geth, TestingPlatform::Geth) => {
            run_driver::<Geth, Geth>(context, tests, reporter, report_aggregator_task).await?
        }
        _ => unimplemented!(),
    }

    Ok(())
}
/// This represents a single "test": a mode, a path, and a collection of cases.
#[derive(Clone)]
struct Test<'a, L: Platform, F: Platform> {
    metadata: &'a MetadataFile,
    metadata_file_path: &'a Path,
    mode: Cow<'a, Mode>,
    case_idx: CaseIdx,
    case: &'a Case,
    leader_node: &'a <L as Platform>::Blockchain,
    follower_node: &'a <F as Platform>::Blockchain,
    leader_compiler: L::Compiler,
    follower_compiler: F::Compiler,
    reporter: TestSpecificReporter,
}

impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the leader and the follower both support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let leader_support =
            <L::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
        let follower_support =
            <F::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
        let is_allowed = leader_support && follower_support;

        if is_allowed {
            Ok(())
        } else {
            Err((
                "Either the leader or the follower does not support the target desired by the test.",
                indexmap! {
                    "test_desired_targets" => json!(self.metadata.targets.as_ref()),
                    "leader_support" => json!(leader_support),
                    "follower_support" => json!(follower_support),
                },
            ))
        }
    }

    /// Checks for the compatibility of the EVM version with the leader and follower nodes.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };

        let leader_support = evm_version_requirement
            .matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
        let follower_support = evm_version_requirement
            .matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
        let is_allowed = leader_support && follower_support;

        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible with either the leader or the follower.",
                indexmap! {
                    "test_desired_evm_version" => json!(self.metadata.required_evm_version),
                    "leader_support" => json!(leader_support),
                    "follower_support" => json!(follower_support),
                },
            ))
        }
    }

    /// Checks if the leader and follower compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let leader_support = self
            .leader_compiler
            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
        let follower_support = self
            .follower_compiler
            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
        let is_allowed = leader_support && follower_support;

        if is_allowed {
            Ok(())
        } else {
            Err((
                "Compilers do not support this mode either for the leader or for the follower.",
                indexmap! {
                    "mode" => json!(self.mode),
                    "leader_support" => json!(leader_support),
                    "follower_support" => json!(follower_support),
                },
            ))
        }
    }
}

type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
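// A minimal sketch (hypothetical check, not in the repository) of the
// check-function pattern used by `Test::check_compatibility` above: each check
// returns `Ok(())` or a static reason plus a map of structured details that
// end up in the "test ignored" report.
fn check_example_capability(supported: bool) -> TestCheckFunctionResult {
    if supported {
        Ok(())
    } else {
        Err((
            "Example capability is not supported.",
            indexmap! {
                "supported" => json!(supported),
            },
        ))
    }
}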
@@ -14,11 +14,8 @@ revive-dt-common = { workspace = true }
revive-common = { workspace = true }

alloy = { workspace = true }
alloy-primitives = { workspace = true }
alloy-sol-types = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
tracing = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true }

@@ -1,13 +1,13 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use revive_dt_common::{macros::define_wrapper_type, types::Mode};

use crate::{
    input::{Expected, Step},
    mode::ParsedMode,
use revive_dt_common::{
    macros::define_wrapper_type,
    types::{Mode, ParsedMode},
};

use crate::steps::*;

#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
    /// An optional name of the test case.
@@ -55,7 +55,6 @@ pub struct Case {
}

impl Case {
    #[allow(irrefutable_let_patterns)]
    pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
        let steps_len = self.steps.len();
        self.steps
@@ -84,6 +83,24 @@ impl Case {
            })
    }

    pub fn steps_iterator_for_benchmarks(
        &self,
        default_repeat_count: usize,
    ) -> Box<dyn Iterator<Item = Step> + '_> {
        let contains_repeat = self
            .steps_iterator()
            .any(|step| matches!(&step, Step::Repeat(..)));
        if contains_repeat {
            Box::new(self.steps_iterator()) as Box<_>
        } else {
            Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                comment: None,
                repeat: default_repeat_count,
                steps: self.steps_iterator().collect(),
            })))) as Box<_>
        }
    }

    pub fn solc_modes(&self) -> Vec<Mode> {
        match &self.modes {
            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),

@@ -2,7 +2,6 @@

pub mod case;
pub mod corpus;
pub mod input;
pub mod metadata;
pub mod mode;
pub mod steps;
pub mod traits;

@@ -13,12 +13,14 @@ use serde::{Deserialize, Serialize};

use revive_common::EVMVersion;
use revive_dt_common::{
    cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type,
    types::Mode,
    cached_fs::read_to_string,
    iterators::FilesWithExtensionIterator,
    macros::define_wrapper_type,
    types::{Mode, ParsedMode, VmIdentifier},
};
use tracing::error;

use crate::{case::Case, mode::ParsedMode};
use crate::case::Case;

pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
@@ -81,7 +83,7 @@ pub struct Metadata {
    /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
    /// specify a target of "PolkaVM" in here.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub targets: Option<Vec<String>>,
    pub targets: Option<Vec<VmIdentifier>>,

    /// A vector of the test cases and workloads contained within the metadata file. This is their
    /// primary description.

@@ -1,257 +0,0 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;

/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
                ^
                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
                \s*
                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
                \s*
                (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
                $
            ").unwrap()
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };

        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");

        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
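// Hedged sketch of the deduplication behaviour documented on `many_to_modes`:
// "Y" expands to {Y M0, Y M3} and "Y+" to {Y M3}, so their union still has
// exactly two modes. Assumes the `FromStr` impl above is in scope.
#[test]
fn many_to_modes_deduplicates() {
    let parsed = ["Y", "Y+"].map(|s| ParsedMode::from_str(s).unwrap());
    let modes: HashSet<String> = ParsedMode::many_to_modes(parsed.iter())
        .map(|m| m.to_string())
        .collect();
    assert_eq!(modes.len(), 2);
}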
@@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::{collections::HashMap, fmt::Display, str::FromStr};

use alloy::primitives::{FixedBytes, utils::parse_units};
use alloy::{
    eips::BlockNumberOrTag,
    json_abi::Function,
@@ -7,7 +8,6 @@ use alloy::{
    primitives::{Address, Bytes, U256},
    rpc::types::TransactionRequest,
};
use alloy_primitives::{FixedBytes, utils::parse_units};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
use schemars::JsonSchema;
@@ -28,33 +28,104 @@ use crate::{metadata::ContractInstance, traits::ResolutionContext};
#[serde(untagged)]
pub enum Step {
    /// A function call or an invocation to some function on some smart contract.
    FunctionCall(Box<Input>),
    FunctionCall(Box<FunctionCallStep>),

    /// A step for performing a balance assertion on some account or contract.
    BalanceAssertion(Box<BalanceAssertion>),
    BalanceAssertion(Box<BalanceAssertionStep>),

    /// A step for asserting that the storage of some contract or account is empty.
    StorageEmptyAssertion(Box<StorageEmptyAssertion>),
    StorageEmptyAssertion(Box<StorageEmptyAssertionStep>),

    /// A special step for repeating a bunch of steps a certain number of times.
    Repeat(Box<RepeatStep>),

    /// A step type that allows for a new account address to be allocated and to later on be used
    /// as the caller in another step.
    AllocateAccount(Box<AllocateAccountStep>),
}

define_wrapper_type!(
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
    pub struct StepIdx(usize) impl Display;
    pub struct StepIdx(usize) impl Display, FromStr;
);

define_wrapper_type!(
    #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
    #[serde(try_from = "String", into = "String")]
    pub struct StepPath(Vec<StepIdx>);
);

impl StepPath {
    pub fn from_iterator(path: impl IntoIterator<Item = impl Into<StepIdx>>) -> Self {
        Self(path.into_iter().map(|value| value.into()).collect())
    }

    pub fn increment(&self) -> Self {
        let mut this = self.clone();
        if let Some(last) = this.last_mut() {
            last.0 += 1
        }
        this
    }

    pub fn append(&self, step_idx: impl Into<StepIdx>) -> Self {
        let mut this = self.clone();
        this.0.push(step_idx.into());
        this
    }
}

impl Display for StepPath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0
            .iter()
            .map(|idx| idx.to_string())
            .collect::<Vec<_>>()
            .join(".")
            .fmt(f)
    }
}

impl FromStr for StepPath {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        s.split(".")
            .map(StepIdx::from_str)
            .collect::<anyhow::Result<Vec<_>>>()
            .map(Self)
    }
}

impl From<StepPath> for String {
    fn from(value: StepPath) -> Self {
        value.to_string()
    }
}

impl TryFrom<String> for StepPath {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        value.parse()
    }
}
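// Usage sketch (assumed, not repository code): step paths are dot-separated
// indices that round-trip through `Display`/`FromStr`, and `increment` bumps
// the last index. Assumes `usize: Into<StepIdx>` from the wrapper macro.
#[test]
fn step_path_round_trip() {
    let path = StepPath::from_iterator([0usize, 1, 2]);
    assert_eq!(path.to_string(), "0.1.2");
    assert_eq!(path.increment().to_string(), "0.1.3");
    assert_eq!("0.1.2".parse::<StepPath>().unwrap(), path);
}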
/// This is an input step which is a transaction description that the framework translates into a
/// transaction and executes on the nodes.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct Input {
pub struct FunctionCallStep {
    /// The address of the account performing the call and paying the fees for it.
    #[serde(default = "Input::default_caller")]
    #[serde(default = "FunctionCallStep::default_caller")]
    #[schemars(with = "String")]
    pub caller: Address,
    pub caller: StepAddress,

    /// An optional comment on the step which has no impact on the execution in any way.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// The contract instance that's being called in this transaction step.
    #[serde(default = "Input::default_instance")]
    #[serde(default = "FunctionCallStep::default_instance")]
    pub instance: ContractInstance,

    /// The method that's being called in this step.
@@ -84,8 +155,8 @@ pub struct Input {

/// This represents a balance assertion step where the framework needs to query the balance of some
/// account or contract and assert that it's some amount.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertion {
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertionStep {
    /// An optional comment on the balance assertion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
@@ -96,7 +167,7 @@ pub struct BalanceAssertion {
    /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
    /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
    /// followed in the calldata.
    pub address: String,
    pub address: StepAddress,

    /// The amount of balance to assert that the account or contract has. This is a 256 bit string
    /// that's serialized and deserialized into a decimal string.
@@ -104,8 +175,10 @@ pub struct BalanceAssertion {
    pub expected_balance: U256,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertion {
/// This represents an assertion for the storage of some contract or account and whether it's empty
/// or not.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertionStep {
    /// An optional comment on the storage empty assertion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
@@ -116,12 +189,40 @@ pub struct StorageEmptyAssertion {
    /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
    /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
    /// followed in the calldata.
    pub address: String,
    pub address: StepAddress,

    /// A boolean of whether the storage of the address is empty or not.
    pub is_storage_empty: bool,
}

/// This represents a repetition step which is a special step type that allows for a sequence of
/// steps to be repeated (on different drivers) a certain number of times.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct RepeatStep {
    /// An optional comment on the repetition step.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// The number of repetitions that the steps should be repeated for.
    pub repeat: usize,

    /// The sequence of steps to repeat for the above defined number of repetitions.
    pub steps: Vec<Step>,
}

#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct AllocateAccountStep {
    /// An optional comment on the account allocation step.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// An instruction to allocate a new account with the value being the variable name of that
    /// account. This must start with `$VARIABLE:` and then be followed by the variable name of the
    /// account.
    #[serde(rename = "allocate_account")]
    pub variable_name: String,
}

/// A set of expectations and assertions to make about the transaction after it ran.
///
/// If this is not specified then the only assertion that will be ran is that the transaction
@@ -162,7 +263,7 @@ pub struct ExpectedOutput {
pub struct Event {
    /// An optional field of the address of the emitter of the event.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
    pub address: Option<StepAddress>,

    /// The set of topics to expect the event to have.
    pub topics: Vec<String>,
@@ -295,20 +396,81 @@ pub struct VariableAssignments {
    pub return_data: Vec<String>,
}

impl Input {
    pub const fn default_caller() -> Address {
/// An address type that might either be an address literal or a resolvable address.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
#[schemars(with = "String")]
#[serde(untagged)]
pub enum StepAddress {
    Address(Address),
    ResolvableAddress(String),
}

impl Default for StepAddress {
    fn default() -> Self {
        Self::Address(Default::default())
    }
}

impl Display for StepAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            StepAddress::Address(address) => Display::fmt(address, f),
            StepAddress::ResolvableAddress(address) => Display::fmt(address, f),
        }
    }
}

impl StepAddress {
    pub fn as_address(&self) -> Option<&Address> {
        match self {
            StepAddress::Address(address) => Some(address),
            StepAddress::ResolvableAddress(_) => None,
        }
    }

    pub fn as_resolvable_address(&self) -> Option<&str> {
        match self {
            StepAddress::ResolvableAddress(address) => Some(address),
            StepAddress::Address(..) => None,
        }
    }

    pub async fn resolve_address(
        &self,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<Address> {
        match self {
            StepAddress::Address(address) => Ok(*address),
            StepAddress::ResolvableAddress(address) => Ok(Address::from_slice(
                Calldata::new_compound([address])
                    .calldata(resolver, context)
                    .await?
                    .get(12..32)
                    .expect("Can't fail"),
            )),
        }
    }
}
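// Hedged sketch of the two `StepAddress` shapes: with `#[serde(untagged)]`, a
// string that parses as a 20-byte hex address becomes `Address`, while
// anything else (e.g. "Test.address") stays a `ResolvableAddress` and is
// resolved later via `resolve_address`. The variant ordering assumption about
// serde's untagged behaviour is ours, not the repository's.
#[test]
fn step_address_shapes() {
    let literal: StepAddress =
        serde_json::from_str(r#""0x1000000000000000000000000000000000000001""#).unwrap();
    assert!(literal.as_address().is_some());

    let symbolic: StepAddress = serde_json::from_str(r#""Test.address""#).unwrap();
    assert_eq!(symbolic.as_resolvable_address(), Some("Test.address"));
}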
impl FunctionCallStep {
    pub const fn default_caller_address() -> Address {
        Address(FixedBytes(alloy::hex!(
            "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"
        )))
    }

    pub const fn default_caller() -> StepAddress {
        StepAddress::Address(Self::default_caller_address())
    }

    fn default_instance() -> ContractInstance {
        ContractInstance::new("Test")
    }

    pub async fn encoded_input(
        &self,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<Bytes> {
        match self.method {
@@ -375,16 +537,17 @@ impl Input {
    }

    /// Parse this input into a legacy transaction.
    pub async fn legacy_transaction(
    pub async fn as_transaction(
        &self,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<TransactionRequest> {
        let input_data = self
            .encoded_input(resolver, context)
            .await
            .context("Failed to encode input bytes for transaction request")?;
        let transaction_request = TransactionRequest::default().from(self.caller).value(
        let caller = self.caller.resolve_address(resolver, context).await?;
        let transaction_request = TransactionRequest::default().from(caller).value(
            self.value
                .map(|value| value.into_inner())
                .unwrap_or_default(),
@@ -466,7 +629,7 @@ impl Calldata {

    pub async fn calldata(
        &self,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<Vec<u8>> {
        let mut buffer = Vec::<u8>::with_capacity(self.size_requirement());
@@ -478,7 +641,7 @@ impl Calldata {
    pub async fn calldata_into_slice(
        &self,
        buffer: &mut Vec<u8>,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<()> {
        match self {
@@ -515,7 +678,7 @@ impl Calldata {
    pub async fn is_equivalent(
        &self,
        other: &[u8],
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<bool> {
        match self {
@@ -557,7 +720,7 @@ impl CalldataItem {
    #[instrument(level = "info", skip_all, err)]
    async fn resolve(
        &self,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<U256> {
        let mut stack = Vec::<CalldataToken<U256>>::new();
@@ -662,7 +825,7 @@ impl<T: AsRef<str>> CalldataToken<T> {
    /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146
    async fn resolve(
        self,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
        context: ResolutionContext<'_>,
    ) -> anyhow::Result<CalldataToken<U256>> {
        match self {
@@ -695,7 +858,7 @@ impl<T: AsRef<str>> CalldataToken<T> {
                context
                    .transaction_hash()
                    .context("No transaction hash provided to get the transaction gas price")
                    .map(|tx_hash| resolver.transaction_gas_price(tx_hash))?
                    .map(|tx_hash| resolver.transaction_gas_price(*tx_hash))?
                    .await
                    .map(U256::from)
            } else if item == Self::GAS_LIMIT_VARIABLE {
@@ -796,10 +959,10 @@ impl<'de> Deserialize<'de> for EtherValue {
#[cfg(test)]
mod tests {

    use alloy::primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
    use alloy::sol_types::SolValue;
    use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
    use alloy_primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
    use alloy_sol_types::SolValue;
    use std::collections::HashMap;
    use std::{collections::HashMap, pin::Pin};

    use super::*;
    use crate::metadata::ContractIdent;
@@ -807,40 +970,63 @@ mod tests {
    struct MockResolver;

    impl ResolverApi for MockResolver {
        async fn chain_id(&self) -> anyhow::Result<ChainId> {
            Ok(0x123)
        fn chain_id(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<ChainId>> + '_>> {
            Box::pin(async move { Ok(0x123) })
        }

        async fn block_gas_limit(&self, _: BlockNumberOrTag) -> anyhow::Result<u128> {
            Ok(0x1234)
        fn block_gas_limit(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
            Box::pin(async move { Ok(0x1234) })
        }

        async fn block_coinbase(&self, _: BlockNumberOrTag) -> anyhow::Result<Address> {
            Ok(Address::ZERO)
        fn block_coinbase(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
            Box::pin(async move { Ok(Address::ZERO) })
        }

        async fn block_difficulty(&self, _: BlockNumberOrTag) -> anyhow::Result<U256> {
            Ok(U256::from(0x12345u128))
        fn block_difficulty(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
            Box::pin(async move { Ok(U256::from(0x12345u128)) })
        }

        async fn block_base_fee(&self, _: BlockNumberOrTag) -> anyhow::Result<u64> {
            Ok(0x100)
        fn block_base_fee(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
            Box::pin(async move { Ok(0x100) })
        }

        async fn block_hash(&self, _: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
            Ok([0xEE; 32].into())
        fn block_hash(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
            Box::pin(async move { Ok([0xEE; 32].into()) })
        }

        async fn block_timestamp(&self, _: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
            Ok(0x123456)
        fn block_timestamp(
            &self,
            _: BlockNumberOrTag,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
            Box::pin(async move { Ok(0x123456) })
        }

        async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
            Ok(0x1234567)
        fn last_block_number(
            &self,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
            Box::pin(async move { Ok(0x1234567) })
        }

        async fn transaction_gas_price(&self, _: &TxHash) -> anyhow::Result<u128> {
            Ok(0x200)
        fn transaction_gas_price(
            &self,
            _: TxHash,
        ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
            Box::pin(async move { Ok(0x200) })
        }
    }
@@ -867,7 +1053,7 @@ mod tests {
            .selector()
            .0;

        let input = Input {
        let input = FunctionCallStep {
            instance: ContractInstance::new("Contract"),
            method: Method::FunctionName("store".to_owned()),
            calldata: Calldata::new_compound(["42"]),
@@ -911,7 +1097,7 @@
            .selector()
            .0;

        let input: Input = Input {
        let input: FunctionCallStep = FunctionCallStep {
            instance: "Contract".to_owned().into(),
            method: Method::FunctionName("send(address)".to_owned()),
            calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]),
@@ -929,7 +1115,7 @@
        let encoded = input.encoded_input(&resolver, context).await.unwrap();
        assert!(encoded.0.starts_with(&selector));

        type T = (alloy_primitives::Address,);
        type T = (alloy::primitives::Address,);
        let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
        assert_eq!(
            decoded.0,
@@ -958,7 +1144,7 @@
            .selector()
            .0;

        let input: Input = Input {
        let input: FunctionCallStep = FunctionCallStep {
            instance: ContractInstance::new("Contract"),
            method: Method::FunctionName("send".to_owned()),
            calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]),
@@ -976,7 +1162,7 @@
        let encoded = input.encoded_input(&resolver, context).await.unwrap();
        assert!(encoded.0.starts_with(&selector));

        type T = (alloy_primitives::Address,);
        type T = (alloy::primitives::Address,);
        let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
        assert_eq!(
            decoded.0,
@@ -987,7 +1173,7 @@ mod tests {
    async fn resolve_calldata_item(
        input: &str,
        deployed_contracts: &HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
        resolver: &impl ResolverApi,
        resolver: &(impl ResolverApi + ?Sized),
    ) -> anyhow::Result<U256> {
        let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts);
        CalldataItem::new(input).resolve(resolver, context).await
@@ -1,9 +1,10 @@
use std::collections::HashMap;
use std::pin::Pin;

use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::TxHash;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use alloy_primitives::TxHash;
use anyhow::Result;

use crate::metadata::{ContractIdent, ContractInstance};
@@ -12,36 +13,54 @@ use crate::metadata::{ContractIdent, ContractInstance};
/// crate implements to go from string calldata to bytes calldata.
pub trait ResolverApi {
    /// Returns the ID of the chain that the node is on.
    fn chain_id(&self) -> impl Future<Output = Result<ChainId>>;
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;

    /// Returns the gas price for the specified transaction.
    fn transaction_gas_price(&self, tx_hash: &TxHash) -> impl Future<Output = Result<u128>>;
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    // TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit.
    // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit.
    // Once we implement the changes to the gas, we need to adjust this to be a u64.
    /// Returns the gas limit of the specified block.
    fn block_gas_limit(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<u128>>;
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    /// Returns the coinbase of the specified block.
    fn block_coinbase(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<Address>>;
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;

    /// Returns the difficulty of the specified block.
    fn block_difficulty(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<U256>>;
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the base fee of the specified block.
    fn block_base_fee(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<u64>>;
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;

    /// Returns the hash of the specified block.
    fn block_hash(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<BlockHash>>;
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;

    /// Returns the timestamp of the specified block.
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> impl Future<Output = Result<BlockTimestamp>>;
    ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;

    /// Returns the number of the last block.
    fn last_block_number(&self) -> impl Future<Output = Result<BlockNumber>>;
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
}

#[derive(Clone, Copy, Debug, Default)]
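The switch above from `impl Future` return types to `Pin<Box<dyn Future>>` is what makes `ResolverApi` usable as a trait object (for example behind `Arc<dyn ResolverApi>`, which the node interaction trait below requires). A minimal, self-contained sketch of the idea, assuming only the anyhow crate; `DynResolver` and `MyResolver` are illustrative stand-ins, not types from this repository:

use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

trait DynResolver {
    // Returning a boxed future keeps the trait dyn-compatible; a
    // `-> impl Future<...>` method would forbid `dyn DynResolver`.
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>>;
}

struct MyResolver;

impl DynResolver for MyResolver {
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        // The async block is boxed and pinned, mirroring the diff above.
        Box::pin(async move { Ok(420420420) })
    }
}

fn main() {
    // This coercion is the payoff: with `impl Future` return types the
    // trait could not be turned into an `Arc<dyn DynResolver>` at all.
    let resolver: Arc<dyn DynResolver> = Arc::new(MyResolver);
    let _future = resolver.chain_id();
}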
@@ -9,8 +9,13 @@ repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-common = { workspace = true }

revive-dt-format = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }

[lints]
workspace = true
@@ -1,35 +1,95 @@
//! This crate implements all node interactions.

use alloy::primitives::{Address, StorageKey, U256};
use std::pin::Pin;
use std::sync::Arc;

use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;

use futures::Stream;
use revive_common::EVMVersion;
use revive_dt_format::traits::ResolverApi;

/// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)]
pub trait EthereumNode {
    /// A function to run after the node is spawned and before any transactions are run on it.
    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;

    fn id(&self) -> usize;

    /// Returns the node's connection string.
    fn connection_string(&self) -> &str;

    fn submit_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;

    fn get_receipt(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;

    /// Execute the [TransactionRequest] and return a [TransactionReceipt].
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> impl Future<Output = Result<TransactionReceipt>>;
    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;

    /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
    fn trace_transaction(
        &self,
        receipt: &TransactionReceipt,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> impl Future<Output = Result<GethTrace>>;
    ) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;

    /// Returns the state diff of the transaction hash in the [TransactionReceipt].
    fn state_diff(&self, receipt: &TransactionReceipt) -> impl Future<Output = Result<DiffMode>>;
    fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;

    /// Returns the balance of the provided [`Address`].
    fn balance_of(&self, address: Address) -> impl Future<Output = Result<U256>>;
    fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the latest storage proof of the provided [`Address`].
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> impl Future<Output = Result<EIP1186AccountProofResponse>>;
    ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;

    /// Returns the resolver that is to be used with this Ethereum node.
    fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;

    /// Returns the EVM version of the node.
    fn evm_version(&self) -> EVMVersion;

    /// Returns a stream of the blocks that were mined by the node.
    fn subscribe_to_full_blocks_information(
        &self,
    ) -> Pin<
        Box<
            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                + '_,
        >,
    >;
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MinedBlockInformation {
    /// The block number.
    pub block_number: BlockNumber,

    /// The block timestamp.
    pub block_timestamp: BlockTimestamp,

    /// The amount of gas mined in the block.
    pub mined_gas: u128,

    /// The gas limit of the block.
    pub block_gas_limit: u128,

    /// The hashes of the transactions that were mined as part of the block.
    pub transaction_hashes: Vec<TxHash>,
}

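As a consumption sketch for the two new items above (hedged: `log_block_utilization` and its utilization math are illustrative, not part of the trait; only the crate's anyhow and futures dependencies are assumed):

use futures::StreamExt;

use revive_dt_node_interaction::EthereumNode;

// Hypothetical consumer of the mined-block stream; works with any
// `EthereumNode` implementation since the boxed futures can be awaited
// directly and the boxed stream is `Unpin`.
async fn log_block_utilization(node: &impl EthereumNode) -> anyhow::Result<()> {
    let mut blocks = node.subscribe_to_full_blocks_information().await?;
    while let Some(block) = blocks.next().await {
        // `MinedBlockInformation` carries enough data to derive how full
        // the block was without any further RPC calls.
        let fullness = block.mined_gas as f64 / block.block_gas_limit as f64 * 100.0;
        println!(
            "block #{} at {}: {} txs, {fullness:.1}% full",
            block.block_number,
            block.block_timestamp,
            block.transaction_hashes.len(),
        );
    }
    Ok(())
}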
@@ -11,7 +11,10 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
alloy = { workspace = true }
async-stream = { workspace = true }
futures = { workspace = true }
tracing = { workspace = true }
tower = { workspace = true }
tokio = { workspace = true }

revive-common = { workspace = true }
@@ -22,9 +25,12 @@ revive-dt-node-interaction = { workspace = true }

serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
serde_yaml_ng = { workspace = true }

sp-core = { workspace = true }
sp-runtime = { workspace = true }
zombienet-sdk = { workspace = true }

[dev-dependencies]
temp-dir = { workspace = true }
@@ -1,5 +1,10 @@
use alloy::primitives::ChainId;

/// This constant defines how much Wei accounts are pre-seeded with in genesis.
///
/// Note: After changing this number, check that the tests for kitchensink work as we encountered
/// some issues with different values of the initial balance on Kitchensink.
/// Note: After changing this number, check that the tests for substrate work as we encountered
/// some issues with different values of the initial balance on substrate.
pub const INITIAL_BALANCE: u128 = 10u128.pow(37);

/// The chain id used for all of the chains spawned by the framework.
pub const CHAIN_ID: ChainId = 420420420;
@@ -1,783 +0,0 @@
//! The go-ethereum node implementation.

use std::{
    fs::{File, OpenOptions, create_dir_all, remove_dir_all},
    io::{BufRead, BufReader, Read, Write},
    ops::ControlFlow,
    path::PathBuf,
    process::{Child, Command, Stdio},
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    time::{Duration, Instant},
};

use alloy::{
    eips::BlockNumberOrTag,
    genesis::{Genesis, GenesisAccount},
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
    providers::{
        Provider, ProviderBuilder,
        ext::DebugApi,
        fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
    },
    rpc::types::{
        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
    },
};
use anyhow::Context as _;
use revive_common::EVMVersion;
use tracing::{Instrument, instrument};

use revive_dt_common::{
    fs::clear_directory,
    futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;

use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The go-ethereum node instance implementation.
///
/// Implements helpers to initialize, spawn, and wait for the node.
///
/// Assumes dev mode and IPC only (`P2P`, `http`, etc. are kept disabled).
///
/// Prunes the child process and the base directory on drop.
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct GethNode {
    connection_string: String,
    base_directory: PathBuf,
    data_directory: PathBuf,
    logs_directory: PathBuf,
    geth: PathBuf,
    id: u32,
    handle: Option<Child>,
    start_timeout: Duration,
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    chain_id_filler: ChainIdFiller,
    /// This vector stores [`File`] objects that we use for logging and that we want to flush when
    /// the node object is dropped. We do not store them in separate fields at the moment, as the
    /// logic that we need to apply to them is the same regardless of what they belong to; we just
    /// want to flush them on [`Drop`] of the node.
    logs_file_to_flush: Vec<File>,
}

impl GethNode {
    const BASE_DIRECTORY: &str = "geth";
    const DATA_DIRECTORY: &str = "data";
    const LOGS_DIRECTORY: &str = "logs";

    const IPC_FILE: &str = "geth.ipc";
    const GENESIS_JSON_FILE: &str = "genesis.json";

    const READY_MARKER: &str = "IPC endpoint opened";
    const ERROR_MARKER: &str = "Fatal:";

    const GETH_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
    const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";

    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";

    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

    /// Create the node directory and call `geth init` to configure the genesis.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

        create_dir_all(&self.base_directory)
            .context("Failed to create base directory for geth node")?;
        create_dir_all(&self.logs_directory)
            .context("Failed to create logs directory for geth node")?;

        for signer_address in
            <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
        {
            // Note, the use of the entry API here means that we only modify the entries for any
            // account that is not in the `alloc` field of the genesis state.
            genesis
                .alloc
                .entry(signer_address)
                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
        }
        let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
        serde_json::to_writer(
            File::create(&genesis_path).context("Failed to create geth genesis file")?,
            &genesis,
        )
        .context("Failed to serialize geth genesis JSON to file")?;

        let mut child = Command::new(&self.geth)
            .arg("--state.scheme")
            .arg("hash")
            .arg("init")
            .arg("--datadir")
            .arg(&self.data_directory)
            .arg(genesis_path)
            .stderr(Stdio::piped())
            .stdout(Stdio::null())
            .spawn()
            .context("Failed to spawn geth --init process")?;

        let mut stderr = String::new();
        child
            .stderr
            .take()
            .expect("should be piped")
            .read_to_string(&mut stderr)
            .context("Failed to read geth --init stderr")?;

        if !child
            .wait()
            .context("Failed waiting for geth --init process to finish")?
            .success()
        {
            anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
        }

        Ok(self)
    }

    /// Spawn the go-ethereum node child process.
    ///
    /// [Instance::init] must be called first.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
        // This is the `OpenOptions` that we wish to use for all of the log files that we will be
        // opening in this method. We need to construct it in this way to:
        // 1. Be consistent.
        // 2. Be less verbose and more DRY.
        // 3. Work around the builder pattern's use of mutable references.
        let open_options = {
            let mut options = OpenOptions::new();
            options.create(true).truncate(true).write(true);
            options
        };

        let stdout_logs_file = open_options
            .clone()
            .open(self.geth_stdout_log_file_path())
            .context("Failed to open geth stdout logs file")?;
        let stderr_logs_file = open_options
            .open(self.geth_stderr_log_file_path())
            .context("Failed to open geth stderr logs file")?;
        self.handle = Command::new(&self.geth)
            .arg("--dev")
            .arg("--datadir")
            .arg(&self.data_directory)
            .arg("--ipcpath")
            .arg(&self.connection_string)
            .arg("--nodiscover")
            .arg("--maxpeers")
            .arg("0")
            .arg("--txlookuplimit")
            .arg("0")
            .arg("--cache.blocklogs")
            .arg("512")
            .arg("--state.scheme")
            .arg("hash")
            .arg("--syncmode")
            .arg("full")
            .arg("--gcmode")
            .arg("archive")
            .stderr(
                stderr_logs_file
                    .try_clone()
                    .context("Failed to clone geth stderr log file handle")?,
            )
            .stdout(
                stdout_logs_file
                    .try_clone()
                    .context("Failed to clone geth stdout log file handle")?,
            )
            .spawn()
            .context("Failed to spawn geth node process")?
            .into();

        if let Err(error) = self.wait_ready() {
            tracing::error!(?error, "Failed to start geth, shutting down gracefully");
            self.shutdown()
                .context("Failed to gracefully shutdown after geth start error")?;
            return Err(error);
        }

        self.logs_file_to_flush
            .extend([stderr_logs_file, stdout_logs_file]);

        Ok(self)
    }

    /// Wait for the go-ethereum node child process to become ready.
    ///
    /// [Instance::spawn_process] must be called first.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
        let start_time = Instant::now();

        let logs_file = OpenOptions::new()
            .read(true)
            .write(false)
            .append(false)
            .truncate(false)
            .open(self.geth_stderr_log_file_path())
            .context("Failed to open geth stderr logs file for readiness check")?;

        let maximum_wait_time = self.start_timeout;
        let mut stderr = BufReader::new(logs_file).lines();
        let mut lines = vec![];
        loop {
            if let Some(Ok(line)) = stderr.next() {
                if line.contains(Self::ERROR_MARKER) {
                    anyhow::bail!("Failed to start geth {line}");
                }
                if line.contains(Self::READY_MARKER) {
                    return Ok(self);
                }
                lines.push(line);
            }
            if Instant::now().duration_since(start_time) > maximum_wait_time {
                anyhow::bail!(
                    "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
                    self.start_timeout.as_millis(),
                    lines.join("\n")
                );
            }
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn geth_stdout_log_file_path(&self) -> PathBuf {
        self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn geth_stderr_log_file_path(&self) -> PathBuf {
        self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
    }

    async fn provider(
        &self,
    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
    {
        ProviderBuilder::new()
            .disable_recommended_fillers()
            .filler(FallbackGasFiller::new(
                25_000_000,
                1_000_000_000,
                1_000_000_000,
            ))
            .filler(self.chain_id_filler.clone())
            .filler(NonceFiller::new(self.nonce_manager.clone()))
            .wallet(self.wallet.clone())
            .connect(&self.connection_string)
            .await
            .map_err(Into::into)
    }
}

impl EthereumNode for GethNode {
    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    async fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
        let provider = self
            .provider()
            .await
            .context("Failed to create provider for transaction submission")?;

        let pending_transaction = provider
            .send_transaction(transaction)
            .await
            .inspect_err(
                |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
            )
            .context("Failed to submit transaction to geth node")?;
        let transaction_hash = *pending_transaction.tx_hash();

        // The following is a fix for the "transaction indexing is in progress" error that we used
        // to get. You can find more information on this in the following GH issue in geth
        // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
        // before we can get the receipt of the transaction it needs to have been indexed by the
        // node's indexer. Just because the transaction has been confirmed doesn't mean that it
        // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
        // confirmed. If it has been, then it will call the `eth_getTransactionReceipt` method,
        // which _might_ return the above error if the tx has not been indexed yet. So, we need to
        // implement a retry mechanism that keeps trying to get the receipt until it eventually
        // works, but we only do that if the error we get back is the "transaction indexing is in
        // progress" error or if the receipt is None.
        //
        // Getting the transaction indexed and taking a receipt can take a long time, especially
        // when a lot of transactions are being submitted to the node. Thus, while initially we
        // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to
        // allow for a larger wait time. Therefore, here we allow for 5 minutes of waiting, with a
        // constant delay between the attempts to get the receipt when it's not yet available.
        let provider = Arc::new(provider);
        poll(
            Self::RECEIPT_POLLING_DURATION,
            PollingWaitBehavior::Constant(Duration::from_millis(200)),
            move || {
                let provider = provider.clone();
                async move {
                    match provider.get_transaction_receipt(transaction_hash).await {
                        Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                        Ok(None) => Ok(ControlFlow::Continue(())),
                        Err(error) => {
                            let error_string = error.to_string();
                            match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
                                true => Ok(ControlFlow::Continue(())),
                                false => Err(error.into()),
                            }
                        }
                    }
                }
            },
        )
        .instrument(tracing::info_span!(
            "Awaiting transaction receipt",
            ?transaction_hash
        ))
        .await
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn trace_transaction(
        &self,
        transaction: &TransactionReceipt,
        trace_options: GethDebugTracingOptions,
    ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
        let provider = Arc::new(
            self.provider()
                .await
                .context("Failed to create provider for tracing")?,
        );
        poll(
            Self::TRACE_POLLING_DURATION,
            PollingWaitBehavior::Constant(Duration::from_millis(200)),
            move || {
                let provider = provider.clone();
                let trace_options = trace_options.clone();
                async move {
                    match provider
                        .debug_trace_transaction(transaction.transaction_hash, trace_options)
                        .await
                    {
                        Ok(trace) => Ok(ControlFlow::Break(trace)),
                        Err(error) => {
                            let error_string = error.to_string();
                            match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
                                true => Ok(ControlFlow::Continue(())),
                                false => Err(error.into()),
                            }
                        }
                    }
                }
            },
        )
        .await
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
        let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
            diff_mode: Some(true),
            disable_code: None,
            disable_storage: None,
        });
        match self
            .trace_transaction(transaction, trace_options)
            .await
            .context("Failed to trace transaction for prestate diff")?
            .try_into_pre_state_frame()
            .context("Failed to convert trace into pre-state frame")?
        {
            PreStateFrame::Diff(diff) => Ok(diff),
            _ => anyhow::bail!("expected a diff mode trace"),
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_balance(address)
            .await
            .map_err(Into::into)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> anyhow::Result<EIP1186AccountProofResponse> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_proof(address, keys)
            .latest()
            .await
            .map_err(Into::into)
    }
}

impl ResolverApi for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_chain_id()
            .await
            .map_err(Into::into)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_transaction_receipt(*tx_hash)
            .await?
            .context("Failed to get the transaction receipt")
            .map(|receipt| receipt.effective_gas_price)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .map(|block| block.header.gas_limit as _)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .map(|block| block.header.beneficiary)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .and_then(|block| {
                block
                    .header
                    .base_fee_per_gas
                    .context("Failed to get the base fee per gas")
            })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .map(|block| block.header.hash)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_by_number(number)
            .await
            .context("Failed to get the geth block")?
            .context("Failed to get the Geth block, perhaps there are no blocks?")
            .map(|block| block.header.timestamp)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
        self.provider()
            .await
            .context("Failed to get the Geth provider")?
            .get_block_number()
            .await
            .map_err(Into::into)
    }
}

impl Node for GethNode {
    fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Clone,
    ) -> Self {
        let working_directory_configuration =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
        let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
        let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);

        let geth_directory = working_directory_configuration
            .as_path()
            .join(Self::BASE_DIRECTORY);
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
        let base_directory = geth_directory.join(id.to_string());

        let wallet = wallet_configuration.wallet();

        Self {
            connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
            data_directory: base_directory.join(Self::DATA_DIRECTORY),
            logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
            base_directory,
            geth: geth_configuration.path.clone(),
            id,
            handle: None,
            start_timeout: geth_configuration.start_timeout_ms,
            wallet: wallet.clone(),
            chain_id_filler: Default::default(),
            nonce_manager: Default::default(),
            // We know that we only need to store 2 files (the geth node's stdout and stderr), so
            // we can specify that capacity when creating the vector.
            logs_file_to_flush: Vec::with_capacity(2),
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn id(&self) -> usize {
        self.id as _
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn connection_string(&self) -> String {
        self.connection_string.clone()
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Terminate the processes in a graceful manner to allow for the output to be flushed.
        if let Some(mut child) = self.handle.take() {
            child
                .kill()
                .map_err(|error| anyhow::anyhow!("Failed to kill the geth process: {error:?}"))?;
        }

        // Flush the files that we're using for keeping the logs before shutdown.
        for file in self.logs_file_to_flush.iter_mut() {
            file.flush()?
        }

        // Remove the node's database so that subsequent runs do not run on the same database. We
        // ignore the error just in case the directory didn't exist in the first place and
        // therefore there's nothing to be deleted.
        let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));

        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()?;
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.geth)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()
            .context("Failed to spawn geth --version process")?
            .wait_with_output()
            .context("Failed to wait for geth --version output")?
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }

    fn matches_target(targets: Option<&[String]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|str| str.as_str() == "evm"),
        }
    }

    fn evm_version() -> EVMVersion {
        EVMVersion::Cancun
    }
}

impl Drop for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn drop(&mut self) {
        self.shutdown().expect("Failed to shutdown")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn test_config() -> ExecutionContext {
        ExecutionContext::default()
    }

    fn new_node() -> (ExecutionContext, GethNode) {
        let context = test_config();
        let mut node = GethNode::new(&context);
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
            .spawn_process()
            .expect("Failed to spawn the node process");
        (context, node)
    }

    #[test]
    fn version_works() {
        let version = GethNode::new(&test_config()).version().unwrap();
        assert!(
            version.starts_with("geth version"),
            "expected version string, got: '{version}'"
        );
    }

    #[tokio::test]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let chain_id = node.chain_id().await;

        // Assert
        let chain_id = chain_id.expect("Failed to get the chain id");
        assert_eq!(chain_id, 420_420_420);
    }

    #[tokio::test]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;

        // Assert
        let gas_limit = gas_limit.expect("Failed to get the gas limit");
        assert_eq!(gas_limit, u32::MAX as u128)
    }

    #[tokio::test]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;

        // Assert
        let coinbase = coinbase.expect("Failed to get the coinbase");
        assert_eq!(coinbase, Address::new([0xFF; 20]))
    }

    #[tokio::test]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;

        // Assert
        let block_difficulty = block_difficulty.expect("Failed to get the block difficulty");
        assert_eq!(block_difficulty, U256::ZERO)
    }

    #[tokio::test]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;

        // Assert
        let _ = block_hash.expect("Failed to get the block hash");
    }

    #[tokio::test]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;

        // Assert
        let _ = block_timestamp.expect("Failed to get the block timestamp");
    }

    #[tokio::test]
    async fn can_get_block_number_from_node() {
        // Arrange
        let (_context, node) = new_node();

        // Act
        let block_number = node.last_block_number().await;

        // Assert
        let block_number = block_number.expect("Failed to get the block number");
        assert_eq!(block_number, 0)
    }
}
@@ -0,0 +1,3 @@
mod process;

pub use process::*;
@@ -0,0 +1,191 @@
use std::{
    fs::{File, OpenOptions},
    io::{BufRead, BufReader, Write},
    path::Path,
    process::{Child, Command},
    time::{Duration, Instant},
};

use anyhow::{Context, Result, bail};

/// A wrapper around processes which allows for their stdout and stderr to be logged and flushed
/// when the process is dropped.
#[derive(Debug)]
pub struct Process {
    /// The handle of the child process.
    child: Child,

    /// The file that stdout is being logged to.
    stdout_logs_file: File,

    /// The file that stderr is being logged to.
    stderr_logs_file: File,
}

impl Process {
    pub fn new(
        log_file_prefix: impl Into<Option<&'static str>>,
        logs_directory: impl AsRef<Path>,
        binary_path: impl AsRef<Path>,
        command_building_callback: impl FnOnce(&mut Command, File, File),
        process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
    ) -> Result<Self> {
        let log_file_prefix = log_file_prefix.into();

        let (stdout_file_name, stderr_file_name) = match log_file_prefix {
            Some(prefix) => (
                format!("{prefix}_stdout.log"),
                format!("{prefix}_stderr.log"),
            ),
            None => ("stdout.log".to_string(), "stderr.log".to_string()),
        };

        let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
        let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);

        let stdout_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stdout_logs_file_path.as_path())
            .context("Failed to open the stdout logs file")?;
        let stderr_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stderr_logs_file_path.as_path())
            .context("Failed to open the stderr logs file")?;

        let mut command = {
            let stdout_logs_file = stdout_logs_file
                .try_clone()
                .context("Failed to clone the stdout logs file")?;
            let stderr_logs_file = stderr_logs_file
                .try_clone()
                .context("Failed to clone the stderr logs file")?;

            let mut command = Command::new(binary_path.as_ref());
            command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
            command
        };
        let mut child = command
            .spawn()
            .context("Failed to spawn the built command")?;

        match process_readiness_wait_behavior {
            ProcessReadinessWaitBehavior::NoStartupWait => {}
            ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration,
                mut check_function,
            } => {
                let spawn_time = Instant::now();

                let stdout_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stdout_logs_file_path)
                    .context("Failed to open the stdout logs file")?;
                let stderr_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stderr_logs_file_path)
                    .context("Failed to open the stderr logs file")?;

                let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
                let mut stderr_lines = BufReader::new(stderr_logs_file).lines();

                let mut stdout = String::new();
                let mut stderr = String::new();

                loop {
                    let stdout_line = stdout_lines.next().and_then(Result::ok);
                    let stderr_line = stderr_lines.next().and_then(Result::ok);

                    if let Some(stdout_line) = stdout_line.as_ref() {
                        stdout.push_str(stdout_line);
                        stdout.push('\n');
                    }
                    if let Some(stderr_line) = stderr_line.as_ref() {
                        stderr.push_str(stderr_line);
                        stderr.push('\n');
                    }

                    let check_result =
                        check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
                            format!(
                                "Failed to wait for the process to be ready - {stdout} - {stderr}"
                            ),
                        )?;

                    if check_result {
                        break;
                    }

                    if Instant::now().duration_since(spawn_time) > max_wait_duration {
                        bail!(
                            "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
                        )
                    }
                }
            }
            ProcessReadinessWaitBehavior::WaitForCommandToExit => {
                if !child
                    .wait()
                    .context("Failed waiting for process to finish")?
                    .success()
                {
                    anyhow::bail!("Failed to spawn command");
                }
            }
        }

        Ok(Self {
            child,
            stdout_logs_file,
            stderr_logs_file,
        })
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        self.child.kill().expect("Failed to kill the process");
        self.stdout_logs_file
            .flush()
            .expect("Failed to flush the stdout logs file");
        self.stderr_logs_file
            .flush()
            .expect("Failed to flush the stderr logs file");
    }
}

pub enum ProcessReadinessWaitBehavior {
    /// The process does not require any kind of wait after it's been spawned and can be used
    /// straight away.
    NoStartupWait,

    /// Waits for the command to exit.
    WaitForCommandToExit,

    /// The process requires some amount of waiting after it's been started.
    WaitDuration(Duration),

    /// The process requires a time bounded wait function which is a function of the lines that
    /// appear in the log files.
    TimeBoundedWaitFunction {
        /// The maximum amount of time to wait for the check function to return true.
        max_wait_duration: Duration,

        /// The function to use to check if the spawned process is ready to use or not. This
        /// function should return:
        ///
        /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
        ///   and the wait is completed.
        /// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
        /// - `Err`: The process is not ready yet and will not be ready in the future as it appears
        ///   that it has encountered an error when it was being spawned.
        ///
        /// The first argument is a line from stdout and the second argument is a line from stderr.
        #[allow(clippy::type_complexity)]
        check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
    },
}
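A hypothetical usage sketch of the new `Process` helper (the binary path, logs directory, port flag, and readiness marker below are all invented for illustration; the `Process::new` signature and `ProcessReadinessWaitBehavior` variants are the ones from the file above):

use std::time::Duration;

use crate::helpers::{Process, ProcessReadinessWaitBehavior};

// Spawn some long-running binary and block until it prints a readiness
// marker to stdout, giving up after 30 seconds.
fn spawn_example_daemon() -> anyhow::Result<Process> {
    Process::new(
        "daemon", // log files become daemon_stdout.log / daemon_stderr.log
        "/tmp/logs", // logs directory (assumed to already exist)
        "/usr/local/bin/example-daemon",
        |command, stdout_file, stderr_file| {
            command
                .arg("--port")
                .arg("8080")
                .stdout(stdout_file)
                .stderr(stderr_file);
        },
        ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
            max_wait_duration: Duration::from_secs(30),
            check_function: Box::new(|stdout_line, _stderr_line| {
                // Ready once the marker shows up; keep waiting otherwise.
                Ok(stdout_line.is_some_and(|line| line.contains("listening on")))
            }),
        },
    )
}

Dropping the returned `Process` kills the child and flushes both log files, which is the behavior the `GethNode` rewrite below relies on.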
@@ -1,34 +1,15 @@
//! This crate implements the testing nodes.

use alloy::genesis::Genesis;
use revive_common::EVMVersion;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;

pub mod common;
pub mod constants;
pub mod geth;
pub mod kitchensink;
pub mod pool;
pub mod helpers;
pub mod node_implementations;
pub mod provider_utils;

/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
    /// Create a new uninitialized instance.
    fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Clone,
    ) -> Self;

    /// Returns the identifier of the node.
    fn id(&self) -> usize;

    /// Spawns a node configured according to the genesis json.
    ///
    /// Blocking until it's ready to accept transactions.
@@ -39,16 +20,6 @@ pub trait Node: EthereumNode {
    /// Blocking until it's completely stopped.
    fn shutdown(&mut self) -> anyhow::Result<()>;

    /// Returns the node's connection string.
    fn connection_string(&self) -> String;

    /// Returns the node version.
    fn version(&self) -> anyhow::Result<String>;

    /// Given a list of targets from the metadata file, this function determines if the metadata
    /// file can be run on this node or not.
    fn matches_target(targets: Option<&[String]>) -> bool;

    /// Returns the EVM version of the node.
    fn evm_version() -> EVMVersion;
}

@@ -0,0 +1,900 @@
//! The go-ethereum node implementation.

use std::{
    fs::{File, create_dir_all, remove_dir_all},
    io::Read,
    ops::ControlFlow,
    path::PathBuf,
    pin::Pin,
    process::{Command, Stdio},
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    time::Duration,
};

use alloy::{
    eips::BlockNumberOrTag,
    genesis::{Genesis, GenesisAccount},
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
    providers::{
        Provider,
        ext::DebugApi,
        fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
    },
    rpc::types::{
        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
        trace::geth::{
            DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
        },
    },
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use revive_common::EVMVersion;
use tokio::sync::OnceCell;
use tracing::{Instrument, error, instrument};

use revive_dt_common::{
    fs::clear_directory,
    futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};

use crate::{
    Node,
    constants::{CHAIN_ID, INITIAL_BALANCE},
    helpers::{Process, ProcessReadinessWaitBehavior},
    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The go-ethereum node instance implementation.
///
/// Implements helpers to initialize, spawn, and wait for the node.
///
/// Assumes dev mode and IPC only (`P2P`, `http`, etc. are kept disabled).
///
/// Prunes the child process and the base directory on drop.
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct GethNode {
    connection_string: String,
    base_directory: PathBuf,
    data_directory: PathBuf,
    logs_directory: PathBuf,
    geth: PathBuf,
    id: u32,
    handle: Option<Process>,
    start_timeout: Duration,
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
}

impl GethNode {
|
||||
const BASE_DIRECTORY: &str = "geth";
|
||||
const DATA_DIRECTORY: &str = "data";
|
||||
const LOGS_DIRECTORY: &str = "logs";
|
||||
|
||||
const IPC_FILE: &str = "geth.ipc";
|
||||
const GENESIS_JSON_FILE: &str = "genesis.json";
|
||||
|
||||
const READY_MARKER: &str = "IPC endpoint opened";
|
||||
const ERROR_MARKER: &str = "Fatal:";
|
||||
|
||||
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
|
||||
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
|
||||
|
||||
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
|
||||
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
|
||||
|
||||
pub fn new(
|
||||
context: impl AsRef<WorkingDirectoryConfiguration>
|
||||
+ AsRef<WalletConfiguration>
|
||||
+ AsRef<GethConfiguration>
|
||||
+ Clone,
|
||||
) -> Self {
|
||||
let working_directory_configuration =
|
||||
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
||||
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
|
||||
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
|
||||
|
||||
let geth_directory = working_directory_configuration
|
||||
.as_path()
|
||||
.join(Self::BASE_DIRECTORY);
|
||||
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||
let base_directory = geth_directory.join(id.to_string());
|
||||
|
||||
let wallet = wallet_configuration.wallet();
|
||||
|
||||
Self {
|
||||
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
|
||||
data_directory: base_directory.join(Self::DATA_DIRECTORY),
|
||||
logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
|
||||
base_directory,
|
||||
geth: geth_configuration.path.clone(),
|
||||
id,
|
||||
handle: None,
|
||||
start_timeout: geth_configuration.start_timeout_ms,
|
||||
wallet: wallet.clone(),
|
||||
nonce_manager: Default::default(),
|
||||
provider: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
    /// Create the node directory and call `geth init` to configure the genesis.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

        create_dir_all(&self.base_directory)
            .context("Failed to create base directory for geth node")?;
        create_dir_all(&self.logs_directory)
            .context("Failed to create logs directory for geth node")?;

        for signer_address in
            <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
        {
            // Note: using the entry API here means that we only add entries for accounts that
            // are not already present in the `alloc` field of the genesis state.
            genesis
                .alloc
                .entry(signer_address)
                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
        }
        let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
        serde_json::to_writer(
            File::create(&genesis_path).context("Failed to create geth genesis file")?,
            &genesis,
        )
        .context("Failed to serialize geth genesis JSON to file")?;

        let mut child = Command::new(&self.geth)
            .arg("--state.scheme")
            .arg("hash")
            .arg("init")
            .arg("--datadir")
            .arg(&self.data_directory)
            .arg(genesis_path)
            .stderr(Stdio::piped())
            .stdout(Stdio::null())
            .spawn()
            .context("Failed to spawn geth init process")?;

        let mut stderr = String::new();
        child
            .stderr
            .take()
            .expect("should be piped")
            .read_to_string(&mut stderr)
            .context("Failed to read geth init stderr")?;

        if !child
            .wait()
            .context("Failed waiting for geth init process to finish")?
            .success()
        {
            anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
        }

        Ok(self)
    }

    /// Spawn the go-ethereum node child process.
    ///
    /// [Self::init] must have been called first.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
        let process = Process::new(
            None,
            self.logs_directory.as_path(),
            self.geth.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--dev")
                    .arg("--datadir")
                    .arg(&self.data_directory)
                    .arg("--ipcpath")
                    .arg(&self.connection_string)
                    .arg("--nodiscover")
                    .arg("--maxpeers")
                    .arg("0")
                    .arg("--txlookuplimit")
                    .arg("0")
                    .arg("--cache.blocklogs")
                    .arg("512")
                    .arg("--state.scheme")
                    .arg("hash")
                    .arg("--syncmode")
                    .arg("full")
                    .arg("--gcmode")
                    .arg("archive")
                    .stderr(stderr_file)
                    .stdout(stdout_file);
            },
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: self.start_timeout,
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => {
                        if line.contains(Self::ERROR_MARKER) {
                            anyhow::bail!("Failed to start geth: {line}");
                        } else if line.contains(Self::READY_MARKER) {
                            Ok(true)
                        } else {
                            Ok(false)
                        }
                    }
                    None => Ok(false),
                }),
            },
        );

        match process {
            Ok(process) => self.handle = Some(process),
            Err(err) => {
                error!(?err, "Failed to start geth, shutting down gracefully");
                self.shutdown()
                    .context("Failed to gracefully shut down after geth start error")?;
                return Err(err);
            }
        }

        Ok(self)
    }
    async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
        self.provider
            .get_or_try_init(|| async move {
                construct_concurrency_limited_provider::<Ethereum, _>(
                    self.connection_string.as_str(),
                    FallbackGasFiller::default(),
                    ChainIdFiller::new(Some(CHAIN_ID)),
                    NonceFiller::new(self.nonce_manager.clone()),
                    self.wallet.clone(),
                )
                .await
                .context("Failed to construct the provider")
            })
            .await
            .cloned()
    }
}

impl EthereumNode for GethNode {
    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
        Box::pin(async move { Ok(()) })
    }

    fn id(&self) -> usize {
        self.id as _
    }

    fn connection_string(&self) -> &str {
        &self.connection_string
    }

    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    fn submit_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create the provider for transaction submission")?;
            let pending_transaction = provider
                .send_transaction(transaction)
                .await
                .context("Failed to submit the transaction through the provider")?;
            Ok(*pending_transaction.tx_hash())
        })
    }

    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    fn get_receipt(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for getting the receipt")?
                .get_transaction_receipt(tx_hash)
                .await
                .context("Failed to get the receipt of the transaction")?
                .context("Failed to get the receipt of the transaction")
        })
    }

    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create provider for transaction submission")?;

            let pending_transaction = provider
                .send_transaction(transaction)
                .await
                .inspect_err(
                    |err| error!(%err, "Encountered an error when submitting the transaction"),
                )
                .context("Failed to submit transaction to geth node")?;
            let transaction_hash = *pending_transaction.tx_hash();

            // The following is a fix for the "transaction indexing is in progress" error that we
            // used to get. More background can be found in the upstream geth issue:
            // https://github.com/ethereum/go-ethereum/issues/28877. In short, before we can get
            // the receipt of a transaction, the transaction needs to have been indexed by the
            // node's indexer, and a transaction being confirmed does not mean that it has also
            // been indexed. When we call alloy's `get_receipt`, it checks whether the transaction
            // was confirmed and, if so, calls the `eth_getTransactionReceipt` method, which
            // _might_ return the above error if the transaction has not been indexed yet. So we
            // retry fetching the receipt until it eventually succeeds, but only when the error we
            // get back is the "transaction indexing is in progress" error or when the receipt is
            // None.
            //
            // Indexing the transaction and obtaining its receipt can take a long time, especially
            // when many transactions are being submitted to the node. While we initially allowed
            // only 60 seconds of waiting with a 1 second polling delay, we need to allow for a
            // larger wait time: here we allow up to 5 minutes of waiting, polling at a constant
            // 200ms interval whenever the receipt is not yet available.
            poll(
                Self::RECEIPT_POLLING_DURATION,
                PollingWaitBehavior::Constant(Duration::from_millis(200)),
                move || {
                    let provider = provider.clone();
                    async move {
                        match provider.get_transaction_receipt(transaction_hash).await {
                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                            Ok(None) => Ok(ControlFlow::Continue(())),
                            Err(error) => {
                                let error_string = error.to_string();
                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
                                    true => Ok(ControlFlow::Continue(())),
                                    false => Err(error.into()),
                                }
                            }
                        }
                    }
                },
            )
            .instrument(tracing::info_span!(
                "Awaiting transaction receipt",
                ?transaction_hash
            ))
            .await
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create provider for tracing")?;
            poll(
                Self::TRACE_POLLING_DURATION,
                PollingWaitBehavior::Constant(Duration::from_millis(200)),
                move || {
                    let provider = provider.clone();
                    let trace_options = trace_options.clone();
                    async move {
                        match provider
                            .debug_trace_transaction(tx_hash, trace_options)
                            .await
                        {
                            Ok(trace) => Ok(ControlFlow::Break(trace)),
                            Err(error) => {
                                let error_string = error.to_string();
                                match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
                                    true => Ok(ControlFlow::Continue(())),
                                    false => Err(error.into()),
                                }
                            }
                        }
                    }
                },
            )
            .await
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn state_diff(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
        Box::pin(async move {
            let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
                diff_mode: Some(true),
                disable_code: None,
                disable_storage: None,
            });
            match self
                .trace_transaction(tx_hash, trace_options)
                .await
                .context("Failed to trace transaction for prestate diff")?
                .try_into_pre_state_frame()
                .context("Failed to convert trace into pre-state frame")?
            {
                PreStateFrame::Diff(diff) => Ok(diff),
                _ => anyhow::bail!("expected a diff mode trace"),
            }
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn balance_of(
        &self,
        address: Address,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the Geth provider")?
                .get_balance(address)
                .await
                .map_err(Into::into)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the Geth provider")?
                .get_proof(address, keys)
                .latest()
                .await
                .map_err(Into::into)
        })
    }

    // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Arc::new(GethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }

    fn subscribe_to_full_blocks_information(
        &self,
    ) -> Pin<
        Box<
            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                + '_,
        >,
    > {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create the provider for block subscription")?;
            let block_subscription = provider.subscribe_full_blocks();
            let block_stream = block_subscription
                .into_stream()
                .await
                .context("Failed to create the block stream")?;

            let mined_block_information_stream = block_stream.filter_map(|block| async {
                let block = block.ok()?;
                Some(MinedBlockInformation {
                    block_number: block.number(),
                    block_timestamp: block.header.timestamp,
                    mined_gas: block.header.gas_used as _,
                    block_gas_limit: block.header.gas_limit as _,
                    transaction_hashes: block
                        .transactions
                        .into_hashes()
                        .as_hashes()
                        .expect("Must be hashes")
                        .to_vec(),
                })
            });

            Ok(Box::pin(mined_block_information_stream)
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }
}

pub struct GethNodeResolver {
    id: u32,
    provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}

impl ResolverApi for GethNodeResolver {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn chain_id(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_transaction_receipt(tx_hash)
                .await?
                .context("Failed to get the transaction receipt")
                .map(|receipt| receipt.effective_gas_price)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.gas_limit as _)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.beneficiary)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .and_then(|block| {
                    block
                        .header
                        .base_fee_per_gas
                        .context("Failed to get the base fee per gas")
                })
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.hash)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.timestamp)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
        Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
    }
}

impl Node for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn shutdown(&mut self) -> anyhow::Result<()> {
        drop(self.handle.take());

        // Remove the node's database so that subsequent runs do not run on the same database. We
        // ignore the error just in case the directory didn't exist in the first place and
        // therefore there's nothing to be deleted.
        let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));

        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()?;
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.geth)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()
            .context("Failed to spawn geth --version process")?
            .wait_with_output()
            .context("Failed to wait for geth --version output")?
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }
}

impl Drop for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn drop(&mut self) {
        self.shutdown().expect("Failed to shutdown")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::LazyLock;

    use super::*;

    fn test_config() -> TestExecutionContext {
        TestExecutionContext::default()
    }

    fn new_node() -> (TestExecutionContext, GethNode) {
        let context = test_config();
        let mut node = GethNode::new(&context);
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
            .spawn_process()
            .expect("Failed to spawn the node process");
        (context, node)
    }

    fn shared_state() -> &'static (TestExecutionContext, GethNode) {
        static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
        &STATE
    }

    fn shared_node() -> &'static GethNode {
        &shared_state().1
    }

    #[tokio::test]
    async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
        // Arrange
        let (context, node) = shared_state();

        let account_address = context
            .wallet_configuration
            .wallet()
            .default_signer()
            .address();
        let transaction = TransactionRequest::default()
            .to(account_address)
            .value(U256::from(100_000_000_000_000u128));

        // Act
        let receipt = node.execute_transaction(transaction).await;

        // Assert
        let _ = receipt.expect("Failed to get the receipt for the transfer");
    }

    #[test]
    #[ignore = "Ignored since they take a long time to run"]
    fn version_works() {
        // Arrange
        let node = shared_node();

        // Act
        let version = node.version();

        // Assert
        let version = version.expect("Failed to get the version");
        assert!(
            version.starts_with("geth version"),
            "expected version string, got: '{version}'"
        );
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let chain_id = node.resolver().await.unwrap().chain_id().await;

        // Assert
        let chain_id = chain_id.expect("Failed to get the chain id");
        assert_eq!(chain_id, 420_420_420);
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let gas_limit = node
            .resolver()
            .await
            .unwrap()
            .block_gas_limit(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = gas_limit.expect("Failed to get the gas limit");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let coinbase = node
            .resolver()
            .await
            .unwrap()
            .block_coinbase(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = coinbase.expect("Failed to get the coinbase");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_difficulty = node
            .resolver()
            .await
            .unwrap()
            .block_difficulty(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_difficulty.expect("Failed to get the block difficulty");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_hash = node
            .resolver()
            .await
            .unwrap()
            .block_hash(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_hash.expect("Failed to get the block hash");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_timestamp = node
            .resolver()
            .await
            .unwrap()
            .block_timestamp(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_timestamp.expect("Failed to get the block timestamp");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_number_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_number = node.resolver().await.unwrap().last_block_number().await;

        // Assert
        let _ = block_number.expect("Failed to get the block number");
    }
}
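For orientation, here is a minimal sketch of driving a GethNode end to end, mirroring the tests above. It assumes a tokio runtime and the default TestExecutionContext used by the tests; everything else is taken from the code in this file.

// Minimal sketch, assuming `TestExecutionContext::default()` as in the tests above
// and that the `Node` and `EthereumNode` traits are in scope.
async fn geth_node_lifecycle() -> anyhow::Result<()> {
    let context = TestExecutionContext::default();
    let mut node = GethNode::new(&context);

    // `spawn` runs `geth init` with the configured genesis and then starts the node,
    // waiting for the "IPC endpoint opened" readiness marker on stderr.
    node.spawn(context.genesis_configuration.genesis()?.clone())?;

    // Submit a simple transfer and wait for its receipt (with the indexing retry above).
    let to = context.wallet_configuration.wallet().default_signer().address();
    let tx = TransactionRequest::default().to(to).value(U256::from(1u64));
    let receipt = node.execute_transaction(tx).await?;
    println!("mined in block {:?}", receipt.block_number);

    // Dropping the node (or calling `shutdown`) kills the process and removes its datadir.
    node.shutdown()?;
    Ok(())
}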
@@ -0,0 +1,4 @@
pub mod geth;
pub mod lighthouse_geth;
pub mod substrate;
pub mod zombienet;
@@ -1,110 +0,0 @@
//! This crate implements concurrent handling of testing nodes.

use std::{
    sync::atomic::{AtomicUsize, Ordering},
    thread,
};

use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_config::{
    ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
    KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
    WorkingDirectoryConfiguration,
};
use tracing::info;

use crate::Node;

/// The node pool starts one or more [Node]s which can then be accessed
/// in a round-robin fashion.
pub struct NodePool<T> {
    next: AtomicUsize,
    nodes: Vec<T>,
}

impl<T> NodePool<T>
where
    T: Node + Send + 'static,
{
    /// Create a new pool. This starts as many nodes as are configured in `context`.
    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Send
            + Sync
            + Clone
            + 'static,
    ) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);

        let nodes = concurrency_configuration.number_of_nodes;
        let genesis = genesis_configuration.genesis()?;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            let genesis = genesis.clone();
            handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
        }

        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
                    .context("Node failed to spawn")?,
            );
        }

        Ok(Self {
            nodes,
            next: Default::default(),
        })
    }

    /// Get a handle to the next node.
    pub fn round_robbin(&self) -> &T {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap()
    }
}

fn spawn_node<T: Node + Send>(
    context: impl AsRef<WorkingDirectoryConfiguration>
        + AsRef<ConcurrencyConfiguration>
        + AsRef<GenesisConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<GethConfiguration>
        + AsRef<KitchensinkConfiguration>
        + AsRef<ReviveDevNodeConfiguration>
        + AsRef<EthRpcConfiguration>
        + Clone
        + 'static,
    genesis: Genesis,
) -> anyhow::Result<T> {
    let mut node = T::new(context);
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawning node"
    );
    node.spawn(genesis)
        .context("Failed to spawn node process")?;
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawned node"
    );
    Ok(node)
}
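A sketch of how this pool (deleted in this change) was consumed, assuming a Node implementor such as the GethNode above and a context that satisfies the listed AsRef bounds:

// Hypothetical driver for the removed NodePool API.
fn use_node_pool(context: TestExecutionContext) -> anyhow::Result<()> {
    // Starts `concurrency.number_of_nodes` nodes, one per spawned thread.
    let pool = NodePool::<GethNode>::new(context)?;
    // Each call hands out the next node, wrapping around (round robin).
    let node = pool.round_robbin();
    println!("using node {} at {}", node.id(), node.connection_string());
    Ok(())
}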
@@ -0,0 +1,69 @@
use std::sync::Arc;

use alloy::transports::BoxFuture;
use tokio::sync::Semaphore;
use tower::{Layer, Service};

#[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer {
    semaphore: Arc<Semaphore>,
}

impl ConcurrencyLimiterLayer {
    pub fn new(permit_count: usize) -> Self {
        Self {
            semaphore: Arc::new(Semaphore::new(permit_count)),
        }
    }
}

impl<S> Layer<S> for ConcurrencyLimiterLayer {
    type Service = ConcurrencyLimiterService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        ConcurrencyLimiterService {
            service: inner,
            semaphore: self.semaphore.clone(),
        }
    }
}

#[derive(Clone)]
pub struct ConcurrencyLimiterService<S> {
    service: S,
    semaphore: Arc<Semaphore>,
}

impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where
    S: Service<Request> + Send,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    fn call(&mut self, req: Request) -> Self::Future {
        let semaphore = self.semaphore.clone();
        let future = self.service.call(req);

        Box::pin(async move {
            let _permit = semaphore
                .acquire()
                .await
                .expect("Semaphore has been closed");
            tracing::debug!(
                available_permits = semaphore.available_permits(),
                "Acquired Semaphore Permit"
            );
            future.await
        })
    }
}
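A minimal sketch of wiring this layer into alloy's RPC client, mirroring the provider construction later in this diff; the permit count here is illustrative only.

use alloy::rpc::client::ClientBuilder;

async fn limited_client(rpc_url: &str) -> anyhow::Result<()> {
    // At most 8 RPC requests in flight at once; further calls wait on the semaphore.
    let limiter = ConcurrencyLimiterLayer::new(8);
    let client = ClientBuilder::default()
        .layer(limiter)
        .connect(rpc_url)
        .await?;
    let _ = client; // hand the client to a ProviderBuilder as shown below
    Ok(())
}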
@@ -7,6 +7,10 @@ use alloy::{
    transports::TransportResult,
};

// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;

#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
    inner: GasFiller,
@@ -30,6 +34,12 @@ impl FallbackGasFiller {
    }
}

impl Default for FallbackGasFiller {
    fn default() -> Self {
        FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
    }
}

impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
@@ -50,8 +60,6 @@
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        // Try to fetch GasFiller’s “fillable” (gas_price, base_fee, estimate_gas, …).
        // If it errors (i.e. the tx would revert under eth_estimateGas), swallow it.
        match self.inner.prepare(provider, tx).await {
            Ok(fill) => Ok(Some(fill)),
            Err(_) => Ok(None),
@@ -64,8 +72,17 @@
        mut tx: alloy::providers::SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        if let Some(fill) = fillable {
            // our inner GasFiller succeeded — use it
            self.inner.fill(fill, tx).await
            let mut tx = self.inner.fill(fill, tx).await?;
            if let Some(builder) = tx.as_mut_builder() {
                if let Some(estimated) = builder.gas_limit() {
                    let padded = estimated
                        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
                        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
                        .unwrap_or(u64::MAX);
                    builder.set_gas_limit(padded);
                }
            }
            Ok(tx)
        } else {
            if let Some(builder) = tx.as_mut_builder() {
                builder.set_gas_limit(self.default_gas_limit);
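A worked example of the padding arithmetic above: with a numerator of 120 and a denominator of 100, an estimate of 1,000,000 gas becomes 1,200,000, and an overflowing multiplication saturates to u64::MAX. A small self-contained check:

fn pad_gas(estimated: u64) -> u64 {
    // Same arithmetic as the filler: estimated * 120 / 100, saturating on overflow.
    estimated
        .checked_mul(120)
        .and_then(|v| v.checked_div(100))
        .unwrap_or(u64::MAX)
}

#[test]
fn padding_adds_twenty_percent() {
    assert_eq!(pad_gas(1_000_000), 1_200_000);
    assert_eq!(pad_gas(u64::MAX), u64::MAX); // multiplication overflow saturates
}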
@@ -0,0 +1,7 @@
mod concurrency_limiter;
mod fallback_gas_filler;
mod provider;

pub use concurrency_limiter::*;
pub use fallback_gas_filler::*;
pub use provider::*;
@@ -0,0 +1,132 @@
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};

use alloy::{
    network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
    providers::{
        Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
        fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
    },
    rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::{Instrument, debug, info, info_span};

use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};

pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
        JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
        WalletFiller<W>,
    >,
    RootProvider<N>,
    N,
>;

pub async fn construct_concurrency_limited_provider<N, W>(
    rpc_url: &str,
    fallback_gas_filler: FallbackGasFiller,
    chain_id_filler: ChainIdFiller,
    nonce_filler: NonceFiller,
    wallet: W,
) -> Result<ConcreteProvider<N, W>>
where
    N: Network<TransactionRequest: TransactionBuilder4844>,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // This is a global limit on the RPC concurrency that applies to all of the providers created
    // by the framework. With this limit we can have a maximum of N concurrent requests at any
    // point in time and no more than that. This is done in an effort to stabilize the framework
    // against some of the intermittent issues that we've been seeing related to RPC calls.
    static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
        LazyLock::new(|| ConcurrencyLimiterLayer::new(500));

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;

    let provider = ProviderBuilder::new()
        .disable_recommended_fillers()
        .network::<N>()
        .filler(fallback_gas_filler)
        .filler(chain_id_filler)
        .filler(nonce_filler)
        .wallet(wallet)
        .connect_client(client);

    Ok(provider)
}

pub async fn execute_transaction<N, W>(
    provider: ConcreteProvider<N, W>,
    transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
    N: Network<
        TransactionRequest: TransactionBuilder4844,
        TxEnvelope = <Ethereum as Network>::TxEnvelope,
    >,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    let sendable_transaction = provider
        .fill(transaction)
        .await
        .context("Failed to fill transaction")?;

    let transaction_envelope = sendable_transaction
        .try_into_envelope()
        .context("Failed to convert transaction into an envelope")?;
    let tx_hash = *transaction_envelope.tx_hash();

    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
        Ok(pending_transaction) => pending_transaction,
        Err(error) => {
            let error_string = error.to_string();

            if error_string.contains("Transaction Already Imported") {
                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
            } else {
                return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
            }
        }
    };
    debug!(%tx_hash, "Submitted Transaction");

    pending_transaction.set_timeout(Some(Duration::from_secs(120)));
    let tx_hash = pending_transaction.watch().await.context(format!(
        "Transaction inclusion watching timed out for {tx_hash}"
    ))?;

    poll(
        Duration::from_secs(60),
        PollingWaitBehavior::Constant(Duration::from_secs(3)),
        || {
            let provider = provider.clone();

            async move {
                match provider.get_transaction_receipt(tx_hash).await {
                    Ok(Some(receipt)) => {
                        info!("Found the transaction receipt");
                        Ok(ControlFlow::Break(receipt))
                    }
                    _ => Ok(ControlFlow::Continue(())),
                }
            }
        },
    )
    .instrument(info_span!("Polling for receipt", %tx_hash))
    .await
    .context(format!("Polling for receipt failed for {tx_hash}"))
}
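A sketch of calling the constructor, following the call site in the geth node earlier in this diff. The chain id value and wallet are assumptions lifted from that context (the geth tests assert a chain id of 420_420_420); the IPC path is a placeholder.

use std::sync::Arc;
use alloy::network::{Ethereum, EthereumWallet};
use alloy::providers::fillers::{CachedNonceManager, ChainIdFiller, NonceFiller};

// Hypothetical values; the geth node passes its own wallet, chain id, and IPC path.
async fn connect(ipc_path: &str, wallet: Arc<EthereumWallet>) -> anyhow::Result<()> {
    let provider = construct_concurrency_limited_provider::<Ethereum, _>(
        ipc_path,
        FallbackGasFiller::default(),
        ChainIdFiller::new(Some(420_420_420)),
        NonceFiller::new(CachedNonceManager::default()),
        wallet,
    )
    .await?;
    let block = provider.get_block_number().await?;
    println!("connected at block {block}");
    Ok(())
}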
@@ -13,7 +13,7 @@ revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-compiler = { workspace = true }

alloy-primitives = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
@@ -8,14 +8,15 @@ use std::{
    time::{SystemTime, UNIX_EPOCH},
};

use alloy_primitives::Address;
use alloy::primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Context, TestingPlatform};
use revive_dt_config::Context;
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
    broadcast::{Sender, channel},
@@ -84,11 +85,8 @@ impl ReportAggregator {
                RunnerEvent::TestIgnored(event) => {
                    self.handle_test_ignored_event(*event);
                }
                RunnerEvent::LeaderNodeAssigned(event) => {
                    self.handle_leader_node_assigned_event(*event);
                }
                RunnerEvent::FollowerNodeAssigned(event) => {
                    self.handle_follower_node_assigned_event(*event);
                RunnerEvent::NodeAssigned(event) => {
                    self.handle_node_assigned_event(*event);
                }
                RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
                    self.handle_pre_link_contracts_compilation_succeeded_event(*event)
@@ -108,6 +106,10 @@ impl ReportAggregator {
                RunnerEvent::ContractDeployed(event) => {
                    self.handle_contract_deployed_event(*event);
                }
                RunnerEvent::Completion(event) => {
                    self.handle_completion(*event);
                    break;
                }
            }
        }
        debug!("Report aggregation completed");
@@ -257,28 +259,15 @@ impl ReportAggregator {
        let _ = self.listener_tx.send(event);
    }

    fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) {
    fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            node_designation: NodeDesignation::Leader,
            platform_identifier: event.platform_identifier,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform: event.platform,
            connection_string: event.connection_string,
        });
    }

    fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            node_designation: NodeDesignation::Follower,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform: event.platform,
            platform_identifier: event.platform_identifier,
            connection_string: event.connection_string,
        });
    }
@@ -397,6 +386,10 @@ impl ReportAggregator {
            .insert(event.contract_instance, event.address);
    }

    fn handle_completion(&mut self, _: CompletionEvent) {
        self.runner_rx.close();
    }

    fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
        self.report
            .test_case_information
@@ -413,19 +406,16 @@ impl ReportAggregator {
        specifier: &ExecutionSpecifier,
    ) -> &mut ExecutionInformation {
        let test_case_report = self.test_case_report(&specifier.test_specifier);
        match specifier.node_designation {
            NodeDesignation::Leader => test_case_report
                .leader_execution_information
                .get_or_insert_default(),
            NodeDesignation::Follower => test_case_report
                .follower_execution_information
                .get_or_insert_default(),
        }
        test_case_report
            .platform_execution
            .entry(specifier.platform_identifier)
            .or_default()
            .get_or_insert_default()
    }
}

#[serde_as]
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Report {
    /// The context that the tool was started up with.
    pub context: Context,
@@ -450,22 +440,18 @@ impl Report {
    }
}

#[derive(Clone, Debug, Serialize, Default)]
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct TestCaseReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
    /// Information related to the execution on the leader.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub leader_execution_information: Option<ExecutionInformation>,
    /// Information related to the execution on the follower.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub follower_execution_information: Option<ExecutionInformation>,
    /// Information related to the execution on one of the platforms.
    pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
    /// The test case succeeded.
@@ -488,19 +474,19 @@ pub enum TestCaseStatus {
    },
}

/// Information related to the leader or follower node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
/// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestCaseNodeInformation {
    /// The ID of the node that this case is being executed on.
    pub id: usize,
    /// The platform of the node.
    pub platform: TestingPlatform,
    pub platform_identifier: PlatformIdentifier,
    /// The connection string of the node.
    pub connection_string: String,
}

/// Execution information tied to the leader or the follower.
#[derive(Clone, Debug, Default, Serialize)]
/// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExecutionInformation {
    /// Information related to the node assigned to this test case.
    #[serde(skip_serializing_if = "Option::is_none")]
@@ -520,7 +506,7 @@ pub struct ExecutionInformation {
}

/// Information related to compilation
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
    /// The compilation was successful.
@@ -2,9 +2,9 @@

use std::{path::PathBuf, sync::Arc};

use revive_dt_common::define_wrapper_type;
use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, input::StepIdx};
use revive_dt_format::{case::CaseIdx, steps::StepPath};
use serde::{Deserialize, Serialize};

define_wrapper_type!(
@@ -22,22 +22,16 @@ pub struct TestSpecifier {
}

/// An absolute path for a test that also includes information about the node that it's assigned to
/// and whether it's the leader or follower.
/// and what platform it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExecutionSpecifier {
    pub test_specifier: Arc<TestSpecifier>,
    pub node_id: usize,
    pub node_designation: NodeDesignation,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum NodeDesignation {
    Leader,
    Follower,
    pub platform_identifier: PlatformIdentifier,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier {
    pub execution_specifier: Arc<ExecutionSpecifier>,
    pub step_idx: StepIdx,
    pub step_idx: StepPath,
}
@@ -3,11 +3,11 @@

use std::{collections::BTreeMap, path::PathBuf, sync::Arc};

use alloy_primitives::Address;
use alloy::primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
@@ -412,14 +412,14 @@ macro_rules! define_event {
            pub fn execution_specific_reporter(
                &self,
                node_id: impl Into<usize>,
                node_designation: impl Into<$crate::common::NodeDesignation>
                platform_identifier: impl Into<PlatformIdentifier>
            ) -> [< $ident ExecutionSpecificReporter >] {
                [< $ident ExecutionSpecificReporter >] {
                    reporter: self.reporter.clone(),
                    execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
                        test_specifier: self.test_specifier.clone(),
                        node_id: node_id.into(),
                        node_designation: node_designation.into(),
                        platform_identifier: platform_identifier.into(),
                    })
                }
            }
@@ -434,7 +434,7 @@ macro_rules! define_event {
        }

        /// A reporter that's tied to a specific execution of the test case such as execution on
        /// a specific node like the leader or follower.
        /// a specific node from a specific platform.
        #[derive(Clone, Debug)]
        pub struct [< $ident ExecutionSpecificReporter >] {
            $vis reporter: [< $ident Reporter >],
@@ -520,25 +520,14 @@ define_event! {
        /// A reason for the failure of the test.
        reason: String,
    },
    /// An event emitted when the test case is assigned a leader node.
    LeaderNodeAssigned {
    /// An event emitted when the test case is assigned a platform node.
    NodeAssigned {
        /// A specifier for the test that the assignment is for.
        test_specifier: Arc<TestSpecifier>,
        /// The ID of the node that this case is being executed on.
        id: usize,
        /// The platform of the node.
        platform: TestingPlatform,
        /// The connection string of the node.
        connection_string: String,
    },
    /// An event emitted when the test case is assigned a follower node.
    FollowerNodeAssigned {
        /// A specifier for the test that the assignment is for.
        test_specifier: Arc<TestSpecifier>,
        /// The ID of the node that this case is being executed on.
        id: usize,
        /// The platform of the node.
        platform: TestingPlatform,
        /// The identifier of the platform used.
        platform_identifier: PlatformIdentifier,
        /// The connection string of the node.
        connection_string: String,
    },
@@ -624,6 +613,8 @@ define_event! {
        /// The address of the contract.
        address: Address
    },
    /// Reports the completion of the run.
    Completion {}
}
}
@@ -75,7 +75,9 @@ ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
    "path": "$ABSOLUTE_PATH"
    "paths": [
        "$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
    ]
}
EOF

@@ -89,14 +91,18 @@ echo "This may take a while..."
echo ""

# Run the tool
RUST_LOG="error" cargo run --release -- execute-tests \
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
    --platform geth-evm-solc \
    --corpus "$CORPUS_FILE" \
    --working-directory "$WORKDIR" \
    --concurrency.number-of-nodes 5 \
    --concurrency.number-of-nodes 10 \
    --concurrency.number-of-threads 5 \
    --concurrency.ignore-concurrency-limit \
    --wallet.additional-keys 100000 \
    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
    2> output.log
    > logs.log

echo -e "${GREEN}=== Test run completed! ===${NC}"
@@ -25,7 +25,7 @@
        "null"
      ],
      "items": {
        "type": "string"
        "$ref": "#/$defs/VmIdentifier"
      }
    },
    "cases": {
@@ -95,6 +95,26 @@
    "cases"
  ],
  "$defs": {
    "VmIdentifier": {
      "description": "An enum representing the identifiers of the supported VMs.",
      "oneOf": [
        {
          "description": "The Ethereum virtual machine.",
          "type": "string",
          "const": "evm"
        },
        {
          "description": "The EraVM virtual machine.",
          "type": "string",
          "const": "eravm"
        },
        {
          "description": "Polkadot's PolkaVM RISC-V based virtual machine.",
          "type": "string",
          "const": "polkavm"
        }
      ]
    },
    "Case": {
      "type": "object",
      "properties": {
@@ -168,19 +188,27 @@
      "anyOf": [
        {
          "description": "A function call or an invocation to some function on some smart contract.",
          "$ref": "#/$defs/Input"
          "$ref": "#/$defs/FunctionCallStep"
        },
        {
          "description": "A step for performing a balance assertion on some account or contract.",
          "$ref": "#/$defs/BalanceAssertion"
          "$ref": "#/$defs/BalanceAssertionStep"
        },
        {
          "description": "A step for asserting that the storage of some contract or account is empty.",
          "$ref": "#/$defs/StorageEmptyAssertion"
          "$ref": "#/$defs/StorageEmptyAssertionStep"
        },
        {
          "description": "A special step for repeating a bunch of steps a certain number of times.",
          "$ref": "#/$defs/RepeatStep"
        },
        {
          "description": "A step type that allows for a new account address to be allocated and to later on be used\nas the caller in another step.",
          "$ref": "#/$defs/AllocateAccountStep"
        }
      ]
    },
    "Input": {
    "FunctionCallStep": {
      "description": "This is an input step which is a transaction description that the framework translates into a\ntransaction and executes on the nodes.",
      "type": "object",
      "properties": {
@@ -353,9 +381,13 @@
      "properties": {
        "address": {
          "description": "An optional field of the address of the emitter of the event.",
          "type": [
            "string",
            "null"
          "anyOf": [
            {
              "$ref": "#/$defs/StepAddress"
            },
            {
              "type": "null"
            }
          ]
        },
        "topics": {
@@ -375,6 +407,10 @@
        "values"
      ]
    },
    "StepAddress": {
      "description": "An address type that might either be an address literal or a resolvable address.",
      "type": "string"
    },
    "EtherValue": {
      "description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
      "type": "string"
@@ -394,7 +430,7 @@
        "return_data"
      ]
    },
    "BalanceAssertion": {
    "BalanceAssertionStep": {
      "description": "This represents a balance assertion step where the framework needs to query the balance of some\naccount or contract and assert that it's some amount.",
      "type": "object",
      "properties": {
@@ -407,7 +443,7 @@
        },
        "address": {
          "description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
          "type": "string"
          "$ref": "#/$defs/StepAddress"
        },
        "expected_balance": {
          "description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
@@ -419,7 +455,8 @@
        "expected_balance"
      ]
    },
    "StorageEmptyAssertion": {
    "StorageEmptyAssertionStep": {
      "description": "This represents an assertion for the storage of some contract or account and whether it's empty\nor not.",
      "type": "object",
      "properties": {
        "comment": {
@@ -431,7 +468,7 @@
        },
        "address": {
          "description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
          "type": "string"
          "$ref": "#/$defs/StepAddress"
        },
        "is_storage_empty": {
          "description": "A boolean of whether the storage of the address is empty or not.",
@@ -443,6 +480,55 @@
        "is_storage_empty"
      ]
    },
    "RepeatStep": {
      "description": "This represents a repetition step which is a special step type that allows for a sequence of\nsteps to be repeated (on different drivers) a certain number of times.",
      "type": "object",
      "properties": {
        "comment": {
          "description": "An optional comment on the repetition step.",
          "type": [
            "string",
            "null"
          ]
        },
        "repeat": {
          "description": "The number of repetitions that the steps should be repeated for.",
          "type": "integer",
          "format": "uint",
          "minimum": 0
        },
        "steps": {
          "description": "The sequence of steps to repeat for the above defined number of repetitions.",
          "type": "array",
          "items": {
            "$ref": "#/$defs/Step"
          }
        }
      },
      "required": [
        "repeat",
        "steps"
      ]
    },
    "AllocateAccountStep": {
      "type": "object",
      "properties": {
        "comment": {
          "description": "An optional comment on the account allocation step.",
          "type": [
            "string",
            "null"
          ]
        },
        "allocate_account": {
          "description": "An instruction to allocate a new account with the value being the variable name of that\naccount. This must start with `$VARIABLE:` and then be followed by the variable name of the\naccount.",
          "type": "string"
        }
      },
      "required": [
        "allocate_account"
      ]
    },
    "ContractPathAndIdent": {
      "description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
      "type": "string"
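To make the new step shapes concrete, here is a small hedged example checked with serde_json: a repeat step wrapping an account allocation. The field names come straight from the schema above; the variable name is illustrative.

fn repeat_step_example() -> serde_json::Result<()> {
    // An instance of the new `RepeatStep` and `AllocateAccountStep` shapes.
    let step: serde_json::Value = serde_json::from_str(
        r#"{
            "comment": "allocate a fresh caller three times",
            "repeat": 3,
            "steps": [
                { "allocate_account": "$VARIABLE:Caller" }
            ]
        }"#,
    )?;
    assert_eq!(step["repeat"], 3);
    assert_eq!(step["steps"][0]["allocate_account"], "$VARIABLE:Caller");
    Ok(())
}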