Mirror of https://github.com/pezkuwichain/revive-differential-tests.git (synced 2026-04-22 09:08:02 +00:00)
Compare commits
97 Commits
| SHA1 |
|---|
| 93f6e5efdf |
| ecd6252666 |
| 1d1642887b |
| db0522aa71 |
| ca5cad8e43 |
| 33911b5ce3 |
| 5b730d914e |
| 9f7a314b20 |
| 5cd3dd8c83 |
| 8d15f87ff0 |
| 566dd06d9a |
| 5c30e8a5bf |
| c2ba2cfed6 |
| 3dda739cef |
| 97e3f8bbff |
| 7189361a58 |
| 9b700bfec2 |
| 98b62d705f |
| 1a894f791a |
| c2526e48e7 |
| 7878f68c26 |
| 8b1afc36a3 |
| 60328cd493 |
| eb264fcc7b |
| 84b139d3b4 |
| d93824d973 |
| bec5a7e390 |
| 85033cfead |
| 76d6a154c1 |
| c58551803d |
| 185edcfad9 |
| 09d56f5177 |
| a59e287fa1 |
| f2045db0e9 |
| 5a11f44673 |
| 46aea0890d |
| 9b40c9b9e3 |
| f67a9bf643 |
| 67d767ffde |
| f7fbe094ec |
| 90b2dd4cfe |
| 64d63ef999 |
| 757bfbe116 |
| 8619e7feb0 |
| edba49b301 |
| 9980926d40 |
| ff993d44a5 |
| 8cbb1a9f77 |
| 56c2fe8c0c |
| 330a773a1c |
| f51693cb9f |
| 4db7009640 |
| 5a36e242ec |
| 33329632b5 |
| 429f2e92a2 |
| 65f41f2038 |
| 3ed8a1ca1c |
| 2923d675cd |
| 8f5bcf08ad |
| 90fb89adc0 |
| b03ad3027e |
| 972f3b6d5b |
| 6f4aa731ab |
| 589a5dc988 |
| c6d55515be |
| a9970eb2bb |
| 2259942363 |
| 0b97d7dc29 |
| 2bee2d5c8b |
| 854e8d9690 |
| 2d517784dd |
| baa11ad28f |
| c2e65f9e33 |
| 14888f9767 |
| 3e99d1c2a5 |
| 4e234aa1bd |
| b204de5484 |
| 5eb3a0e1b5 |
| 772bd217c3 |
| 0513a4befb |
| de7c7d6703 |
| 3a537c2812 |
| 4ab79ed97e |
| ee97b62e70 |
| e9b5a06aec |
| 534170db6f |
| 090b56c46a |
| 547563e718 |
| c8eb8cf7b0 |
| 3b26e1e1d6 |
| 1bc20d088f |
| 10bfaed461 |
| 399f7820cd |
| ae1174febe |
| 38b42560ec |
| 8009f5880c |
| c590fa7bfd |
@@ -0,0 +1,191 @@
name: Test workflow

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
    types: [opened, synchronize]

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  CARGO_TERM_COLOR: always

jobs:
  cache-polkadot:
    name: Build and cache Polkadot binaries on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-24.04, macos-14]

    steps:
      - name: Checkout repo and submodules
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Install dependencies (Linux)
        if: matrix.os == 'ubuntu-24.04'
        run: |
          sudo apt-get update
          sudo apt-get install -y protobuf-compiler clang libclang-dev
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src

      - name: Install dependencies (macOS)
        if: matrix.os == 'macos-14'
        run: |
          brew install protobuf
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src

      - name: Cache binaries
        id: cache
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/substrate-node
            ~/.cargo/bin/eth-rpc
          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}

      - name: Build substrate-node
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          cd polkadot-sdk
          cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli

      - name: Build eth-rpc
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          cd polkadot-sdk
          cargo install --path substrate/frame/revive/rpc --bin eth-rpc

  ci:
    name: CI on ${{ matrix.os }}
    needs: cache-polkadot
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-24.04, macos-14]

    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Restore binaries from cache
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/substrate-node
            ~/.cargo/bin/eth-rpc
          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}

      - name: Setup Rust toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          rustflags: ""

      - name: Add wasm32 target and formatting
        run: |
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src rustfmt clippy

      - name: Install Geth on Ubuntu
        if: matrix.os == 'ubuntu-24.04'
        run: |
          sudo add-apt-repository -y ppa:ethereum/ethereum
          sudo apt-get update
          sudo apt-get install -y protobuf-compiler

          sudo apt-get install -y solc

          # We were facing some issues in CI with the 1.16.* versions of geth, specifically on
          # Ubuntu. Eventually, we found out that the last version of geth that worked in our CI
          # was version 1.15.11, so this is the version that we use in CI. The PPA sadly does not
          # have historic versions of Geth, so we resort to downloading pre-built binaries for
          # Geth and the surrounding tools, which is what the following parts of the script do.

          sudo apt-get install -y wget ca-certificates tar
          ARCH=$(uname -m)
          if [ "$ARCH" = "x86_64" ]; then
            URL="https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.15.11-36b2371c.tar.gz"
          elif [ "$ARCH" = "aarch64" ]; then
            URL="https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-arm64-1.15.11-36b2371c.tar.gz"
          else
            echo "Unsupported architecture: $ARCH"
            exit 1
          fi
          wget -qO- "$URL" | sudo tar xz -C /usr/local/bin --strip-components=1
          geth --version

          curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
          chmod +x resolc
          sudo mv resolc /usr/local/bin

      - name: Install Geth on macOS
        if: matrix.os == 'macos-14'
        run: |
          brew tap ethereum/ethereum
          brew install ethereum protobuf

          brew install solidity

          curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
          chmod +x resolc
          sudo mv resolc /usr/local/bin

      - name: Install Kurtosis on macOS
        if: matrix.os == 'macos-14'
        run: brew install kurtosis-tech/tap/kurtosis-cli

      - name: Install Kurtosis on Ubuntu
        if: matrix.os == 'ubuntu-24.04'
        run: |
          echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
          sudo apt update
          sudo apt install kurtosis-cli

      - name: Machete
        uses: bnjbvr/cargo-machete@v0.7.1

      - name: Format
        run: make format

      - name: Clippy
        run: make clippy

      - name: Check substrate-node version
        run: substrate-node --version

      - name: Check eth-rpc version
        run: eth-rpc --version

      - name: Check resolc version
        run: resolc --version

      - name: Test Formatting
        run: make format

      - name: Test Clippy
        run: make clippy

      - name: Test Machete
        run: make machete

      - name: Unit Tests
        if: matrix.os == 'ubuntu-24.04'
        run: cargo test --workspace -- --nocapture

      # We can't install docker in the macOS image used in CI, so we need to skip the
      # Kurtosis and lighthouse related tests when running the CI on macOS.
      - name: Unit Tests
        if: matrix.os == 'macos-14'
        run: |
          cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
.gitignore
+16
@@ -0,0 +1,16 @@
/target
.vscode/
.DS_Store
node_modules
/*.json

# We do not want to commit any log files that we produce from running the code locally, so this
# is added to the .gitignore file.
*.log

profile.json.gz
resolc-compiler-tests
workdir

!/schema.json
!/dev-genesis.json
.gitmodules
@@ -0,0 +1,3 @@
[submodule "polkadot-sdk"]
	path = polkadot-sdk
	url = https://github.com/paritytech/polkadot-sdk.git

Cargo.lock (Generated)
+7271
File diff suppressed because it is too large
Cargo.toml
+101
@@ -0,0 +1,101 @@
[workspace]
resolver = "2"
members = ["crates/*"]

[workspace.package]
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT/Apache-2.0"
edition = "2024"
repository = "https://github.com/paritytech/revive-differential-testing.git"
rust-version = "1.87.0"

[workspace.dependencies]
revive-dt-common = { version = "0.1.0", path = "crates/common" }
revive-dt-compiler = { version = "0.1.0", path = "crates/compiler" }
revive-dt-config = { version = "0.1.0", path = "crates/config" }
revive-dt-core = { version = "0.1.0", path = "crates/core" }
revive-dt-format = { version = "0.1.0", path = "crates/format" }
revive-dt-node = { version = "0.1.0", path = "crates/node" }
revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interaction" }
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }

alloy-primitives = "1.2.1"
alloy-sol-types = "1.2.1"
anyhow = "1.0"
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
clap = { version = "4", features = ["derive"] }
dashmap = { version = "6.1.0" }
foundry-compilers-artifacts = { version = "0.18.0" }
futures = { version = "0.3.31" }
hex = "0.4.3"
regex = "1"
moka = "0.12.10"
paste = "1.0.15"
reqwest = { version = "0.12.15", features = ["json"] }
once_cell = "1.21"
schemars = { version = "1.0.4", features = ["semver1"] }
semver = { version = "1.0", features = ["serde"] }
serde = { version = "1.0", default-features = false, features = ["derive"] }
serde_json = { version = "1.0", default-features = false, features = [
    "arbitrary_precision",
    "std",
    "unbounded_depth",
] }
serde_with = { version = "3.14.0", features = ["hex"] }
serde_yaml_ng = { version = "0.10.0" }
sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
thiserror = "2"
tokio = { version = "1.47.0", default-features = false, features = [
    "rt-multi-thread",
    "process",
    "rt",
] }
uuid = { version = "1.8", features = ["v4"] }
tracing = { version = "0.1.41" }
tracing-appender = { version = "0.2.3" }
tracing-subscriber = { version = "0.3.19", default-features = false, features = [
    "fmt",
    "json",
    "env-filter",
] }
indexmap = { version = "2.10.0", default-features = false }

# revive compiler
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }

[workspace.dependencies.alloy]
version = "1.0.22"
default-features = false
features = [
    "json-abi",
    "providers",
    "provider-ipc",
    "provider-ws",
    "provider-debug-api",
    "reqwest",
    "rpc-types",
    "signer-local",
    "std",
    "network",
    "serde",
    "rpc-types-eth",
    "genesis",
]

[profile.bench]
inherits = "release"
lto = true
codegen-units = 1

[workspace.lints.clippy]
Makefile
@@ -0,0 +1,15 @@
.PHONY: format clippy test machete

format:
	cargo fmt --all -- --check

clippy:
	cargo clippy --all-features --workspace -- --deny warnings

machete:
	cargo install cargo-machete
	cargo machete crates

test: format clippy machete
	cargo test --workspace -- --nocapture
README.md
@@ -1,2 +1,243 @@
# revive-differential-tests
revive differential testing framework
<div align="center">
  <h1><code>Revive Differential Tests</code></h1>

  <p>
    <strong>Differential testing for Ethereum-compatible smart contract stacks</strong>
  </p>
</div>

This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:

- Geth (EVM reference implementation)
- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)

Use it to:

- Detect observable differences between platforms (execution success, logs, state changes)
- Ensure reproducible builds across compilers/hosts
- Run end-to-end regression suites

This framework uses the [MatterLabs tests format](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity) for declarative tests, which is composed of the following:

- Metadata files: a metadata file is akin to a module of tests in Rust.
- Each metadata file contains multiple cases: a case is akin to a single Rust test, just as a module can contain multiple tests.
- Each case contains multiple steps and assertions: these are akin to the statements that make up a Rust test.

Metadata files are JSON files, but Solidity files can also be metadata files if they include inline metadata provided as a comment at the top of the contract.

All of the steps contained within each test case are either:

- Transactions that need to be submitted, plus assertions to run on the submitted transactions.
- Assertions on the state of the chain (e.g., account balances, storage, etc.).
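
For orientation, here is a minimal sketch of what a metadata file might look like. The field names (`cases`, `inputs`, `instance`, `method`, `calldata`, `expected`, `contracts`) mirror the full example shown further down this page; the contract and values here are purely illustrative.

```json
{
  "cases": [
    {
      "name": "simple_getter",
      "inputs": [
        {
          "instance": "Counter",
          "method": "count",
          "calldata": [],
          "expected": ["0"]
        }
      ]
    }
  ],
  "contracts": {
    "Counter": "Counter.sol:Counter"
  }
}
```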

All of the transactions submitted by this tool to the test nodes follow a similar logic to what wallets do: we first use alloy to estimate the transaction fees, attach them to the transaction, submit it to the node, and then await the transaction receipt.
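
As a rough sketch of that flow, assuming alloy's high-level provider API (method names such as `estimate_eip1559_fees` and `get_receipt` are from memory of alloy and may differ between versions; this is not the tool's actual code):

```rust
use alloy::network::TransactionBuilder;
use alloy::providers::Provider;
use alloy::rpc::types::TransactionRequest;

/// A sketch of the wallet-like submission flow described above: estimate the
/// fees, attach them to the transaction, submit it, and await the receipt.
async fn submit(provider: &impl Provider, mut tx: TransactionRequest) -> anyhow::Result<()> {
    // Ask the node for EIP-1559 fee estimates, as a wallet would.
    let fees = provider.estimate_eip1559_fees().await?;
    tx.set_max_fee_per_gas(fees.max_fee_per_gas);
    tx.set_max_priority_fee_per_gas(fees.max_priority_fee_per_gas);

    // Submit the fee-annotated transaction and block until the receipt arrives;
    // the receipt is what the test assertions are later run against.
    let receipt = provider.send_transaction(tx).await?.get_receipt().await?;
    anyhow::ensure!(receipt.status(), "transaction reverted");
    Ok(())
}
```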

This repository contains none of the tests; it only contains the testing framework, i.e. the test runner. The tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository, which is a clone of [MatterLabs' test suite](https://github.com/matter-labs/era-compiler-tests) with some modifications and adjustments made to suit our use case.

## Requirements

This section describes the dependencies that this framework requires at runtime. Compiling the framework itself is straightforward, and no dependencies beyond what's specified in the `Cargo.toml` file should be needed.

- Stable Rust
- Geth - When doing differential testing against the PVM, we submit transactions to a Geth node and to Kitchensink and compare them.
- Kitchensink - The Substrate-based node that transactions are submitted to, alongside Geth, for the comparison.
- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
- Solc - This is actually a transitive dependency: while this tool doesn't require solc (it downloads the versions that it requires), resolc requires that solc is installed and available in the path.
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires Docker to be installed, since it runs everything inside of Docker containers.

All of the above need to be installed and available in the path in order for the tool to work.
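
One quick way to confirm everything is reachable is to print each tool's version, mirroring the version checks that the CI workflow above performs (the `kurtosis version` subcommand is an assumption about the Kurtosis CLI; the `--version` flags all appear in the CI workflow):

```bash
geth --version
solc --version
resolc --version
substrate-node --version   # Kitchensink
eth-rpc --version
kurtosis version           # assumed subcommand; Kurtosis itself also needs Docker running
```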

## Running The Tool

This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the repository using `cargo run --release`. The help command of the tool gives you all of the information you need about each of the options and flags that the tool offers.

```bash
$ cargo run --release -- execute-tests --help
Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently

Usage: retester execute-tests [OPTIONS]

Options:
  -w, --working-directory <WORKING_DIRECTORY>
          The working directory that the program will use for all of the temporary artifacts needed at runtime.

          If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.

          [default: ]

  -p, --platform <PLATFORMS>
          The set of platforms that the differential tests should run on

          [default: geth-evm-solc,revive-dev-node-polkavm-resolc]

          Possible values:
          - geth-evm-solc:               The Go-ethereum reference full node EVM implementation with the solc compiler
          - kitchensink-polkavm-resolc:  The kitchensink node with the PolkaVM backend with the resolc compiler
          - kitchensink-revm-solc:       The kitchensink node with the REVM backend with the solc compiler
          - revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
          - revive-dev-node-revm-solc:   The revive dev node with the REVM backend with the solc compiler

  -c, --corpus <CORPUS>
          A list of test corpus JSON files to be tested

  -h, --help
          Print help (see a summary with '-h')

Solc Configuration:
      --solc.version <VERSION>
          Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases

          [default: 0.8.29]

Resolc Configuration:
      --resolc.path <resolc.path>
          Specifies the path of the resolc compiler to be used by the tool.

          If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.

          [default: resolc]

Geth Configuration:
      --geth.path <geth.path>
          Specifies the path of the geth node to be used by the tool.

          If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.

          [default: geth]

      --geth.start-timeout-ms <geth.start-timeout-ms>
          The amount of time to wait upon startup before considering that the node timed out

          [default: 5000]

Kitchensink Configuration:
      --kitchensink.path <kitchensink.path>
          Specifies the path of the kitchensink node to be used by the tool.

          If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.

          [default: substrate-node]

      --kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
          The amount of time to wait upon startup before considering that the node timed out

          [default: 5000]

      --kitchensink.dont-use-dev-node
          This configures the tool to use Kitchensink instead of using the revive-dev-node

Revive Dev Node Configuration:
      --revive-dev-node.path <revive-dev-node.path>
          Specifies the path of the revive dev node to be used by the tool.

          If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.

          [default: revive-dev-node]

      --revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
          The amount of time to wait upon startup before considering that the node timed out

          [default: 5000]

Eth RPC Configuration:
      --eth-rpc.path <eth-rpc.path>
          Specifies the path of the ETH RPC to be used by the tool.

          If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.

          [default: eth-rpc]

      --eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
          The amount of time to wait upon startup before considering that the node timed out

          [default: 5000]

Genesis Configuration:
      --genesis.path <genesis.path>
          Specifies the path of the genesis file to use for the nodes that are started.

          This is expected to be the path of a JSON geth genesis file.

Wallet Configuration:
      --wallet.default-private-key <DEFAULT_KEY>
          The private key of the default signer

          [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]

      --wallet.additional-keys <ADDITIONAL_KEYS>
          This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node

          [default: 100000]

Concurrency Configuration:
      --concurrency.number-of-nodes <NUMBER_OF_NODES>
          Determines the amount of nodes that will be spawned for each chain

          [default: 5]

      --concurrency.number-of-threads <NUMBER_OF_THREADS>
          Determines the amount of tokio worker threads that will be used

          [default: 16]

      --concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
          Determines the amount of concurrent tasks that will be spawned to run tests.

          Defaults to 10 x the number of nodes.

      --concurrency.ignore-concurrency-limit
          Determines if the concurrency limit should be ignored or not

Compilation Configuration:
      --compilation.invalidate-cache
          Controls if the compilation cache should be invalidated or not

Report Configuration:
      --report.include-compiler-input
          Controls if the compiler input is included in the final report

      --report.include-compiler-output
          Controls if the compiler output is included in the final report
```

To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:

```json
{
  "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
  "path": "resolc-compiler-tests/fixtures/solidity"
}
```

> [!NOTE]
> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.

The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.

The simplest command to run this tool is the following:

```bash
RUST_LOG="info" cargo run --release -- execute-tests \
    --platform geth-evm-solc \
    --corpus corp.json \
    --working-directory workdir \
    --concurrency.number-of-nodes 5 \
    --concurrency.ignore-concurrency-limit \
    > logs.log \
    2> output.log
```

The above command runs every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution are persisted in the `logs.log` file, and all of the output of the tool is persisted to the `output.log` file. If all you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to look at. However, if you're contributing to the tool, then the `logs.log` file will be very valuable.

If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:

```json
{
  "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
  "paths": [
    "path/to/a/single/metadata/file/I/want/to/run.json",
    "path/to/a/directory/to/find/all/metadata/files/within"
  ]
}
```
dev-genesis.json
@@ -0,0 +1,37 @@
{
  "config": {
    "chainId": 420420420,
    "homesteadBlock": 0,
    "eip150Block": 0,
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0,
    "constantinopleBlock": 0,
    "petersburgBlock": 0,
    "istanbulBlock": 0,
    "berlinBlock": 0,
    "londonBlock": 0,
    "arrowGlacierBlock": 0,
    "grayGlacierBlock": 0,
    "shanghaiTime": 0,
    "cancunTime": 0,
    "terminalTotalDifficulty": 0,
    "terminalTotalDifficultyPassed": true,
    "blobSchedule": {
      "cancun": {
        "target": 3,
        "max": 6,
        "baseFeeUpdateFraction": 3338477
      }
    }
  },
  "coinbase": "0xffffffffffffffffffffffffffffffffffffffff",
  "difficulty": "0x00",
  "extraData": "",
  "gasLimit": "0xffffffff",
  "nonce": "0x0000000000000042",
  "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp": "0x00",
  "alloc": {}
}
@@ -0,0 +1,337 @@
{
  "modes": [
    "Y >=0.8.9",
    "E"
  ],
  "cases": [
    {
      "name": "first",
      "inputs": [
        {
          "address": "0xdeadbeef00000000000000000000000000000042",
          "expected_balance": "1233"
        },
        {
          "address": "0xdeadbeef00000000000000000000000000000042",
          "is_storage_empty": true
        },
        {
          "address": "0xdeadbeef00000000000000000000000000000042",
          "is_storage_empty": false
        },
        {
          "instance": "WBTC_1",
          "method": "#deployer",
          "calldata": [
            "0x40",
            "0x80",
            "4",
            "0x5742544300000000000000000000000000000000000000000000000000000000",
            "14",
            "0x5772617070656420425443000000000000000000000000000000000000000000"
          ],
          "expected": [
            "WBTC_1.address"
          ]
        },
        {
          "instance": "WBTC_2",
          "method": "#deployer",
          "calldata": [
            "0x40",
            "0x80",
            "4",
            "0x5742544300000000000000000000000000000000000000000000000000000000",
            "14",
            "0x5772617070656420425443000000000000000000000000000000000000000000"
          ],
          "expected": [
            "WBTC_2.address"
          ]
        },
        {
          "instance": "Mooniswap",
          "method": "#deployer",
          "calldata": [
            "0x0000000000000000000000000000000000000000000000000000000000000060",
            "0x00000000000000000000000000000000000000000000000000000000000000c0",
            "0x0000000000000000000000000000000000000000000000000000000000000100",
            "0x0000000000000000000000000000000000000000000000000000000000000002",
            "WBTC_1.address",
            "WBTC_2.address",
            "4",
            "0x5742544300000000000000000000000000000000000000000000000000000000",
            "14",
            "0x5772617070656420425443000000000000000000000000000000000000000000"
          ],
          "expected": {
            "return_data": [
              "Mooniswap.address"
            ],
            "events": [
              {
                "topics": [
                  "0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0",
                  "0x0000000000000000000000000000000000000000000000000000000000000000",
                  "0xdeadbeef01000000000000000000000000000000"
                ],
                "values": []
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "WBTC_1",
          "method": "_mint",
          "calldata": [
            "0xdeadbeef00000000000000000000000000000042",
            "1000000000"
          ],
          "expected": {
            "return_data": [],
            "events": [
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0x0000000000000000000000000000000000000000000000000000000000000000",
                  "0xdeadbeef00000000000000000000000000000042"
                ],
                "values": [
                  "1000000000"
                ]
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "WBTC_2",
          "method": "_mint",
          "calldata": [
            "0xdeadbeef00000000000000000000000000000042",
            "1000000000"
          ],
          "expected": {
            "return_data": [],
            "events": [
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0x0000000000000000000000000000000000000000000000000000000000000000",
                  "0xdeadbeef00000000000000000000000000000042"
                ],
                "values": [
                  "1000000000"
                ]
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "WBTC_1",
          "caller": "0xdeadbeef00000000000000000000000000000042",
          "method": "approve",
          "calldata": [
            "Mooniswap.address",
            "500000000"
          ],
          "expected": {
            "return_data": [
              "0x0000000000000000000000000000000000000000000000000000000000000001"
            ],
            "events": [
              {
                "topics": [
                  "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "500000000"
                ]
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "WBTC_2",
          "caller": "0xdeadbeef00000000000000000000000000000042",
          "method": "approve",
          "calldata": [
            "Mooniswap.address",
            "500000000"
          ],
          "expected": {
            "return_data": [
              "0x0000000000000000000000000000000000000000000000000000000000000001"
            ],
            "events": [
              {
                "topics": [
                  "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "500000000"
                ]
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "Mooniswap",
          "caller": "0xdeadbeef00000000000000000000000000000042",
          "method": "deposit",
          "calldata": [
            "0x0000000000000000000000000000000000000000000000000000000000000040",
            "0x00000000000000000000000000000000000000000000000000000000000000a0",
            "0x0000000000000000000000000000000000000000000000000000000000000002",
            "10000000",
            "10000000",
            "0x0000000000000000000000000000000000000000000000000000000000000002",
            "1000000",
            "1000000"
          ],
          "expected": {
            "return_data": [
              "10000000"
            ],
            "events": [
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0x0000000000000000000000000000000000000000000000000000000000000000",
                  "Mooniswap.address"
                ],
                "values": [
                  "1000"
                ]
              },
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "10000000"
                ]
              },
              {
                "topics": [
                  "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "490000000"
                ]
              },
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "10000000"
                ]
              },
              {
                "topics": [
                  "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
                  "0xdeadbeef00000000000000000000000000000042",
                  "Mooniswap.address"
                ],
                "values": [
                  "490000000"
                ]
              },
              {
                "topics": [
                  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
                  "0x0000000000000000000000000000000000000000000000000000000000000000",
                  "0xdeadbeef00000000000000000000000000000042"
                ],
                "values": [
                  "10000000"
                ]
              },
              {
                "topics": [
                  "0x2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c4",
                  "0xdeadbeef00000000000000000000000000000042"
                ],
                "values": [
                  "10000000"
                ]
              }
            ],
            "exception": false
          }
        },
        {
          "instance": "Mooniswap",
          "caller": "0xdeadbeef00000000000000000000000000000042",
          "method": "swap",
          "calldata": [
            "WBTC_1.address",
            "WBTC_2.address",
            "5000",
            "5000",
            "0"
          ]
        }
      ],
      "expected": {
        "return_data": [
          "5000"
        ],
        "events": [
          {
            "topics": [
              "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
              "0xdeadbeef00000000000000000000000000000042",
              "Mooniswap.address"
            ],
            "values": [
              "5000"
            ]
          },
          {
            "topics": [
              "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925",
              "0xdeadbeef00000000000000000000000000000042",
              "Mooniswap.address"
            ],
            "values": [
              "489995000"
            ]
          }
        ],
        "exception": false
      }
    }
  ],
  "contracts": {
    "Mooniswap": "Mooniswap.sol:Mooniswap",
    "WBTC_1": "ERC20/ERC20.sol:ERC20",
    "WBTC_2": "ERC20/ERC20.sol:ERC20",
    "VirtualBalance": "Mooniswap.sol:VirtualBalance",
    "Math": "math/Math.sol:Math"
  },
  "libraries": {
    "Mooniswap.sol": {
      "VirtualBalance": "VirtualBalance"
    },
    "math/Math.sol": {
      "Math": "Math"
    }
  },
  "group": "Real life"
}
@@ -0,0 +1 @@

crates/common/Cargo.toml
@@ -0,0 +1,25 @@
[package]
name = "revive-dt-common"
description = "A library containing common concepts that other crates in the workspace can rely on"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
once_cell = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
strum = { workspace = true }
tokio = { workspace = true, default-features = false, features = ["time"] }

[lints]
workspace = true
crates/common/src/cached_fs.rs
@@ -0,0 +1,49 @@
//! This module implements a cached file system, allowing for results to be stored in-memory
//! rather than being queried from the file system again.

use std::fs;
use std::io::{Error, Result};
use std::path::{Path, PathBuf};

use moka::sync::Cache;
use once_cell::sync::Lazy;

pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
    static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_CACHE.get(path.as_path()) {
        Some(content) => Ok(content),
        None => {
            let content = fs::read(path.as_path())?;
            READ_CACHE.insert(path, content.clone());
            Ok(content)
        }
    }
}

pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
    let content = read(path)?;
    String::from_utf8(content).map_err(|_| {
        Error::new(
            std::io::ErrorKind::InvalidData,
            "The contents of the file are not valid UTF8",
        )
    })
}

pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
    static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_DIR_CACHE.get(path.as_path()) {
        Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
        None => {
            let entries = fs::read_dir(path.as_path())?
                .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
                .collect();
            READ_DIR_CACHE.insert(path.clone(), entries);
            // The entries are now cached, so this recursive call hits the `Some` branch above.
            Ok(read_dir(path).unwrap())
        }
    }
}
crates/common/src/fs/clear_dir.rs
@@ -0,0 +1,31 @@
use std::{
    fs::{read_dir, remove_dir_all, remove_file},
    path::Path,
};

use anyhow::{Context, Result};

/// This method clears the passed directory of all of the files and directories contained within
/// without deleting the directory.
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
    for entry in read_dir(path.as_ref())
        .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
    {
        let entry = entry.with_context(|| {
            format!(
                "Failed to read an entry in directory: {}",
                path.as_ref().display()
            )
        })?;
        let entry_path = entry.path();

        if entry_path.is_file() {
            remove_file(&entry_path)
                .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
        } else {
            remove_dir_all(&entry_path)
                .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
        }
    }
    Ok(())
}
crates/common/src/fs/mod.rs
@@ -0,0 +1,3 @@
mod clear_dir;

pub use clear_dir::*;
crates/common/src/futures/mod.rs
@@ -0,0 +1,3 @@
mod poll;

pub use poll::*;
crates/common/src/futures/poll.rs
@@ -0,0 +1,72 @@
use std::ops::ControlFlow;
use std::time::Duration;

use anyhow::{Context as _, Result, anyhow};

const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);

/// A function that polls a fallible future for some period of time and errors if it fails to
/// get a result after polling.
///
/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
/// returns an [`Err`], in which case the function stops polling and returns the error.
///
/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
/// the permitted polling duration then this function returns an [`Err`].
///
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
    polling_duration: Duration,
    polling_wait_behavior: PollingWaitBehavior,
    mut future: impl FnMut() -> F,
) -> Result<O>
where
    F: Future<Output = Result<ControlFlow<O, ()>>>,
{
    let mut retries = 0;
    let mut total_wait_duration = Duration::ZERO;
    let max_allowed_wait_duration = polling_duration;

    loop {
        if total_wait_duration >= max_allowed_wait_duration {
            break Err(anyhow!(
                "Polling failed after {} retries and a total of {:?} of wait time",
                retries,
                total_wait_duration
            ));
        }

        match future()
            .await
            .context("Polled future returned an error during polling loop")?
        {
            ControlFlow::Continue(()) => {
                let next_wait_duration = match polling_wait_behavior {
                    PollingWaitBehavior::Constant(duration) => duration,
                    PollingWaitBehavior::ExponentialBackoff => {
                        Duration::from_secs(2u64.pow(retries))
                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
                    }
                };
                let next_wait_duration =
                    next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
                total_wait_duration += next_wait_duration;
                retries += 1;

                tokio::time::sleep(next_wait_duration).await;
            }
            ControlFlow::Break(output) => {
                break Ok(output);
            }
        }
    }
}

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
    Constant(Duration),
    #[default]
    ExponentialBackoff,
}
crates/common/src/iterators/either_iter.rs
@@ -0,0 +1,21 @@
/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
pub enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B, T> Iterator for EitherIter<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}
crates/common/src/iterators/files_with_extension_iterator.rs
@@ -0,0 +1,91 @@
use std::{
    borrow::Cow,
    collections::HashSet,
    path::{Path, PathBuf},
};

/// An iterator that finds files of a certain extension in the provided directory. You can think of
/// this as a glob pattern similar to: `${path}/**/*.md`
pub struct FilesWithExtensionIterator {
    /// The set of allowed extensions that match the requirement and that should be returned when
    /// found.
    allowed_extensions: HashSet<Cow<'static, str>>,

    /// The set of directories to visit next. This iterator searches depth-first, so these
    /// directories will only be visited if we can't find any files in our state.
    directories_to_search: Vec<PathBuf>,

    /// The set of files matching the allowed extensions that were found. If there are entries in
    /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
    /// then we visit one of the next directories to visit.
    files_matching_allowed_extensions: Vec<PathBuf>,

    /// This option controls if the cached file system should be used or not. This could be
    /// better for certain cases where the entries in the directories do not change and therefore
    /// caching can be used.
    use_cached_fs: bool,
}

impl FilesWithExtensionIterator {
    pub fn new(root_directory: impl AsRef<Path>) -> Self {
        Self {
            allowed_extensions: Default::default(),
            directories_to_search: vec![root_directory.as_ref().to_path_buf()],
            files_matching_allowed_extensions: Default::default(),
            use_cached_fs: Default::default(),
        }
    }

    pub fn with_allowed_extension(
        mut self,
        allowed_extension: impl Into<Cow<'static, str>>,
    ) -> Self {
        self.allowed_extensions.insert(allowed_extension.into());
        self
    }

    pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
        self.use_cached_fs = use_cached_fs;
        self
    }
}

impl Iterator for FilesWithExtensionIterator {
    type Item = PathBuf;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
            return Some(file_path);
        };

        let directory_to_search = self.directories_to_search.pop()?;

        let iterator = if self.use_cached_fs {
            let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
                return self.next();
            };
            Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
        } else {
            let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
                return self.next();
            };
            Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
        };

        for entry_path in iterator.flatten() {
            if entry_path.is_dir() {
                self.directories_to_search.push(entry_path)
            } else if entry_path.is_file()
                && entry_path.extension().is_some_and(|ext| {
                    self.allowed_extensions
                        .iter()
                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
                })
            {
                self.files_matching_allowed_extensions.push(entry_path)
            }
        }

        self.next()
    }
}
crates/common/src/iterators/mod.rs
@@ -0,0 +1,5 @@
mod either_iter;
mod files_with_extension_iterator;

pub use either_iter::*;
pub use files_with_extension_iterator::*;
crates/common/src/lib.rs
@@ -0,0 +1,9 @@
//! This crate provides common concepts, functionality, types, macros, and more that other crates in
//! the workspace can benefit from.

pub mod cached_fs;
pub mod fs;
pub mod futures;
pub mod iterators;
pub mod macros;
pub mod types;
crates/common/src/macros/define_wrapper_type.rs
@@ -0,0 +1,140 @@
#[macro_export]
macro_rules! impl_for_wrapper {
    (Display, $ident: ident) => {
        #[automatically_derived]
        impl std::fmt::Display for $ident {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                std::fmt::Display::fmt(&self.0, f)
            }
        }
    };
    (FromStr, $ident: ident) => {
        #[automatically_derived]
        impl std::str::FromStr for $ident {
            type Err = anyhow::Error;

            fn from_str(s: &str) -> anyhow::Result<Self> {
                s.parse().map(Self).map_err(Into::into)
            }
        }
    };
}

/// Defines wrappers around types.
///
/// For example, the macro invocation seen below:
///
/// ```rust,ignore
/// define_wrapper_type!(CaseId => usize);
/// ```
///
/// Would define a wrapper type that looks like the following:
///
/// ```rust,ignore
/// pub struct CaseId(usize);
/// ```
///
/// And would also implement a number of methods on this type making it easier to use.
///
/// These wrapper types become very useful as they make the code a lot easier to read.
///
/// Take the following as an example:
///
/// ```rust,ignore
/// struct State {
///     contracts: HashMap<usize, HashMap<String, Vec<u8>>>
/// }
/// ```
///
/// In the above code it's hard to understand what the various types refer to or what to expect them
/// to contain.
///
/// With these wrapper types we're able to create code that's self-documenting in that the types
/// tell us what the code is referring to. The above code is transformed into
///
/// ```rust,ignore
/// struct State {
///     contracts: HashMap<CaseId, HashMap<ContractName, ContractByteCode>>
/// }
/// ```
///
/// Note that we follow the same syntax for defining wrapper structs but we do not permit the use of
/// generics.
#[macro_export]
macro_rules! define_wrapper_type {
    (
        $(#[$meta: meta])*
        $vis:vis struct $ident: ident($ty: ty)

        $(
            impl $($trait_ident: ident),*
        )?

        ;
    ) => {
        $(#[$meta])*
        $vis struct $ident($ty);

        impl $ident {
            pub fn new(value: impl Into<$ty>) -> Self {
                Self(value.into())
            }

            pub fn into_inner(self) -> $ty {
                self.0
            }

            pub fn as_inner(&self) -> &$ty {
                &self.0
            }
        }

        impl AsRef<$ty> for $ident {
            fn as_ref(&self) -> &$ty {
                &self.0
            }
        }

        impl AsMut<$ty> for $ident {
            fn as_mut(&mut self) -> &mut $ty {
                &mut self.0
            }
        }

        impl std::ops::Deref for $ident {
            type Target = $ty;

            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }

        impl std::ops::DerefMut for $ident {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.0
            }
        }

        impl From<$ty> for $ident {
            fn from(value: $ty) -> Self {
                Self(value)
            }
        }

        impl From<$ident> for $ty {
            fn from(value: $ident) -> Self {
                value.0
            }
        }

        $(
            $(
                $crate::macros::impl_for_wrapper!($trait_ident, $ident);
            )*
        )?
    };
}

/// Technically not needed but this allows for the macro to be found in the `macros` module of the
/// crate in addition to being found in the root of the crate.
pub use {define_wrapper_type, impl_for_wrapper};
crates/common/src/macros/mod.rs
@@ -0,0 +1,3 @@
mod define_wrapper_type;

pub use define_wrapper_type::*;
crates/common/src/types/identifiers.rs
@@ -0,0 +1,128 @@
use clap::ValueEnum;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};

/// An enum of the platform identifiers of all of the platforms supported by this framework. This
/// can be thought of as being like the target triple from Rust and LLVM: it specifies the platform
/// completely, starting with the node, then the VM, and finally the compiler used for this
/// combination.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum PlatformIdentifier {
    /// The Go-ethereum reference full node EVM implementation with the solc compiler.
    GethEvmSolc,
    /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
    LighthouseGethEvmSolc,
    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
    KitchensinkPolkavmResolc,
    /// The kitchensink node with the REVM backend with the solc compiler.
    KitchensinkRevmSolc,
    /// The revive dev node with the PolkaVM backend with the resolc compiler.
    ReviveDevNodePolkavmResolc,
    /// The revive dev node with the REVM backend with the solc compiler.
    ReviveDevNodeRevmSolc,
}

/// An enum of the compiler identifiers of all of the compilers supported by this framework.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum CompilerIdentifier {
    /// The solc compiler.
    Solc,
    /// The resolc compiler.
    Resolc,
}

/// An enum representing the identifiers of the supported nodes.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum NodeIdentifier {
    /// The go-ethereum node implementation.
    Geth,
    /// The go-ethereum node implementation run behind the Lighthouse consensus client.
    LighthouseGeth,
    /// The Kitchensink node implementation.
    Kitchensink,
    /// The revive dev node implementation.
    ReviveDevNode,
}

/// An enum representing the identifiers of the supported VMs.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum VmIdentifier {
    /// The ethereum virtual machine.
    Evm,
    /// The EraVM virtual machine.
    EraVM,
    /// Polkadot's PolkaVM RISC-V based virtual machine.
    PolkaVM,
}
crates/common/src/types/mod.rs
@@ -0,0 +1,9 @@
mod identifiers;
mod mode;
mod private_key_allocator;
mod version_or_requirement;

pub use identifiers::*;
pub use mode::*;
pub use private_key_allocator::*;
pub use version_or_requirement::*;
@@ -0,0 +1,173 @@
use crate::types::VersionOrRequirement;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;

/// This represents a mode that a given test should be run with, if possible.
///
/// We obtain this by taking a [`ParsedMode`], which may be looser or more strict
/// in its requirements, and then expanding it out into a list of [`Mode`]s.
///
/// Use [`ParsedMode::to_test_modes()`] to do this.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Mode {
    pub pipeline: ModePipeline,
    pub optimize_setting: ModeOptimizerSetting,
    pub version: Option<semver::VersionReq>,
}

impl Display for Mode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.pipeline.fmt(f)?;
        f.write_str(" ")?;
        self.optimize_setting.fmt(f)?;

        if let Some(version) = &self.version {
            f.write_str(" ")?;
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl Mode {
    /// Return all of the available mode combinations.
    pub fn all() -> impl Iterator<Item = &'static Mode> {
        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
            ModePipeline::test_cases()
                .flat_map(|pipeline| {
                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
                        pipeline,
                        optimize_setting,
                        version: None,
                    })
                })
                .collect::<Vec<_>>()
        });
        ALL_MODES.iter()
    }

    /// Resolves the [`Mode`]'s Solidity version requirement into a [`VersionOrRequirement`] if
    /// the requirement is present on the object. Otherwise, the passed default version is used.
    pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
        match self.version {
            Some(ref requirement) => requirement.clone().into(),
            None => default.into(),
        }
    }
}

/// What do we want the compiler to do?
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModePipeline {
    /// Compile Solidity code via Yul IR
    ViaYulIR,
    /// Compile Solidity directly to assembly
    ViaEVMAssembly,
}

impl FromStr for ModePipeline {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            // via Yul IR
            "Y" => Ok(ModePipeline::ViaYulIR),
            // Don't go via Yul IR
            "E" => Ok(ModePipeline::ViaEVMAssembly),
            // Anything else that we see isn't a mode at all
            _ => Err(anyhow::anyhow!(
                "Unsupported pipeline '{s}': expected 'Y' or 'E'"
            )),
        }
    }
}

impl Display for ModePipeline {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ModePipeline::ViaYulIR => f.write_str("Y"),
            ModePipeline::ViaEVMAssembly => f.write_str("E"),
        }
    }
}

impl ModePipeline {
    /// Should we go via Yul IR?
    pub fn via_yul_ir(&self) -> bool {
        matches!(self, ModePipeline::ViaYulIR)
    }

    /// An iterator over the available pipelines that we'd like to test,
    /// when an explicit pipeline was not specified.
    pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
        [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModeOptimizerSetting {
    /// 0 / -: Don't apply any optimizations
    M0,
    /// 1: Apply less than default optimizations
    M1,
    /// 2: Apply the default optimizations
    M2,
    /// 3 / +: Apply aggressive optimizations
    M3,
    /// s: Optimize for size
    Ms,
    /// z: Aggressively optimize for size
    Mz,
}

impl FromStr for ModeOptimizerSetting {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "M0" => Ok(ModeOptimizerSetting::M0),
            "M1" => Ok(ModeOptimizerSetting::M1),
            "M2" => Ok(ModeOptimizerSetting::M2),
            "M3" => Ok(ModeOptimizerSetting::M3),
            "Ms" => Ok(ModeOptimizerSetting::Ms),
            "Mz" => Ok(ModeOptimizerSetting::Mz),
            _ => Err(anyhow::anyhow!(
                "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
            )),
        }
    }
}

impl Display for ModeOptimizerSetting {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ModeOptimizerSetting::M0 => f.write_str("M0"),
            ModeOptimizerSetting::M1 => f.write_str("M1"),
            ModeOptimizerSetting::M2 => f.write_str("M2"),
            ModeOptimizerSetting::M3 => f.write_str("M3"),
            ModeOptimizerSetting::Ms => f.write_str("Ms"),
            ModeOptimizerSetting::Mz => f.write_str("Mz"),
        }
    }
}

impl ModeOptimizerSetting {
    /// An iterator over the available optimizer settings that we'd like to test,
    /// when an explicit optimizer setting was not specified.
    pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
        [
            // No optimizations:
            ModeOptimizerSetting::M0,
            // Aggressive optimizations:
            ModeOptimizerSetting::M3,
        ]
        .into_iter()
    }

    /// Are any optimizations enabled?
    pub fn optimizations_enabled(&self) -> bool {
        !matches!(self, ModeOptimizerSetting::M0)
    }
}
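
// A minimal sketch (not part of the diff) of how a fully specified mode renders,
// following the Display impls above: pipeline, optimizer setting, then the
// optional version requirement, separated by spaces.
fn mode_display_example() -> anyhow::Result<()> {
    let mode = Mode {
        pipeline: ModePipeline::ViaYulIR,
        optimize_setting: ModeOptimizerSetting::M3,
        version: Some(semver::VersionReq::parse(">=0.8.0")?),
    };
    // "Y" for the Yul IR pipeline, "M3" for aggressive optimizations.
    assert_eq!(mode.to_string(), "Y M3 >=0.8.0");
    Ok(())
}
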
@@ -0,0 +1,35 @@
use alloy::signers::local::PrivateKeySigner;
use alloy_primitives::U256;
use anyhow::{Result, bail};

/// This is a sequential private key allocator. When instantiated, it allocates private keys
/// sequentially and in order until the maximum private key specified is reached.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PrivateKeyAllocator {
    /// The next private key to be returned by the allocator when requested.
    next_private_key: U256,

    /// The highest private key (exclusive) that can be returned by this allocator.
    highest_private_key_exclusive: U256,
}

impl PrivateKeyAllocator {
    /// Creates a new instance of the private key allocator.
    pub fn new(highest_private_key_exclusive: U256) -> Self {
        Self {
            next_private_key: U256::ZERO,
            highest_private_key_exclusive,
        }
    }

    /// Allocates a new private key and errors out if the maximum private key has been reached.
    pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
        if self.next_private_key >= self.highest_private_key_exclusive {
            bail!("Attempted to allocate a private key but failed since all have been allocated");
        };
        let private_key =
            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())?;
        self.next_private_key += U256::ONE;
        Ok(private_key)
    }
}
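
// A minimal usage sketch (not part of the diff). Note that the counter starts at
// U256::ZERO, and zero is not a valid secp256k1 secret key, so callers should be
// prepared for allocate() to return an Err rather than a signer.
fn drain_allocator() {
    let mut allocator = PrivateKeyAllocator::new(U256::from(3));
    // Allocate until the exclusive upper bound (or an invalid key) stops us.
    while let Ok(signer) = allocator.allocate() {
        println!("allocated signer {}", signer.address());
    }
}
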
@@ -0,0 +1,41 @@
use semver::{Version, VersionReq};

#[derive(Clone, Debug)]
pub enum VersionOrRequirement {
    Version(Version),
    Requirement(VersionReq),
}

impl From<Version> for VersionOrRequirement {
    fn from(value: Version) -> Self {
        Self::Version(value)
    }
}

impl From<VersionReq> for VersionOrRequirement {
    fn from(value: VersionReq) -> Self {
        Self::Requirement(value)
    }
}

impl TryFrom<VersionOrRequirement> for Version {
    type Error = anyhow::Error;

    fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
        let VersionOrRequirement::Version(version) = value else {
            anyhow::bail!("Version or requirement was not a version");
        };
        Ok(version)
    }
}

impl TryFrom<VersionOrRequirement> for VersionReq {
    type Error = anyhow::Error;

    fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
        let VersionOrRequirement::Requirement(requirement) = value else {
            anyhow::bail!("Version or requirement was not a requirement");
        };
        Ok(requirement)
    }
}
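
// A small sketch (not part of the diff) of the conversions above: wrapping a
// concrete Version is infallible, while extracting one back out is fallible
// because the enum may hold a requirement instead.
fn version_conversions() -> anyhow::Result<()> {
    let wrapped: VersionOrRequirement = Version::new(0, 8, 30).into();
    let concrete: Version = wrapped.try_into()?;
    assert_eq!(concrete, Version::new(0, 8, 30));
    Ok(())
}
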
@@ -0,0 +1,30 @@
[package]
name = "revive-dt-compiler"
description = "Library for compiling Solidity contracts to EVM and PVM"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-solc-json-interface = { workspace = true }
revive-dt-common = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-solc-binaries = { workspace = true }
revive-common = { workspace = true }

alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
dashmap = { workspace = true }
foundry-compilers-artifacts = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,176 @@
//! This crate provides compiler helpers for all supported Solidity targets:
//! - Ethereum solc compiler
//! - Polkadot revive resolc compiler
//! - Polkadot revive Wasm compiler

use std::{
    collections::HashMap,
    hash::Hash,
    path::{Path, PathBuf},
    pin::Pin,
};

use alloy::json_abi::JsonAbi;
use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use semver::Version;
use serde::{Deserialize, Serialize};

use revive_common::EVMVersion;
use revive_dt_common::cached_fs::read_to_string;

// Re-export this as it's a part of the compiler interface.
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};

pub mod revive_js;
pub mod revive_resolc;
pub mod solc;

/// A common interface for all supported Solidity compilers.
pub trait SolidityCompiler {
    /// Returns the version of the compiler.
    fn version(&self) -> &Version;

    /// Returns the path of the compiler executable.
    fn path(&self) -> &Path;

    /// The low-level compiler interface.
    fn build(
        &self,
        input: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;

    /// Does the compiler support the provided mode and version settings?
    fn supports_mode(
        &self,
        optimizer_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool;
}

/// The generic compilation input configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct CompilerInput {
    pub pipeline: Option<ModePipeline>,
    pub optimization: Option<ModeOptimizerSetting>,
    pub evm_version: Option<EVMVersion>,
    pub allow_paths: Vec<PathBuf>,
    pub base_path: Option<PathBuf>,
    pub sources: HashMap<PathBuf, String>,
    pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
    pub revert_string_handling: Option<RevertString>,
}

/// The generic compilation output configuration.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CompilerOutput {
    /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is
    /// required and the compiled source has placeholders.
    pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
}

/// A generic builder-style interface for configuring the supported compiler options.
#[derive(Default)]
pub struct Compiler {
    input: CompilerInput,
}

impl Compiler {
    pub fn new() -> Self {
        Self {
            input: CompilerInput {
                pipeline: Default::default(),
                optimization: Default::default(),
                evm_version: Default::default(),
                allow_paths: Default::default(),
                base_path: Default::default(),
                sources: Default::default(),
                libraries: Default::default(),
                revert_string_handling: Default::default(),
            },
        }
    }

    pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
        self.input.optimization = value.into();
        self
    }

    pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
        self.input.pipeline = value.into();
        self
    }

    pub fn with_evm_version(mut self, version: impl Into<Option<EVMVersion>>) -> Self {
        self.input.evm_version = version.into();
        self
    }

    pub fn with_allow_path(mut self, path: impl AsRef<Path>) -> Self {
        self.input.allow_paths.push(path.as_ref().into());
        self
    }

    pub fn with_base_path(mut self, path: impl Into<Option<PathBuf>>) -> Self {
        self.input.base_path = path.into();
        self
    }

    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
        self.input.sources.insert(
            path.as_ref().to_path_buf(),
            read_to_string(path.as_ref()).context("Failed to read the contract source")?,
        );
        Ok(self)
    }

    pub fn with_library(
        mut self,
        path: impl AsRef<Path>,
        name: impl AsRef<str>,
        address: Address,
    ) -> Self {
        self.input
            .libraries
            .entry(path.as_ref().to_path_buf())
            .or_default()
            .insert(name.as_ref().into(), address);
        self
    }

    pub fn with_revert_string_handling(
        mut self,
        revert_string_handling: impl Into<Option<RevertString>>,
    ) -> Self {
        self.input.revert_string_handling = revert_string_handling.into();
        self
    }

    pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
        callback(self)
    }

    pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
        callback(self)
    }

    pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
        compiler.build(self.input).await
    }

    pub fn input(&self) -> &CompilerInput {
        &self.input
    }
}

/// Defines how the compiler should handle revert strings.
#[derive(
    Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
pub enum RevertString {
    #[default]
    Default,
    Debug,
    Strip,
    VerboseDebug,
}
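
// A minimal sketch of the builder in use (not part of the diff; the source path
// is hypothetical): options accumulate into the CompilerInput and try_build hands
// it to whichever backend implements SolidityCompiler.
async fn compile_example(compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
    Compiler::new()
        .with_pipeline(ModePipeline::ViaYulIR)
        .with_optimization(ModeOptimizerSetting::M3)
        // Hypothetical path; with_source reads the file contents eagerly.
        .with_source("contracts/Example.sol")?
        .try_build(compiler)
        .await
}
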
@@ -0,0 +1,2 @@
//! Implements the [crate::SolidityCompiler] trait with revive Wasm for
//! compiling contracts to PVM bytecode (via Wasm).
@@ -0,0 +1,297 @@
//! Implements the [SolidityCompiler] trait with `resolc` for
//! compiling contracts to PolkaVM (PVM) bytecode.

use std::{
    path::PathBuf,
    pin::Pin,
    process::Stdio,
    sync::{Arc, LazyLock},
};

use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_solc_json_interface::{
    SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
    SolcStandardJsonOutput,
};

use crate::{
    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
};

use alloy::json_abi::JsonAbi;
use anyhow::{Context as _, Result};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Resolc(Arc<ResolcInner>);

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct ResolcInner {
    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
    solc: Solc,
    /// Path to the `resolc` executable.
    resolc_path: PathBuf,
}

impl Resolc {
    pub async fn new(
        context: impl AsRef<SolcConfiguration>
            + AsRef<ResolcConfiguration>
            + AsRef<WorkingDirectoryConfiguration>,
        version: impl Into<Option<VersionOrRequirement>>,
    ) -> Result<Self> {
        /// This is a cache of all of the resolc compiler objects. Since we do not currently
        /// support multiple resolc compiler versions, the cache is keyed by the solc compiler
        /// (and its version) mapping to the resolc compiler.
        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);

        let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);

        let solc = Solc::new(&context, version)
            .await
            .context("Failed to create the solc compiler frontend for resolc")?;

        Ok(COMPILERS_CACHE
            .entry(solc.clone())
            .or_insert_with(|| {
                Self(Arc::new(ResolcInner {
                    solc,
                    resolc_path: resolc_configuration.path.clone(),
                }))
            })
            .clone())
    }
}

impl SolidityCompiler for Resolc {
    fn version(&self) -> &Version {
        // We currently return the solc compiler version since we do not support multiple resolc
        // compiler versions.
        SolidityCompiler::version(&self.0.solc)
    }

    fn path(&self) -> &std::path::Path {
        &self.0.resolc_path
    }

    #[tracing::instrument(level = "debug", ret)]
    fn build(
        &self,
        CompilerInput {
            pipeline,
            optimization,
            evm_version,
            allow_paths,
            base_path,
            sources,
            libraries,
            // TODO: this is currently not being handled since there is no way to pass it into
            // resolc. So, we need to go back to this later once it's supported.
            revert_string_handling: _,
        }: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
        Box::pin(async move {
            if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
                anyhow::bail!(
                    "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
                );
            }

            let input = SolcStandardJsonInput {
                language: SolcStandardJsonInputLanguage::Solidity,
                sources: sources
                    .into_iter()
                    .map(|(path, source)| (path.display().to_string(), source.into()))
                    .collect(),
                settings: SolcStandardJsonInputSettings {
                    evm_version,
                    libraries: Some(
                        libraries
                            .into_iter()
                            .map(|(source_code, libraries_map)| {
                                (
                                    source_code.display().to_string(),
                                    libraries_map
                                        .into_iter()
                                        .map(|(library_ident, library_address)| {
                                            (library_ident, library_address.to_string())
                                        })
                                        .collect(),
                                )
                            })
                            .collect(),
                    ),
                    remappings: None,
                    output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
                    via_ir: Some(true),
                    optimizer: SolcStandardJsonInputSettingsOptimizer::new(
                        optimization
                            .unwrap_or(ModeOptimizerSetting::M0)
                            .optimizations_enabled(),
                        None,
                        &Version::new(0, 0, 0),
                        false,
                    ),
                    metadata: None,
                    polkavm: None,
                },
            };

            let path = &self.0.resolc_path;
            let mut command = AsyncCommand::new(path);
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .arg("--standard-json");

            if let Some(ref base_path) = base_path {
                command.arg("--base-path").arg(base_path);
            }
            if !allow_paths.is_empty() {
                command.arg("--allow-paths").arg(
                    allow_paths
                        .iter()
                        .map(|path| path.display().to_string())
                        .collect::<Vec<_>>()
                        .join(","),
                );
            }
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;

            let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
            let serialized_input = serde_json::to_vec(&input)
                .context("Failed to serialize Standard JSON input for resolc")?;
            stdin_pipe
                .write_all(&serialized_input)
                .await
                .context("Failed to write Standard JSON to resolc stdin")?;

            let output = child
                .wait_with_output()
                .await
                .context("Failed while waiting for resolc process to finish")?;
            let stdout = output.stdout;
            let stderr = output.stderr;

            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                let message = String::from_utf8_lossy(&stderr);
                tracing::error!(
                    status = %output.status,
                    message = %message,
                    json_input = json_in,
                    "Compilation using resolc failed"
                );
                anyhow::bail!("Compilation failed with an error: {message}");
            }

            let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse resolc JSON output: {e}\nstderr: {}",
                        String::from_utf8_lossy(&stderr)
                    )
                })
                .context("Failed to parse resolc standard JSON output")?;

            tracing::debug!(
                output = %serde_json::to_string(&parsed).unwrap(),
                "Compiled successfully"
            );

            // Detect if the compiler output contained errors and report them through logs and
            // errors instead of returning a compiler output that might contain errors.
            for error in parsed.errors.iter().flatten() {
                if error.severity == "error" {
                    tracing::error!(
                        ?error,
                        ?input,
                        output = %serde_json::to_string(&parsed).unwrap(),
                        "Encountered an error in the compilation"
                    );
                    anyhow::bail!("Encountered an error in the compilation: {error}")
                }
            }

            let Some(contracts) = parsed.contracts else {
                anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
            };

            let mut compiler_output = CompilerOutput::default();
            for (source_path, contracts) in contracts.into_iter() {
                let src_for_msg = source_path.clone();
                let source_path = PathBuf::from(source_path)
                    .canonicalize()
                    .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;

                let map = compiler_output.contracts.entry(source_path).or_default();
                for (contract_name, contract_information) in contracts.into_iter() {
                    let bytecode = contract_information
                        .evm
                        .and_then(|evm| evm.bytecode.clone())
                        .context("Unexpected - Contract compiled with resolc has no bytecode")?;
                    let abi = {
                        let metadata = contract_information
                            .metadata
                            .as_ref()
                            .context("No metadata found for the contract")?;
                        let solc_metadata_str = match metadata {
                            serde_json::Value::String(solc_metadata_str) => {
                                solc_metadata_str.as_str()
                            }
                            serde_json::Value::Object(metadata_object) => {
                                let solc_metadata_value = metadata_object
                                    .get("solc_metadata")
                                    .context("Contract doesn't have a 'solc_metadata' field")?;
                                solc_metadata_value
                                    .as_str()
                                    .context("The 'solc_metadata' field is not a string")?
                            }
                            serde_json::Value::Null
                            | serde_json::Value::Bool(_)
                            | serde_json::Value::Number(_)
                            | serde_json::Value::Array(_) => {
                                anyhow::bail!("Unsupported type of metadata {metadata:?}")
                            }
                        };
                        let solc_metadata = serde_json::from_str::<serde_json::Value>(
                            solc_metadata_str,
                        )
                        .context(
                            "Failed to deserialize the solc_metadata as a serde_json generic value",
                        )?;
                        let output_value = solc_metadata
                            .get("output")
                            .context("solc_metadata doesn't have an output field")?;
                        let abi_value = output_value
                            .get("abi")
                            .context("solc_metadata output doesn't contain an abi field")?;
                        serde_json::from_value::<JsonAbi>(abi_value.clone())
                            .context("ABI found in solc_metadata output is not valid ABI")?
                    };
                    map.insert(contract_name, (bytecode.object, abi));
                }
            }

            Ok(compiler_output)
        })
    }

    fn supports_mode(
        &self,
        optimize_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool {
        pipeline == ModePipeline::ViaYulIR
            && SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
    }
}
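
// A short sketch (not part of the diff) of how a caller might use supports_mode:
// since resolc always drives solc through Yul IR, any mode requesting the E
// pipeline is filtered out before a build is even attempted.
fn modes_supported_by(resolc: &Resolc) -> Vec<&'static Mode> {
    Mode::all()
        .filter(|mode| {
            SolidityCompiler::supports_mode(resolc, mode.optimize_setting, mode.pipeline)
        })
        .collect()
}
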
@@ -0,0 +1,289 @@
//! Implements the [SolidityCompiler] trait with solc for
//! compiling contracts to EVM bytecode.

use std::{
    path::PathBuf,
    pin::Pin,
    process::Stdio,
    sync::{Arc, LazyLock},
};

use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc;

use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};

use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{
    output_selection::{
        BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
    },
    solc::CompilerOutput as SolcOutput,
    solc::*,
};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Solc(Arc<SolcInner>);

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct SolcInner {
    /// The path of the solidity compiler executable that this object uses.
    solc_path: PathBuf,
    /// The version of the solidity compiler executable that this object uses.
    solc_version: Version,
}

impl Solc {
    pub async fn new(
        context: impl AsRef<SolcConfiguration>
            + AsRef<ResolcConfiguration>
            + AsRef<WorkingDirectoryConfiguration>,
        version: impl Into<Option<VersionOrRequirement>>,
    ) -> Result<Self> {
        // This is a cache for the compiler objects so that whenever the same compiler version is
        // requested the same object is returned. We do this as we do not want to keep cloning the
        // compiler around.
        static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
            LazyLock::new(Default::default);

        let working_directory_configuration =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
        let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);

        // We attempt to download the solc binary. Note the following: this call does the version
        // resolution for us. Therefore, even if the download didn't proceed, this function will
        // resolve the version requirement into a canonical version of the compiler. It's then up
        // to us to either use the provided path or not.
        let version = version
            .into()
            .unwrap_or_else(|| solc_configuration.version.clone().into());
        let (version, path) =
            download_solc(working_directory_configuration.as_path(), version, false)
                .await
                .context("Failed to download/get path to solc binary")?;

        Ok(COMPILERS_CACHE
            .entry((path.clone(), version.clone()))
            .or_insert_with(|| {
                Self(Arc::new(SolcInner {
                    solc_path: path,
                    solc_version: version,
                }))
            })
            .clone())
    }
}

impl SolidityCompiler for Solc {
    fn version(&self) -> &Version {
        &self.0.solc_version
    }

    fn path(&self) -> &std::path::Path {
        &self.0.solc_path
    }

    #[tracing::instrument(level = "debug", ret)]
    fn build(
        &self,
        CompilerInput {
            pipeline,
            optimization,
            evm_version,
            allow_paths,
            base_path,
            sources,
            libraries,
            revert_string_handling,
        }: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
        Box::pin(async move {
            // Be careful to entirely omit the viaIR field if the compiler does not support it,
            // as it will error if you provide fields it does not know about. Because
            // `supports_mode` is called prior to instantiating a compiler, we should never
            // ask for something which is invalid.
            let via_ir = match (pipeline, self.compiler_supports_yul()) {
                (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
                (_pipeline, false) => None,
            };

            let input = SolcInput {
                language: SolcLanguage::Solidity,
                sources: Sources(
                    sources
                        .into_iter()
                        .map(|(source_path, source_code)| (source_path, Source::new(source_code)))
                        .collect(),
                ),
                settings: Settings {
                    optimizer: Optimizer {
                        enabled: optimization.map(|o| o.optimizations_enabled()),
                        details: Some(Default::default()),
                        ..Default::default()
                    },
                    output_selection: OutputSelection::common_output_selection(
                        [
                            ContractOutputSelection::Abi,
                            ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
                                BytecodeOutputSelection::Object,
                            )),
                        ]
                        .into_iter()
                        .map(|item| item.to_string()),
                    ),
                    evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
                    via_ir,
                    libraries: Libraries {
                        libs: libraries
                            .into_iter()
                            .map(|(file_path, libraries)| {
                                (
                                    file_path,
                                    libraries
                                        .into_iter()
                                        .map(|(library_name, library_address)| {
                                            (library_name, library_address.to_string())
                                        })
                                        .collect(),
                                )
                            })
                            .collect(),
                    },
                    debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
                        revert_strings: match revert_string_handling {
                            crate::RevertString::Default => Some(RevertStrings::Default),
                            crate::RevertString::Debug => Some(RevertStrings::Debug),
                            crate::RevertString::Strip => Some(RevertStrings::Strip),
                            crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
                        },
                        debug_info: Default::default(),
                    }),
                    ..Default::default()
                },
            };

            let path = &self.0.solc_path;
            let mut command = AsyncCommand::new(path);
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .arg("--standard-json");

            if let Some(ref base_path) = base_path {
                command.arg("--base-path").arg(base_path);
            }
            if !allow_paths.is_empty() {
                command.arg("--allow-paths").arg(
                    allow_paths
                        .iter()
                        .map(|path| path.display().to_string())
                        .collect::<Vec<_>>()
                        .join(","),
                );
            }
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn solc at {}", path.display()))?;

            let stdin = child.stdin.as_mut().expect("should be piped");
            let serialized_input = serde_json::to_vec(&input)
                .context("Failed to serialize Standard JSON input for solc")?;
            stdin
                .write_all(&serialized_input)
                .await
                .context("Failed to write Standard JSON to solc stdin")?;
            let output = child
                .wait_with_output()
                .await
                .context("Failed while waiting for solc process to finish")?;

            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                let message = String::from_utf8_lossy(&output.stderr);
                tracing::error!(
                    status = %output.status,
                    message = %message,
                    json_input = json_in,
                    "Compilation using solc failed"
                );
                anyhow::bail!("Compilation failed with an error: {message}");
            }

            let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse solc JSON output: {e}\nstderr: {}",
                        String::from_utf8_lossy(&output.stderr)
                    )
                })
                .context("Failed to parse solc standard JSON output")?;

            // Detect if the compiler output contained errors and report them through logs and
            // errors instead of returning a compiler output that might contain errors.
            for error in parsed.errors.iter() {
                if error.severity == Severity::Error {
                    tracing::error!(?error, ?input, "Encountered an error in the compilation");
                    anyhow::bail!("Encountered an error in the compilation: {error}")
                }
            }

            tracing::debug!(
                output = %String::from_utf8_lossy(&output.stdout).to_string(),
                "Compiled successfully"
            );

            let mut compiler_output = CompilerOutput::default();
            for (contract_path, contracts) in parsed.contracts {
                let map = compiler_output
                    .contracts
                    .entry(contract_path.canonicalize().with_context(|| {
                        format!(
                            "Failed to canonicalize contract path {}",
                            contract_path.display()
                        )
                    })?)
                    .or_default();
                for (contract_name, contract_info) in contracts.into_iter() {
                    let source_code = contract_info
                        .evm
                        .and_then(|evm| evm.bytecode)
                        .map(|bytecode| match bytecode.object {
                            BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
                            BytecodeObject::Unlinked(unlinked) => unlinked,
                        })
                        .context("Unexpected - contract compiled with solc has no source code")?;
                    let abi = contract_info
                        .abi
                        .context("Unexpected - contract compiled with solc has no ABI")?;
                    map.insert(contract_name, (source_code, abi));
                }
            }

            Ok(compiler_output)
        })
    }

    fn supports_mode(
        &self,
        _optimize_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool {
        // solc 0.8.13 and above support --via-ir, while earlier versions do not. Thus, we support
        // mode E (i.e. no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
        // is new enough.
        pipeline == ModePipeline::ViaEVMAssembly
            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
    }
}

impl Solc {
    fn compiler_supports_yul(&self) -> bool {
        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
        SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
    }
}
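
// A small sketch (not part of the diff) of the version gate above: mode E is
// accepted by any solc, while mode Y is only reported as supported from 0.8.13
// onwards.
fn pick_pipeline(solc: &Solc) -> ModePipeline {
    if SolidityCompiler::supports_mode(solc, ModeOptimizerSetting::M3, ModePipeline::ViaYulIR) {
        ModePipeline::ViaYulIR
    } else {
        ModePipeline::ViaEVMAssembly
    }
}
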
@@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT

pragma solidity >=0.6.9;

contract Callable {
    function f(uint[1] memory p1) public pure returns(uint) {
        return p1[0];
    }
}
@@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT

// Report https://linear.app/matterlabs/issue/CPR-269/call-with-calldata-variable-bug

pragma solidity >=0.6.9;

import "./callable.sol";

contract Main {
    function main(uint[1] calldata p1, Callable callable) public returns(uint) {
        return callable.f(p1);
    }
}
@@ -0,0 +1,21 @@
{ "cases": [ {
    "name": "first",
    "inputs": [
        {
            "instance": "Main",
            "method": "main",
            "calldata": [
                "1",
                "Callable.address"
            ]
        }
    ],
    "expected": [
        "1"
    ]
} ],
"contracts": {
    "Main": "main.sol:Main",
    "Callable": "callable.sol:Callable"
}
}
@@ -0,0 +1,88 @@
use std::path::PathBuf;

use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::TestExecutionContext;
use semver::Version;

#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
    // Arrange
    let args = TestExecutionContext::default();
    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

    // Act
    let output = Compiler::new()
        .with_source("./tests/assets/array_one_element/callable.sol")
        .unwrap()
        .with_source("./tests/assets/array_one_element/main.sol")
        .unwrap()
        .try_build(&solc)
        .await;

    // Assert
    let output = output.expect("Failed to compile");
    assert_eq!(output.contracts.len(), 2);

    let main_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/main.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    let callable_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/callable.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    assert!(main_file_contracts.contains_key("Main"));
    assert!(callable_file_contracts.contains_key("Callable"));
}

#[tokio::test]
async fn contracts_can_be_compiled_with_resolc() {
    // Arrange
    let args = TestExecutionContext::default();
    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

    // Act
    let output = Compiler::new()
        .with_source("./tests/assets/array_one_element/callable.sol")
        .unwrap()
        .with_source("./tests/assets/array_one_element/main.sol")
        .unwrap()
        .try_build(&resolc)
        .await;

    // Assert
    let output = output.expect("Failed to compile");
    assert_eq!(output.contracts.len(), 2);

    let main_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/main.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    let callable_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/callable.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    assert!(main_file_contracts.contains_key("Main"));
    assert!(callable_file_contracts.contains_key("Callable"));
}
@@ -0,0 +1,24 @@
[package]
name = "revive-dt-config"
description = "global configuration for the revive differential tester"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
semver = { workspace = true }
temp-dir = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
strum = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,695 @@
//! The global configuration used across all revive differential testing crates.

use std::{
    fmt::Display,
    fs::read_to_string,
    ops::Deref,
    path::{Path, PathBuf},
    str::FromStr,
    sync::{Arc, LazyLock, OnceLock},
    time::Duration,
};

use alloy::{
    genesis::Genesis,
    hex::ToHexExt,
    network::EthereumWallet,
    primitives::{FixedBytes, U256},
    signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use revive_dt_common::types::PlatformIdentifier;
use semver::Version;
use serde::{Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir;

#[derive(Clone, Debug, Parser, Serialize)]
#[command(name = "retester")]
pub enum Context {
    /// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
    ExecuteTests(Box<TestExecutionContext>),
    /// Exports the JSON schema of the MatterLabs test format used by the tool.
    ExportJsonSchema,
}

impl Context {
    pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
        self.as_ref()
    }

    pub fn report_configuration(&self) -> &ReportConfiguration {
        self.as_ref()
    }
}

impl AsRef<WorkingDirectoryConfiguration> for Context {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<SolcConfiguration> for Context {
    fn as_ref(&self) -> &SolcConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ResolcConfiguration> for Context {
    fn as_ref(&self) -> &ResolcConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<GethConfiguration> for Context {
    fn as_ref(&self) -> &GethConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<KurtosisConfiguration> for Context {
    fn as_ref(&self) -> &KurtosisConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<KitchensinkConfiguration> for Context {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ReviveDevNodeConfiguration> for Context {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<EthRpcConfiguration> for Context {
    fn as_ref(&self) -> &EthRpcConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<GenesisConfiguration> for Context {
    fn as_ref(&self) -> &GenesisConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<WalletConfiguration> for Context {
    fn as_ref(&self) -> &WalletConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ConcurrencyConfiguration> for Context {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<CompilationConfiguration> for Context {
    fn as_ref(&self) -> &CompilationConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<ReportConfiguration> for Context {
    fn as_ref(&self) -> &ReportConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

#[derive(Clone, Debug, Parser, Serialize)]
pub struct TestExecutionContext {
    /// The working directory that the program will use for all of the temporary artifacts needed
    /// at runtime.
    ///
    /// If not specified, then a temporary directory will be created and used by the program for
    /// all temporary artifacts.
    #[clap(
        short,
        long,
        default_value = "",
        value_hint = ValueHint::DirPath,
    )]
    pub working_directory: WorkingDirectoryConfiguration,

    /// The set of platforms that the differential tests should run on.
    #[arg(
        short = 'p',
        long = "platform",
        default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
    )]
    pub platforms: Vec<PlatformIdentifier>,

    /// A list of test corpus JSON files to be tested.
    #[arg(long = "corpus", short)]
    pub corpus: Vec<PathBuf>,

    /// Configuration parameters for the solc compiler.
    #[clap(flatten, next_help_heading = "Solc Configuration")]
    pub solc_configuration: SolcConfiguration,

    /// Configuration parameters for the resolc compiler.
    #[clap(flatten, next_help_heading = "Resolc Configuration")]
    pub resolc_configuration: ResolcConfiguration,

    /// Configuration parameters for the geth node.
    #[clap(flatten, next_help_heading = "Geth Configuration")]
    pub geth_configuration: GethConfiguration,

    /// Configuration parameters for the lighthouse node.
    #[clap(flatten, next_help_heading = "Lighthouse Configuration")]
    pub lighthouse_configuration: KurtosisConfiguration,

    /// Configuration parameters for the Kitchensink node.
    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
    pub kitchensink_configuration: KitchensinkConfiguration,

    /// Configuration parameters for the Revive Dev Node.
    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

    /// Configuration parameters for the Eth RPC.
    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
    pub eth_rpc_configuration: EthRpcConfiguration,

    /// Configuration parameters for the genesis.
    #[clap(flatten, next_help_heading = "Genesis Configuration")]
    pub genesis_configuration: GenesisConfiguration,

    /// Configuration parameters for the wallet.
    #[clap(flatten, next_help_heading = "Wallet Configuration")]
    pub wallet_configuration: WalletConfiguration,

    /// Configuration parameters for concurrency.
    #[clap(flatten, next_help_heading = "Concurrency Configuration")]
    pub concurrency_configuration: ConcurrencyConfiguration,

    /// Configuration parameters for the compilers and compilation.
    #[clap(flatten, next_help_heading = "Compilation Configuration")]
    pub compilation_configuration: CompilationConfiguration,

    /// Configuration parameters for the report.
    #[clap(flatten, next_help_heading = "Report Configuration")]
    pub report_configuration: ReportConfiguration,
}

impl Default for TestExecutionContext {
    fn default() -> Self {
        Self::parse_from(["execution-context"])
    }
}

impl AsRef<WorkingDirectoryConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.working_directory
    }
}

impl AsRef<SolcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc_configuration
    }
}

impl AsRef<ResolcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ResolcConfiguration {
        &self.resolc_configuration
    }
}

impl AsRef<GethConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &GethConfiguration {
        &self.geth_configuration
    }
}

impl AsRef<KurtosisConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &KurtosisConfiguration {
        &self.lighthouse_configuration
    }
}

impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        &self.kitchensink_configuration
    }
}

impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        &self.revive_dev_node_configuration
    }
}

impl AsRef<EthRpcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration
    }
}

impl AsRef<GenesisConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &GenesisConfiguration {
        &self.genesis_configuration
    }
}

impl AsRef<WalletConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet_configuration
    }
}

impl AsRef<ConcurrencyConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        &self.concurrency_configuration
    }
}

impl AsRef<CompilationConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &CompilationConfiguration {
        &self.compilation_configuration
    }
}

impl AsRef<ReportConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &ReportConfiguration {
        &self.report_configuration
    }
}

/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct SolcConfiguration {
    /// Specifies the default version of the Solc compiler that should be used if there is no
    /// override specified by one of the test cases.
    #[clap(long = "solc.version", default_value = "0.8.29")]
    pub version: Version,
}

/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ResolcConfiguration {
    /// Specifies the path of the resolc compiler to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the resolc binary that's
    /// provided in the user's $PATH.
    #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
    pub path: PathBuf,
}

/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GethConfiguration {
    /// Specifies the path of the geth node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the geth binary that's
    /// provided in the user's $PATH.
    #[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "geth.start-timeout-ms",
        long = "geth.start-timeout-ms",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for kurtosis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KurtosisConfiguration {
    /// Specifies the path of the kurtosis node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the kurtosis binary
    /// that's provided in the user's $PATH.
    #[clap(
        id = "kurtosis.path",
        long = "kurtosis.path",
        default_value = "kurtosis"
    )]
    pub path: PathBuf,
}

/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KitchensinkConfiguration {
    /// Specifies the path of the kitchensink node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the kitchensink binary
    /// that's provided in the user's $PATH.
    #[clap(
        id = "kitchensink.path",
        long = "kitchensink.path",
        default_value = "substrate-node"
    )]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "kitchensink.start-timeout-ms",
        long = "kitchensink.start-timeout-ms",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,

    /// This configures the tool to use Kitchensink instead of using the revive-dev-node.
    #[clap(long = "kitchensink.dont-use-dev-node")]
    pub use_kitchensink: bool,
}

/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReviveDevNodeConfiguration {
    /// Specifies the path of the revive dev node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the revive dev node
    /// binary that's provided in the user's $PATH.
    #[clap(
        id = "revive-dev-node.path",
        long = "revive-dev-node.path",
        default_value = "revive-dev-node"
    )]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "revive-dev-node.start-timeout-ms",
        long = "revive-dev-node.start-timeout-ms",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct EthRpcConfiguration {
    /// Specifies the path of the ETH RPC to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the ETH RPC binary
    /// that's provided in the user's $PATH.
    #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "eth-rpc.start-timeout-ms",
        long = "eth-rpc.start-timeout-ms",
        default_value = "30000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}

/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GenesisConfiguration {
    /// Specifies the path of the genesis file to use for the nodes that are started.
    ///
    /// This is expected to be the path of a JSON geth genesis file.
    #[clap(id = "genesis.path", long = "genesis.path")]
    path: Option<PathBuf>,

    /// The genesis object found at the provided path.
    #[clap(skip)]
    #[serde(skip)]
    genesis: OnceLock<Genesis>,
}

impl GenesisConfiguration {
    pub fn genesis(&self) -> anyhow::Result<&Genesis> {
        static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
            let genesis = include_str!("../../../assets/dev-genesis.json");
            serde_json::from_str(genesis).unwrap()
        });

        match self.genesis.get() {
            Some(genesis) => Ok(genesis),
            None => {
                let genesis = match self.path.as_ref() {
                    Some(genesis_path) => {
                        let genesis_content = read_to_string(genesis_path)?;
                        serde_json::from_str(genesis_content.as_str())?
                    }
                    None => DEFAULT_GENESIS.clone(),
                };
                Ok(self.genesis.get_or_init(|| genesis))
            }
        }
    }
}
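
// A minimal sketch (not part of the diff) of consuming the lazily parsed genesis,
// assuming alloy's Genesis layout: without --genesis.path, the bundled
// dev-genesis.json is parsed once and cached in the OnceLock.
fn chain_id_of(config: &GenesisConfiguration) -> anyhow::Result<u64> {
    // Repeated calls reuse the cached Genesis value.
    Ok(config.genesis()?.config.chain_id)
}
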

/// A set of configuration parameters for the wallet.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct WalletConfiguration {
    /// The private key of the default signer.
    #[clap(
        long = "wallet.default-private-key",
        default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
    )]
    #[serde(serialize_with = "serialize_private_key")]
    default_key: PrivateKeySigner,

    /// This argument controls which private keys the nodes should have access to and which will
    /// be added to their wallet signer set. With a value of N, private keys in (0, N] are added
    /// to the signer set of the node.
    #[clap(long = "wallet.additional-keys", default_value_t = 200)]
    additional_keys: usize,

    /// The wallet object that will be used.
    #[clap(skip)]
    #[serde(skip)]
    wallet: OnceLock<Arc<EthereumWallet>>,
}

impl WalletConfiguration {
    pub fn wallet(&self) -> Arc<EthereumWallet> {
        self.wallet
            .get_or_init(|| {
                let mut wallet = EthereumWallet::new(self.default_key.clone());
                for signer in (1..=self.additional_keys)
                    .map(|id| U256::from(id))
                    .map(|id| id.to_be_bytes::<32>())
                    .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
                {
                    wallet.register_signer(signer);
                }
                Arc::new(wallet)
            })
            .clone()
    }

    pub fn highest_private_key_exclusive(&self) -> U256 {
        U256::try_from(self.additional_keys).unwrap()
    }
}
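
// A short sketch (not part of the diff) tying this to the PrivateKeyAllocator
// from revive-dt-common: the allocator's exclusive upper bound is derived from
// the same additional-keys setting that populates the wallet.
fn allocator_for(config: &WalletConfiguration) -> PrivateKeyAllocator {
    PrivateKeyAllocator::new(config.highest_private_key_exclusive())
}
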
|
||||

fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    value.to_bytes().encode_hex().serialize(serializer)
}

/// A set of configuration parameters for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ConcurrencyConfiguration {
    /// Determines the number of nodes that will be spawned for each chain.
    #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
    pub number_of_nodes: usize,

    /// Determines the number of tokio worker threads that will be used.
    #[arg(
        long = "concurrency.number-of-threads",
        default_value_t = std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(1)
    )]
    pub number_of_threads: usize,

    /// Determines the number of concurrent tasks that will be spawned to run tests.
    ///
    /// Defaults to 20 x the number of nodes.
    #[arg(long = "concurrency.number-of-concurrent-tasks")]
    number_concurrent_tasks: Option<usize>,

    /// Determines if the concurrency limit should be ignored or not.
    #[arg(long = "concurrency.ignore-concurrency-limit")]
    ignore_concurrency_limit: bool,
}

impl ConcurrencyConfiguration {
    pub fn concurrency_limit(&self) -> Option<usize> {
        match self.ignore_concurrency_limit {
            true => None,
            false => Some(
                self.number_concurrent_tasks
                    .unwrap_or(20 * self.number_of_nodes),
            ),
        }
    }
}
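
// Usage sketch (hypothetical values, not from the diff): with the defaults above
// (5 nodes, no explicit task count, limit not ignored) the effective concurrency
// limit is 20 * 5 = 100. The same decision logic, isolated:
#[cfg(test)]
mod concurrency_limit_sketch {
    fn effective_limit(ignore: bool, explicit: Option<usize>, nodes: usize) -> Option<usize> {
        if ignore {
            None
        } else {
            Some(explicit.unwrap_or(20 * nodes))
        }
    }

    #[test]
    fn defaults_give_one_hundred() {
        assert_eq!(effective_limit(false, None, 5), Some(100));
    }
}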

#[derive(Clone, Debug, Parser, Serialize)]
pub struct CompilationConfiguration {
    /// Controls if the compilation cache should be invalidated or not.
    #[arg(long = "compilation.invalidate-cache")]
    pub invalidate_compilation_cache: bool,
}

#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReportConfiguration {
    /// Controls if the compiler input is included in the final report.
    #[clap(long = "report.include-compiler-input")]
    pub include_compiler_input: bool,

    /// Controls if the compiler output is included in the final report.
    #[clap(long = "report.include-compiler-output")]
    pub include_compiler_output: bool,
}

/// Represents the working directory that the program uses.
#[derive(Debug, Clone)]
pub enum WorkingDirectoryConfiguration {
    /// A temporary directory is used as the working directory. This will be removed when dropped.
    TemporaryDirectory(Arc<TempDir>),
    /// A directory with a path is used as the working directory.
    Path(PathBuf),
}

impl WorkingDirectoryConfiguration {
    pub fn as_path(&self) -> &Path {
        self.as_ref()
    }
}

impl Deref for WorkingDirectoryConfiguration {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        self.as_path()
    }
}

impl AsRef<Path> for WorkingDirectoryConfiguration {
    fn as_ref(&self) -> &Path {
        match self {
            WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
            WorkingDirectoryConfiguration::Path(path) => path.as_path(),
        }
    }
}

impl Default for WorkingDirectoryConfiguration {
    fn default() -> Self {
        TempDir::new()
            .map(Arc::new)
            .map(Self::TemporaryDirectory)
            .expect("Failed to create the temporary directory")
    }
}

impl FromStr for WorkingDirectoryConfiguration {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "" => Ok(Default::default()),
            _ => Ok(Self::Path(PathBuf::from(s))),
        }
    }
}

impl Display for WorkingDirectoryConfiguration {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.as_path().display(), f)
    }
}

impl Serialize for WorkingDirectoryConfiguration {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.as_path().serialize(serializer)
    }
}

fn parse_duration(s: &str) -> anyhow::Result<Duration> {
    u64::from_str(s)
        .map(Duration::from_millis)
        .map_err(Into::into)
}
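
// Quick behavioral sketch for the parser above (standalone; the test name is
// hypothetical): the CLI value is interpreted as an integral number of milliseconds,
// and anything non-numeric is rejected.
#[cfg(test)]
mod parse_duration_sketch {
    use super::parse_duration;
    use std::time::Duration;

    #[test]
    fn parses_milliseconds() {
        assert_eq!(parse_duration("30000").unwrap(), Duration::from_millis(30000));
        assert!(parse_duration("not-a-number").is_err());
    }
}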

/// The Solidity compatible node implementation.
///
/// This describes, at a high level, the solutions to be tested against.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum TestingPlatform {
    /// The go-ethereum reference full node EVM implementation.
    Geth,
    /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
    Kitchensink,
}
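
// Round-trip sketch for the kebab-case derives above (strum's `EnumString`/`Display`):
// parsing and printing agree on the kebab-case variant names.
#[cfg(test)]
mod testing_platform_sketch {
    use super::TestingPlatform;
    use std::str::FromStr as _;

    #[test]
    fn kebab_case_round_trip() {
        assert_eq!(
            TestingPlatform::from_str("kitchensink").unwrap(),
            TestingPlatform::Kitchensink
        );
        assert_eq!(TestingPlatform::Geth.to_string(), "geth");
    }
}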
@@ -0,0 +1,41 @@
[package]
name = "revive-dt-core"
description = "revive differential testing core utility"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "retester"
path = "src/main.rs"

[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }
cacache = { workspace = true }
clap = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,357 @@
//! A wrapper around the compiler which allows for caching of compilation artifacts so that
//! they can be reused between runs.

use std::{
    borrow::Cow,
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
};

use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};

use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
use anyhow::{Context as _, Error, Result};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
use tracing::{Instrument, debug, debug_span, instrument};

pub struct CachedCompiler<'a> {
    /// The cache that stores the compiled contracts.
    artifacts_cache: ArtifactsCache,

    /// This mechanism ensures that when multiple compilation requests for the same contract
    /// arrive concurrently, only one of them performs the compilation; every other task that
    /// requests the same compilation gets the cached version.
    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
}

impl<'a> CachedCompiler<'a> {
    pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
        let mut cache = ArtifactsCache::new(path);
        if invalidate_cache {
            cache = cache
                .with_invalidated_cache()
                .await
                .context("Failed to invalidate compilation cache directory")?;
        }
        Ok(Self {
            artifacts_cache: cache,
            cache_key_lock: Default::default(),
        })
    }

    /// Compiles or gets the compilation artifacts from the cache.
    #[allow(clippy::too_many_arguments)]
    #[instrument(
        level = "debug",
        skip_all,
        fields(
            metadata_file_path = %metadata_file_path.display(),
            %mode,
            platform = %platform.platform_identifier()
        ),
        err
    )]
    pub async fn compile_contracts(
        &self,
        metadata: &'a Metadata,
        metadata_file_path: &'a Path,
        mode: Cow<'a, Mode>,
        deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        compiler: &dyn SolidityCompiler,
        platform: &dyn Platform,
        reporter: &ExecutionSpecificReporter,
    ) -> Result<CompilerOutput> {
        let cache_key = CacheKey {
            compiler_identifier: platform.compiler_identifier(),
            compiler_version: compiler.version().clone(),
            metadata_file_path,
            solc_mode: mode.clone(),
        };

        let compilation_callback = || {
            async move {
                compile_contracts(
                    metadata
                        .directory()
                        .context("Failed to get metadata directory while preparing compilation")?,
                    metadata
                        .files_to_compile()
                        .context("Failed to enumerate files to compile from metadata")?,
                    &mode,
                    deployed_libraries,
                    compiler,
                    reporter,
                )
                .map(|compilation_result| compilation_result.map(CacheValue::new))
                .await
            }
            .instrument(debug_span!(
                "Running compilation for the cache key",
                cache_key.compiler_identifier = %cache_key.compiler_identifier,
                cache_key.compiler_version = %cache_key.compiler_version,
                cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
                cache_key.solc_mode = %cache_key.solc_mode,
            ))
        };

        let compiled_contracts = match deployed_libraries {
            // If deployed libraries have been specified then we re-compile the contract, since
            // linking is required in that case.
            Some(_) => {
                debug!("Deployed libraries defined, recompilation must take place");
                debug!("Cache miss");
                compilation_callback()
                    .await
                    .context("Compilation callback for deployed libraries failed")?
                    .compiler_output
            }
            // If no deployed libraries are specified then we can follow the cached flow and
            // attempt to look up the compilation artifacts in the cache.
            None => {
                debug!("Deployed libraries undefined, attempting to make use of cache");

                // Lock this specific cache key so that we do not get inconsistent state: when
                // multiple callers ask for the same compilation artifacts and there's a cache
                // miss, they should not all trigger a compilation. Hence the lock here.
                let read_guard = self.cache_key_lock.read().await;
                let mutex = match read_guard.get(&cache_key).cloned() {
                    Some(value) => {
                        drop(read_guard);
                        value
                    }
                    None => {
                        drop(read_guard);
                        self.cache_key_lock
                            .write()
                            .await
                            .entry(cache_key.clone())
                            .or_default()
                            .clone()
                    }
                };
                let _guard = mutex.lock().await;

                match self.artifacts_cache.get(&cache_key).await {
                    Some(cache_value) => {
                        if deployed_libraries.is_some() {
                            reporter
                                .report_post_link_contracts_compilation_succeeded_event(
                                    compiler.version().clone(),
                                    compiler.path(),
                                    true,
                                    None,
                                    cache_value.compiler_output.clone(),
                                )
                                .expect("Can't happen");
                        } else {
                            reporter
                                .report_pre_link_contracts_compilation_succeeded_event(
                                    compiler.version().clone(),
                                    compiler.path(),
                                    true,
                                    None,
                                    cache_value.compiler_output.clone(),
                                )
                                .expect("Can't happen");
                        }
                        cache_value.compiler_output
                    }
                    None => {
                        compilation_callback()
                            .await
                            .context("Compilation callback failed (cache miss path)")?
                            .compiler_output
                    }
                }
            }
        };

        Ok(compiled_contracts)
    }
}
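
// Standalone sketch (names illustrative, not from the diff) of the per-key locking used
// above: a `RwLock<HashMap<K, Arc<Mutex<()>>>>` hands every caller asking about the same
// key the same mutex, so on a cache miss only the first caller runs the compilation.
#[allow(dead_code)]
mod per_key_lock_sketch {
    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{Mutex, RwLock};

    async fn lock_for_key(
        locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
        key: &str,
    ) -> Arc<Mutex<()>> {
        // Fast path: the lock already exists, so a read guard suffices.
        if let Some(lock) = locks.read().await.get(key).cloned() {
            return lock;
        }
        // Slow path: insert (or race to insert) the lock under a write guard.
        locks.write().await.entry(key.to_owned()).or_default().clone()
    }
}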

async fn compile_contracts(
    metadata_directory: impl AsRef<Path>,
    mut files_to_compile: impl Iterator<Item = PathBuf>,
    mode: &Mode,
    deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
    compiler: &dyn SolidityCompiler,
    reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
    let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
        .collect::<Vec<_>>();

    let compilation = Compiler::new()
        .with_allow_path(metadata_directory)
        // Handling the modes
        .with_optimization(mode.optimize_setting)
        .with_pipeline(mode.pipeline)
        // Adding the contract sources to the compiler.
        .try_then(|compiler| {
            files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
        })?
        // Adding the deployed libraries to the compiler.
        .then(|compiler| {
            deployed_libraries
                .iter()
                .flat_map(|value| value.iter())
                .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
                .flat_map(|(_, ident, address, _)| {
                    all_sources_in_dir
                        .iter()
                        .map(move |path| (ident, address, path))
                })
                .fold(compiler, |compiler, (ident, address, path)| {
                    compiler.with_library(path, ident.as_str(), *address)
                })
        });

    let input = compilation.input().clone();
    let output = compilation.try_build(compiler).await;

    match (output.as_ref(), deployed_libraries.is_some()) {
        (Ok(output), true) => {
            reporter
                .report_post_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
        }
        (Ok(output), false) => {
            reporter
                .report_pre_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
        }
        (Err(err), true) => {
            reporter
                .report_post_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
        }
        (Err(err), false) => {
            reporter
                .report_pre_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
        }
    }

    output
}

struct ArtifactsCache {
    path: PathBuf,
}

impl ArtifactsCache {
    pub fn new(path: impl AsRef<Path>) -> Self {
        Self {
            path: path.as_ref().to_path_buf(),
        }
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn with_invalidated_cache(self) -> Result<Self> {
        cacache::clear(self.path.as_path())
            .await
            .map_err(Into::<Error>::into)
            .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
        Ok(self)
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
        let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
        let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
        cacache::write(self.path.as_path(), key.encode_hex(), value)
            .await
            .with_context(|| {
                format!("Failed to write cache entry under {}", self.path.display())
            })?;
        Ok(())
    }

    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
        let key = bson::to_vec(key).ok()?;
        let value = cacache::read(self.path.as_path(), key.encode_hex())
            .await
            .ok()?;
        let value = bson::from_slice::<CacheValue>(&value).ok()?;
        Some(value)
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn get_or_insert_with(
        &self,
        key: &CacheKey<'_>,
        callback: impl AsyncFnOnce() -> Result<CacheValue>,
    ) -> Result<CacheValue> {
        match self.get(key).await {
            Some(value) => {
                debug!("Cache hit");
                Ok(value)
            }
            None => {
                debug!("Cache miss");
                let value = callback().await?;
                self.insert(key, &value).await?;
                Ok(value)
            }
        }
    }
}
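
// Sketch of the key encoding the cache above relies on: the key struct is serialized to
// BSON bytes and then hex-encoded, which yields a stable string key for cacache.
// (Standalone example; `ExampleKey` is illustrative, not a type from the diff.)
#[allow(dead_code)]
mod cache_key_sketch {
    use alloy::hex::ToHexExt as _;
    use serde::Serialize;

    #[derive(Serialize)]
    struct ExampleKey<'a> {
        compiler_version: &'a str,
        mode: &'a str,
    }

    fn cache_key_string(key: &ExampleKey<'_>) -> anyhow::Result<String> {
        Ok(bson::to_vec(key)?.encode_hex())
    }
}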

#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
    /// The identifier of the used compiler.
    compiler_identifier: CompilerIdentifier,

    /// The version of the compiler that was used to compile the artifacts.
    compiler_version: Version,

    /// The path of the metadata file that the compilation artifacts are for.
    metadata_file_path: &'a Path,

    /// The mode that the compilation artifacts were compiled with.
    solc_mode: Cow<'a, Mode>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct CacheValue {
    /// The compiler output from the compilation run.
    compiler_output: CompilerOutput,
}

impl CacheValue {
    pub fn new(compiler_output: CompilerOutput) -> Self {
        Self { compiler_output }
    }
}
@@ -0,0 +1,900 @@
//! The test driver handles the compilation and execution of the test cases.

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;

use alloy::consensus::EMPTY_ROOT_HASH;
use alloy::hex;
use alloy::json_abi::JsonAbi;
use alloy::network::{Ethereum, TransactionBuilder};
use alloy::primitives::{TxHash, U256};
use alloy::rpc::types::TransactionReceipt;
use alloy::rpc::types::trace::geth::{
    CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
    GethDebugTracingOptions, GethTrace, PreStateConfig,
};
use alloy::{
    primitives::Address,
    rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::{Context as _, bail};
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;

use revive_dt_format::case::Case;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::steps::{
    BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
    StepIdx, StepPath, StorageEmptyAssertionStep,
};
use revive_dt_format::{metadata::Metadata, steps::Step};
use revive_dt_node_interaction::EthereumNode;
use tokio::sync::Mutex;
use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument};

#[derive(Clone)]
pub struct CaseState {
    /// A map of all of the compiled contracts for the given metadata file.
    compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// This map stores the contract deployments for this case.
    deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    variables: HashMap<String, U256>,

    /// Stores the compiler version used for the current case.
    compiler_version: Version,

    /// The execution reporter.
    execution_reporter: ExecutionSpecificReporter,

    /// The private key allocator used for this case state. This is an `Arc<Mutex<..>>` so that
    /// the state can be cloned while all of the clones refer to the same allocator.
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
}

impl CaseState {
    pub fn new(
        compiler_version: Version,
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
        execution_reporter: ExecutionSpecificReporter,
        private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
            compiler_version,
            execution_reporter,
            private_key_allocator,
        }
    }

    pub async fn handle_step(
        &mut self,
        metadata: &Metadata,
        step: &Step,
        step_path: &StepPath,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<StepOutput> {
        match step {
            Step::FunctionCall(input) => {
                let (receipt, geth_trace, diff_mode) = self
                    .handle_input(metadata, input, node)
                    .await
                    .context("Failed to handle function call step")?;
                Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
            }
            Step::BalanceAssertion(balance_assertion) => {
                self.handle_balance_assertion(metadata, balance_assertion, node)
                    .await
                    .context("Failed to handle balance assertion step")?;
                Ok(StepOutput::BalanceAssertion)
            }
            Step::StorageEmptyAssertion(storage_empty) => {
                self.handle_storage_empty(metadata, storage_empty, node)
                    .await
                    .context("Failed to handle storage empty assertion step")?;
                Ok(StepOutput::StorageEmptyAssertion)
            }
            Step::Repeat(repetition_step) => {
                self.handle_repeat(
                    metadata,
                    repetition_step.repeat,
                    &repetition_step.steps,
                    step_path,
                    node,
                )
                .await
                .context("Failed to handle the repetition step")?;
                Ok(StepOutput::Repetition)
            }
            Step::AllocateAccount(account_allocation) => {
                self.handle_account_allocation(account_allocation.variable_name.as_str())
                    .await
                    .context("Failed to allocate account")?;
                Ok(StepOutput::AccountAllocation)
            }
        }
        .inspect(|_| info!("Step Succeeded"))
    }

    #[instrument(level = "info", name = "Handling Input", skip_all)]
    pub async fn handle_input(
        &mut self,
        metadata: &Metadata,
        input: &FunctionCallStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
        let resolver = node.resolver().await?;

        let deployment_receipts = self
            .handle_input_contract_deployment(metadata, input, node)
            .await
            .context("Failed during contract deployment phase of input handling")?;
        let execution_receipt = self
            .handle_input_execution(input, deployment_receipts, node)
            .await
            .context("Failed during transaction execution phase of input handling")?;
        let tracing_result = self
            .handle_input_call_frame_tracing(execution_receipt.transaction_hash, node)
            .await
            .context("Failed during callframe tracing phase of input handling")?;
        self.handle_input_variable_assignment(input, &tracing_result)
            .context("Failed to assign variables from callframe output")?;
        let (_, (geth_trace, diff_mode)) = try_join!(
            self.handle_input_expectations(
                input,
                &execution_receipt,
                resolver.as_ref(),
                &tracing_result
            ),
            self.handle_input_diff(execution_receipt.transaction_hash, node)
        )
        .context("Failed while evaluating expectations and diffs in parallel")?;
        Ok((execution_receipt, geth_trace, diff_mode))
    }

    #[instrument(level = "info", name = "Handling Balance Assertion", skip_all)]
    pub async fn handle_balance_assertion(
        &mut self,
        metadata: &Metadata,
        balance_assertion: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
            .await
            .context("Failed to deploy contract for balance assertion")?;
        self.handle_balance_assertion_execution(balance_assertion, node)
            .await
            .context("Failed to execute balance assertion")?;
        Ok(())
    }

    #[instrument(level = "info", name = "Handling Storage Assertion", skip_all)]
    pub async fn handle_storage_empty(
        &mut self,
        metadata: &Metadata,
        storage_empty: &StorageEmptyAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
            .await
            .context("Failed to deploy contract for storage empty assertion")?;
        self.handle_storage_empty_assertion_execution(storage_empty, node)
            .await
            .context("Failed to execute storage empty assertion")?;
        Ok(())
    }

    #[instrument(level = "info", name = "Handling Repetition", skip_all)]
    pub async fn handle_repeat(
        &mut self,
        metadata: &Metadata,
        repetitions: usize,
        steps: &[Step],
        step_path: &StepPath,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let tasks = (0..repetitions).map(|_| {
            let mut state = self.clone();
            async move {
                for (step_idx, step) in steps.iter().enumerate() {
                    let step_path = step_path.append(step_idx);
                    state.handle_step(metadata, step, &step_path, node).await?;
                }
                Ok::<(), anyhow::Error>(())
            }
        });
        try_join_all(tasks).await?;
        Ok(())
    }
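
    /// Standalone sketch (hypothetical, not from the diff) of the fan-out pattern used by
    /// `handle_repeat` above: each repetition gets its own cloned state, the clones run
    /// concurrently, and `try_join_all` fails fast on the first error.
    #[allow(dead_code)]
    async fn repeat_fan_out_sketch(repetitions: usize) -> anyhow::Result<()> {
        async fn run_once(_repetition: usize) -> anyhow::Result<()> {
            Ok(()) // stands in for executing the nested steps with a cloned state
        }
        try_join_all((0..repetitions).map(run_once)).await?;
        Ok(())
    }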

    #[instrument(level = "info", name = "Handling Account Allocation", skip_all)]
    pub async fn handle_account_allocation(&mut self, variable_name: &str) -> anyhow::Result<()> {
        let Some(variable_name) = variable_name.strip_prefix("$VARIABLE:") else {
            bail!("Account allocation must start with $VARIABLE:");
        };

        let private_key = self.private_key_allocator.lock().await.allocate()?;
        let account = private_key.address();
        let variable = U256::from_be_slice(account.0.as_slice());

        self.variables.insert(variable_name.to_string(), variable);

        Ok(())
    }
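
    /// Sketch (not from the diff): how an allocated account becomes a variable above. The
    /// 20 address bytes are read as a big-endian integer, i.e. right-aligned within the
    /// 256-bit word.
    #[allow(dead_code)]
    fn address_to_variable_sketch(address: Address) -> U256 {
        U256::from_be_slice(address.0.as_slice())
    }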

    /// Handles the contract deployment for a given input, performing the deployment if it is
    /// actually needed.
    #[instrument(level = "info", skip_all)]
    async fn handle_input_contract_deployment(
        &mut self,
        metadata: &Metadata,
        input: &FunctionCallStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
        let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
        for instance in input.find_all_contract_instances().into_iter() {
            if !self.deployed_contracts.contains_key(&instance) {
                instances_we_must_deploy.entry(instance).or_insert(false);
            }
        }
        if let Method::Deployer = input.method {
            instances_we_must_deploy.swap_remove(&input.instance);
            instances_we_must_deploy.insert(input.instance.clone(), true);
        }

        let mut receipts = HashMap::new();
        for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
            let calldata = deploy_with_constructor_arguments.then_some(&input.calldata);
            let value = deploy_with_constructor_arguments
                .then_some(input.value)
                .flatten();

            let caller = {
                let context = self.default_resolution_context();
                let resolver = node.resolver().await?;
                input
                    .caller
                    .resolve_address(resolver.as_ref(), context)
                    .await?
            };
            if let (_, _, Some(receipt)) = self
                .get_or_deploy_contract_instance(&instance, metadata, caller, calldata, value, node)
                .await
                .context("Failed to get or deploy contract instance during input execution")?
            {
                receipts.insert(instance.clone(), receipt);
            }
        }

        Ok(receipts)
    }

    /// Handles the execution of the input in terms of the calls that need to be made.
    #[instrument(level = "info", skip_all)]
    async fn handle_input_execution(
        &mut self,
        input: &FunctionCallStep,
        mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<TransactionReceipt> {
        match input.method {
            // This input was already executed during the deployment phase, so we just need to
            // look up the transaction receipt here and continue on.
            Method::Deployer => deployment_receipts
                .remove(&input.instance)
                .context("Failed to find deployment receipt for constructor call"),
            Method::Fallback | Method::FunctionName(_) => {
                let resolver = node.resolver().await?;
                let tx = input
                    .legacy_transaction(resolver.as_ref(), self.default_resolution_context())
                    .await?;

                node.execute_transaction(tx).await
            }
        }
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_call_frame_tracing(
        &self,
        tx_hash: TxHash,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<CallFrame> {
        node.trace_transaction(
            tx_hash,
            GethDebugTracingOptions {
                tracer: Some(GethDebugTracerType::BuiltInTracer(
                    GethDebugBuiltInTracerType::CallTracer,
                )),
                tracer_config: GethDebugTracerConfig(serde_json::json! {{
                    "onlyTopCall": true,
                    "withLog": false,
                    "withStorage": false,
                    "withMemory": false,
                    "withStack": false,
                    "withReturnData": true
                }}),
                ..Default::default()
            },
        )
        .await
        .map(|trace| {
            trace
                .try_into_call_frame()
                .expect("Impossible - we requested a callframe trace so we must get it back")
        })
    }

    #[instrument(level = "info", skip_all)]
    fn handle_input_variable_assignment(
        &mut self,
        input: &FunctionCallStep,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        let Some(ref assignments) = input.variable_assignments else {
            return Ok(());
        };

        // Handling the return data variable assignments.
        for (variable_name, output_word) in assignments.return_data.iter().zip(
            tracing_result
                .output
                .as_ref()
                .unwrap_or_default()
                .to_vec()
                .chunks(32),
        ) {
            let value = U256::from_be_slice(output_word);
            self.variables.insert(variable_name.clone(), value);
            tracing::info!(
                variable_name,
                variable_value = hex::encode(value.to_be_bytes::<32>()),
                "Assigned variable"
            );
        }

        Ok(())
    }
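
    /// Sketch (not from the diff) of the word-splitting used above: ABI return data is
    /// consumed in 32-byte chunks, each chunk becoming one `U256` variable value (a short
    /// final chunk is interpreted as-is).
    #[allow(dead_code)]
    fn return_data_words_sketch(output: &[u8]) -> Vec<U256> {
        output.chunks(32).map(U256::from_be_slice).collect()
    }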

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectations(
        &self,
        input: &FunctionCallStep,
        execution_receipt: &TransactionReceipt,
        resolver: &(impl ResolverApi + ?Sized),
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        // Resolving the `input.expected` into a series of expectations that we can then assert on.
        let mut expectations = match input {
            FunctionCallStep {
                expected: Some(Expected::Calldata(calldata)),
                ..
            } => vec![ExpectedOutput::new().with_calldata(calldata.clone())],
            FunctionCallStep {
                expected: Some(Expected::Expected(expected)),
                ..
            } => vec![expected.clone()],
            FunctionCallStep {
                expected: Some(Expected::ExpectedMany(expected)),
                ..
            } => expected.clone(),
            FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
        };

        // This is a special case that we have to support on its own. For a call to the deployer
        // method, the tests assert that it "returns" the address of the contract. Deployments,
        // however, do not return the address of the contract but the runtime code of the
        // contract, so that assertion would always fail. Therefore, we replace it with a plain
        // "check if it succeeded" assertion.
        if let Method::Deployer = &input.method {
            for expectation in expectations.iter_mut() {
                expectation.return_data = None;
            }
        }

        futures::stream::iter(expectations.into_iter().map(Ok))
            .try_for_each_concurrent(None, |expectation| async move {
                self.handle_input_expectation_item(
                    execution_receipt,
                    resolver,
                    expectation,
                    tracing_result,
                )
                .await
            })
            .await
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectation_item(
        &self,
        execution_receipt: &TransactionReceipt,
        resolver: &(impl ResolverApi + ?Sized),
        expectation: ExpectedOutput,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        if let Some(ref version_requirement) = expectation.compiler_version {
            if !version_requirement.matches(&self.compiler_version) {
                return Ok(());
            }
        }

        let resolution_context = self
            .default_resolution_context()
            .with_block_number(execution_receipt.block_number.as_ref())
            .with_transaction_hash(&execution_receipt.transaction_hash);

        // Handling the receipt state assertion.
        let expected = !expectation.exception;
        let actual = execution_receipt.status();
        if actual != expected {
            tracing::error!(
                expected,
                actual,
                ?execution_receipt,
                ?tracing_result,
                "Transaction status assertion failed"
            );
            anyhow::bail!(
                "Transaction status assertion failed - Expected {expected} but got {actual}",
            );
        }

        // Handling the calldata assertion.
        if let Some(ref expected_calldata) = expectation.return_data {
            let expected = expected_calldata;
            let actual = &tracing_result.output.as_ref().unwrap_or_default();
            if !expected
                .is_equivalent(actual, resolver, resolution_context)
                .await
                .context("Failed to resolve calldata equivalence for return data assertion")?
            {
                tracing::error!(
                    ?execution_receipt,
                    ?expected,
                    %actual,
                    "Calldata assertion failed"
                );
                anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",);
            }
        }

        // Handling the events assertion.
        if let Some(ref expected_events) = expectation.events {
            // Handling the events length assertion.
            let expected = expected_events.len();
            let actual = execution_receipt.logs().len();
            if actual != expected {
                tracing::error!(expected, actual, "Event count assertion failed",);
                anyhow::bail!(
                    "Event count assertion failed - Expected {expected} but got {actual}",
                );
            }

            // Handling the events assertion.
            for (event_idx, (expected_event, actual_event)) in expected_events
                .iter()
                .zip(execution_receipt.logs())
                .enumerate()
            {
                // Handling the emitter assertion.
                if let Some(ref expected_address) = expected_event.address {
                    let expected = expected_address
                        .resolve_address(resolver, resolution_context)
                        .await?;
                    let actual = actual_event.address();
                    if actual != expected {
                        tracing::error!(
                            event_idx,
                            %expected,
                            %actual,
                            "Event emitter assertion failed",
                        );
                        anyhow::bail!(
                            "Event emitter assertion failed - Expected {expected} but got {actual}",
                        );
                    }
                }

                // Handling the topics assertion.
                for (expected, actual) in expected_event
                    .topics
                    .as_slice()
                    .iter()
                    .zip(actual_event.topics())
                {
                    let expected = Calldata::new_compound([expected]);
                    if !expected
                        .is_equivalent(&actual.0, resolver, resolution_context)
                        .await
                        .context("Failed to resolve event topic equivalence")?
                    {
                        tracing::error!(
                            event_idx,
                            ?execution_receipt,
                            ?expected,
                            ?actual,
                            "Event topics assertion failed",
                        );
                        anyhow::bail!(
                            "Event topics assertion failed - Expected {expected:?} but got {actual:?}",
                        );
                    }
                }

                // Handling the values assertion.
                let expected = &expected_event.values;
                let actual = &actual_event.data().data;
                if !expected
                    .is_equivalent(&actual.0, resolver, resolution_context)
                    .await
                    .context("Failed to resolve event value equivalence")?
                {
                    tracing::error!(
                        event_idx,
                        ?execution_receipt,
                        ?expected,
                        ?actual,
                        "Event value assertion failed",
                    );
                    anyhow::bail!(
                        "Event value assertion failed - Expected {expected:?} but got {actual:?}",
                    );
                }
            }
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_diff(
        &self,
        tx_hash: TxHash,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(GethTrace, DiffMode)> {
        let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
            diff_mode: Some(true),
            disable_code: None,
            disable_storage: None,
        });

        let trace = node
            .trace_transaction(tx_hash, trace_options)
            .await
            .context("Failed to obtain geth prestate tracer output")?;
        let diff = node
            .state_diff(tx_hash)
            .await
            .context("Failed to obtain state diff for transaction")?;

        Ok((trace, diff))
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_contract_deployment(
        &mut self,
        metadata: &Metadata,
        balance_assertion: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let Some(address) = balance_assertion.address.as_resolvable_address() else {
            return Ok(());
        };
        let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
            return Ok(());
        };

        self.get_or_deploy_contract_instance(
            &instance,
            metadata,
            FunctionCallStep::default_caller_address(),
            None,
            None,
            node,
        )
        .await?;
        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_execution(
        &mut self,
        BalanceAssertionStep {
            address,
            expected_balance: amount,
            ..
        }: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let resolver = node.resolver().await?;
        let address = address
            .resolve_address(resolver.as_ref(), self.default_resolution_context())
            .await?;

        let balance = node.balance_of(address).await?;

        let expected = *amount;
        let actual = balance;
        if expected != actual {
            tracing::error!(%expected, %actual, %address, "Balance assertion failed");
            anyhow::bail!(
                "Balance assertion failed - Expected {expected} but got {actual} for address {address}",
            )
        }

        Ok(())
    }
#[instrument(level = "info", skip_all)]
|
||||
pub async fn handle_storage_empty_assertion_contract_deployment(
|
||||
&mut self,
|
||||
metadata: &Metadata,
|
||||
storage_empty_assertion: &StorageEmptyAssertionStep,
|
||||
node: &dyn EthereumNode,
|
||||
) -> anyhow::Result<()> {
|
||||
let Some(address) = storage_empty_assertion.address.as_resolvable_address() else {
|
||||
return Ok(());
|
||||
};
|
||||
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
self.get_or_deploy_contract_instance(
|
||||
&instance,
|
||||
metadata,
|
||||
FunctionCallStep::default_caller_address(),
|
||||
None,
|
||||
None,
|
||||
node,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all)]
|
||||
pub async fn handle_storage_empty_assertion_execution(
|
||||
&mut self,
|
||||
StorageEmptyAssertionStep {
|
||||
address,
|
||||
is_storage_empty,
|
||||
..
|
||||
}: &StorageEmptyAssertionStep,
|
||||
node: &dyn EthereumNode,
|
||||
) -> anyhow::Result<()> {
|
||||
let resolver = node.resolver().await?;
|
||||
let address = address
|
||||
.resolve_address(resolver.as_ref(), self.default_resolution_context())
|
||||
.await?;
|
||||
|
||||
let storage = node.latest_state_proof(address, Default::default()).await?;
|
||||
let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;
|
||||
|
||||
let expected = is_storage_empty;
|
||||
let actual = is_empty;
|
||||
|
||||
if *expected != actual {
|
||||
tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed");
|
||||
anyhow::bail!(
|
||||
"Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
|
||||
expected,
|
||||
actual,
|
||||
address,
|
||||
address,
|
||||
)
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||

    /// Gets the information of a deployed contract or library from the state, deploying it
    /// first if it is found not to be deployed yet.
    ///
    /// If a [`CaseIdx`] is not specified then this contract instance address will be stored in
    /// the cross-case deployed contracts address mapping.
    #[allow(clippy::too_many_arguments)]
    pub async fn get_or_deploy_contract_instance(
        &mut self,
        contract_instance: &ContractInstance,
        metadata: &Metadata,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
        if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
            return Ok((*address, abi.clone(), None));
        }

        let Some(ContractPathAndIdent {
            contract_source_path,
            contract_ident,
        }) = metadata.contract_sources()?.remove(contract_instance)
        else {
            anyhow::bail!(
                "Contract source not found for instance {:?}",
                contract_instance
            )
        };

        let Some((code, abi)) = self
            .compiled_contracts
            .get(&contract_source_path)
            .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
            .cloned()
        else {
            anyhow::bail!(
                "Failed to find information for contract {:?}",
                contract_instance
            )
        };

        let mut code = match alloy::hex::decode(&code) {
            Ok(code) => code,
            Err(error) => {
                tracing::error!(
                    ?error,
                    contract_source_path = contract_source_path.display().to_string(),
                    contract_ident = contract_ident.as_ref(),
                    "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
                );
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };

        if let Some(calldata) = calldata {
            let resolver = node.resolver().await?;
            let calldata = calldata
                .calldata(resolver.as_ref(), self.default_resolution_context())
                .await?;
            code.extend(calldata);
        }

        let tx = {
            let tx = TransactionRequest::default().from(deployer);
            let tx = match value {
                Some(ref value) => tx.value(value.into_inner()),
                _ => tx,
            };
            TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
        };

        let receipt = match node.execute_transaction(tx).await {
            Ok(receipt) => receipt,
            Err(error) => {
                tracing::error!(?error, "Contract deployment transaction failed.");
                return Err(error);
            }
        };

        let Some(address) = receipt.contract_address else {
            anyhow::bail!("Contract deployment didn't return an address");
        };
        tracing::info!(
            instance_name = ?contract_instance,
            instance_address = ?address,
            "Deployed contract"
        );
        self.execution_reporter
            .report_contract_deployed_event(contract_instance.clone(), address)?;

        self.deployed_contracts.insert(
            contract_instance.clone(),
            (contract_ident, address, abi.clone()),
        );

        Ok((address, abi, Some(receipt)))
    }

    fn default_resolution_context(&self) -> ResolutionContext<'_> {
        ResolutionContext::default()
            .with_deployed_contracts(&self.deployed_contracts)
            .with_variables(&self.variables)
    }
}
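
// Sketch (module-level, not from the diff) of the deployment payload assembly used above:
// the creation bytecode, followed by the ABI-encoded constructor arguments, forms the
// `data` of a deploy transaction.
#[allow(dead_code)]
mod deploy_payload_sketch {
    fn deploy_payload(creation_code: &[u8], constructor_args: &[u8]) -> Vec<u8> {
        let mut payload = Vec::with_capacity(creation_code.len() + constructor_args.len());
        payload.extend_from_slice(creation_code);
        payload.extend_from_slice(constructor_args);
        payload
    }
}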

pub struct CaseDriver<'a> {
    metadata: &'a Metadata,
    case: &'a Case,
    platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
}

impl<'a> CaseDriver<'a> {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        metadata: &'a Metadata,
        case: &'a Case,
        platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
    ) -> CaseDriver<'a> {
        Self {
            metadata,
            case,
            platform_state,
        }
    }

    #[instrument(level = "info", name = "Executing Case", skip_all)]
    pub async fn execute(&mut self) -> anyhow::Result<usize> {
        let mut steps_executed = 0;
        for (step_idx, step) in self
            .case
            .steps_iterator()
            .enumerate()
            .map(|(idx, v)| (StepIdx::new(idx), v))
        {
            let metadata = self.metadata;
            let step_futures =
                self.platform_state
                    .iter_mut()
                    .map(|(node, platform_id, case_state)| {
                        let platform_id = *platform_id;
                        let node_ref = *node;
                        let step = step.clone();
                        let span = info_span!(
                            "Handling Step",
                            %step_idx,
                            platform = %platform_id,
                        );
                        async move {
                            let step_path = StepPath::from_iterator([step_idx]);
                            case_state
                                .handle_step(metadata, &step, &step_path, node_ref)
                                .await
                                .map_err(|e| (platform_id, e))
                        }
                        .instrument(span)
                    });

            match try_join_all(step_futures).await {
                Ok(_outputs) => {
                    steps_executed += 1;
                }
                Err((platform_id, error)) => {
                    tracing::error!(
                        %step_idx,
                        platform = %platform_id,
                        ?error,
                        "Step failed on platform",
                    );
                    return Err(error);
                }
            }
        }

        Ok(steps_executed)
    }
}

#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
pub enum StepOutput {
    FunctionCall(TransactionReceipt, GethTrace, DiffMode),
    BalanceAssertion,
    StorageEmptyAssertion,
    Repetition,
    AccountAllocation,
}
@@ -0,0 +1,425 @@
//! The revive differential testing core library.
//!
//! This crate defines the testing configuration and
//! provides a helper utility to execute tests.

use std::{
    pin::Pin,
    thread::{self, JoinHandle},
};

use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*;
use revive_dt_node::{
    Node, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;

pub mod driver;

/// A trait that describes the interface for the platforms that are supported by the tool.
#[allow(clippy::type_complexity)]
pub trait Platform {
    /// Returns the identifier of this platform. This is a combination of the node and the
    /// compiler used.
    fn platform_identifier(&self) -> PlatformIdentifier;

    /// Returns a full identifier for the platform.
    fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
        (
            self.node_identifier(),
            self.vm_identifier(),
            self.compiler_identifier(),
        )
    }

    /// Returns the identifier of the node used.
    fn node_identifier(&self) -> NodeIdentifier;

    /// Returns the identifier of the VM used.
    fn vm_identifier(&self) -> VmIdentifier;

    /// Returns the identifier of the compiler used.
    fn compiler_identifier(&self) -> CompilerIdentifier;

    /// Creates a new node for the platform by spawning a new thread, creating the node object,
    /// initializing it, spawning it, and waiting for it to start up.
    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;

    /// Creates a new compiler for the provided platform.
    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
}
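
// Sketch (not from the diff) of how a caller might drive the trait above: spawn the node
// thread through the platform, then join it to obtain the boxed node handle.
// `start_node_sketch` and its error message are hypothetical.
#[allow(dead_code)]
fn start_node_sketch(
    platform: &dyn Platform,
    context: Context,
) -> anyhow::Result<Box<dyn EthereumNode + Send + Sync>> {
    let handle = platform.new_node(context)?;
    handle
        .join()
        .map_err(|_| anyhow::anyhow!("node startup thread panicked"))?
}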

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct GethEvmSolcPlatform;

impl Platform for GethEvmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::GethEvmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Geth
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = GethNode::new(context);
            let node = spawn_node::<GethNode>(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct LighthouseGethEvmSolcPlatform;

impl Platform for LighthouseGethEvmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::LighthouseGethEvmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::LighthouseGeth
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = LighthouseGethNode::new(context);
            let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkPolkavmResolcPlatform;

impl Platform for KitchensinkPolkavmResolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::KitchensinkPolkavmResolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Kitchensink
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::PolkaVM
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Resolc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                kitchensink_path,
                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Resolc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkRevmSolcPlatform;

impl Platform for KitchensinkRevmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::KitchensinkRevmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::Kitchensink
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                kitchensink_path,
                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Solc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodePolkavmResolcPlatform;

impl Platform for ReviveDevNodePolkavmResolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ReviveDevNodePolkavmResolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::ReviveDevNode
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::PolkaVM
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Resolc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
                revive_dev_node_path,
                SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
                context,
            );
            let node = spawn_node(node, genesis)?;
            Ok(Box::new(node) as Box<_>)
        }))
    }

    fn new_compiler(
        &self,
        context: Context,
        version: Option<VersionOrRequirement>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
        Box::pin(async move {
            let compiler = Resolc::new(context, version).await;
            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodeRevmSolcPlatform;

impl Platform for ReviveDevNodeRevmSolcPlatform {
    fn platform_identifier(&self) -> PlatformIdentifier {
        PlatformIdentifier::ReviveDevNodeRevmSolc
    }

    fn node_identifier(&self) -> NodeIdentifier {
        NodeIdentifier::ReviveDevNode
    }

    fn vm_identifier(&self) -> VmIdentifier {
        VmIdentifier::Evm
    }

    fn compiler_identifier(&self) -> CompilerIdentifier {
        CompilerIdentifier::Solc
    }

    fn new_node(
        &self,
        context: Context,
    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
            .path
            .clone();
        let genesis = genesis_configuration.genesis()?.clone();
        Ok(thread::spawn(move || {
            let node = SubstrateNode::new(
|
||||
revive_dev_node_path,
|
||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PlatformIdentifier> for Box<dyn Platform> {
|
||||
fn from(value: PlatformIdentifier) -> Self {
|
||||
match value {
|
||||
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
|
||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
||||
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::KitchensinkRevmSolc => {
|
||||
Box::new(KitchensinkRevmSolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PlatformIdentifier> for &dyn Platform {
|
||||
fn from(value: PlatformIdentifier) -> Self {
|
||||
match value {
|
||||
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
|
||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
||||
}
|
||||
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
||||
&KitchensinkPolkavmResolcPlatform as &dyn Platform
|
||||
}
|
||||
PlatformIdentifier::KitchensinkRevmSolc => {
|
||||
&KitchensinkRevmSolcPlatform as &dyn Platform
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
||||
}
|
||||
}
|
||||
}
|
||||
}
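
// Illustrative use of the conversions above (a sketch, not part of the platform
// code): any `PlatformIdentifier` variant can be turned into a trait object and
// queried for its constituent identifiers. `GethEvmSolc` is just one example
// variant taken from the match arms above.
//
// let platform: &dyn Platform = PlatformIdentifier::GethEvmSolc.into();
// let _compiler = platform.compiler_identifier(); // CompilerIdentifier::Solc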

fn spawn_node<T: Node + EthereumNode + Send + Sync>(
    mut node: T,
    genesis: Genesis,
) -> anyhow::Result<T> {
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawning node"
    );
    node.spawn(genesis)
        .context("Failed to spawn node process")?;
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawned node"
    );
    Ok(node)
}
@@ -0,0 +1,783 @@
mod cached_compiler;
mod pool;

use std::{
    borrow::Cow,
    collections::{BTreeSet, HashMap},
    io::{BufWriter, Write, stderr},
    path::Path,
    sync::Arc,
    time::Instant,
};

use alloy::{
    network::{Ethereum, TransactionBuilder},
    rpc::types::TransactionRequest,
};
use anyhow::Context as _;
use clap::Parser;
use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::{IndexMap, indexmap};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
    ExecutionSpecificReporter, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
    TestSpecificReporter, TestSpecifier,
};
use schemars::schema_for;
use serde_json::{Value, json};
use tokio::sync::Mutex;
use tracing::{debug, error, info, info_span, instrument};
use tracing_subscriber::{EnvFilter, FmtSubscriber};

use revive_dt_common::{
    iterators::EitherIter,
    types::{Mode, PrivateKeyAllocator},
};
use revive_dt_compiler::SolidityCompiler;
use revive_dt_config::{Context, *};
use revive_dt_core::{
    Platform,
    driver::{CaseDriver, CaseState},
};
use revive_dt_format::{
    case::{Case, CaseIdx},
    corpus::Corpus,
    metadata::{ContractPathAndIdent, Metadata, MetadataFile},
    mode::ParsedMode,
    steps::{FunctionCallStep, Step},
};

use crate::cached_compiler::CachedCompiler;
use crate::pool::NodePool;

fn main() -> anyhow::Result<()> {
    let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
        .lossy(false)
        // 0x1000000 = 16,777,216 buffered lines; assuming each line holds 255
        // one-byte characters, the buffer can grow to roughly 4 GB of log output.
        .buffered_lines_limit(0x1000000)
        .thread_name("buffered writer")
        .finish(std::io::stdout());

    let subscriber = FmtSubscriber::builder()
        .with_writer(writer)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_env_filter(EnvFilter::from_default_env())
        .with_ansi(false)
        .pretty()
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    info!("Differential testing tool is starting");

    let context = Context::try_parse()?;
    let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();

    match context {
        Context::ExecuteTests(context) => {
            let tests = collect_corpora(&context)
                .context("Failed to collect corpus files from provided arguments")?
                .into_iter()
                .inspect(|(corpus, _)| {
                    reporter
                        .report_corpus_file_discovery_event(corpus.clone())
                        .expect("Can't fail")
                })
                .flat_map(|(_, files)| files.into_iter())
                .inspect(|metadata_file| {
                    reporter
                        .report_metadata_file_discovery_event(
                            metadata_file.metadata_file_path.clone(),
                            metadata_file.content.clone(),
                        )
                        .expect("Can't fail")
                })
                .collect::<Vec<_>>();

            tokio::runtime::Builder::new_multi_thread()
                .worker_threads(context.concurrency_configuration.number_of_threads)
                .enable_all()
                .build()
                .expect("Failed building the Runtime")
                .block_on(async move {
                    execute_corpus(*context, &tests, reporter, report_aggregator_task)
                        .await
                        .context("Failed to execute corpus")
                })
        }
        Context::ExportJsonSchema => {
            let schema = schema_for!(Metadata);
            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
            Ok(())
        }
    }
}

#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(
    context: &TestExecutionContext,
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
    let mut corpora = HashMap::new();

    for path in &context.corpus {
        let span = info_span!("Processing corpus file", path = %path.display());
        let _guard = span.enter();

        let corpus = Corpus::try_from_path(path)?;
        info!(
            name = corpus.name(),
            number_of_contained_paths = corpus.path_count(),
            "Deserialized corpus file"
        );
        let tests = corpus.enumerate_tests();
        corpora.insert(corpus, tests);
    }

    Ok(corpora)
}

async fn run_driver(
    context: TestExecutionContext,
    metadata_files: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
    platforms: Vec<&dyn Platform>,
) -> anyhow::Result<()> {
    let mut nodes = Vec::<(&dyn Platform, NodePool)>::new();
    for platform in platforms.into_iter() {
        let pool = NodePool::new(Context::ExecuteTests(Box::new(context.clone())), platform)
            .inspect_err(|err| {
                error!(
                    ?err,
                    platform_identifier = %platform.platform_identifier(),
                    "Failed to initialize the node pool for the platform."
                )
            })
            .context("Failed to initialize the node pool")?;
        nodes.push((platform, pool));
    }

    let tests_stream = tests_stream(
        &context,
        metadata_files.iter(),
        nodes.as_slice(),
        reporter.clone(),
    )
    .await;
    let driver_task = start_driver_task(&context, tests_stream)
        .await
        .context("Failed to start driver task")?;
    let cli_reporting_task = start_cli_reporting_task(reporter);

    let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
    rtn?;

    Ok(())
}

async fn tests_stream<'a>(
    args: &TestExecutionContext,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
    nodes: &'a [(&dyn Platform, NodePool)],
    reporter: Reporter,
) -> impl Stream<Item = Test<'a>> {
    let tests = metadata_files
        .into_iter()
        .flat_map(|metadata_file| {
            metadata_file
                .cases
                .iter()
                .enumerate()
                .map(move |(case_idx, case)| (metadata_file, case_idx, case))
        })
        // Flatten over the modes, preferring the case modes over the metadata file modes.
        .flat_map(|(metadata_file, case_idx, case)| {
            let reporter = reporter.clone();

            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
            let modes = match modes {
                Some(modes) => EitherIter::A(
                    ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                ),
                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
            };

            modes.into_iter().map(move |mode| {
                (
                    metadata_file,
                    case_idx,
                    case,
                    mode.clone(),
                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
                        solc_mode: mode.as_ref().clone(),
                        metadata_file_path: metadata_file.metadata_file_path.clone(),
                        case_idx: CaseIdx::new(case_idx),
                    })),
                )
            })
        })
        .collect::<Vec<_>>();

    // Note: before we do any kind of filtering or process the iterator in any way, we need to
    // inform the report aggregator of all of the cases that were found, as it keeps a state of
    // the test cases for its internal use.
    for (_, _, _, _, reporter) in tests.iter() {
        reporter
            .report_test_case_discovery_event()
            .expect("Can't fail")
    }

    stream::iter(tests.into_iter())
        .filter_map(
            move |(metadata_file, case_idx, case, mode, reporter)| async move {
                let mut platforms = Vec::new();
                for (platform, node_pool) in nodes.iter() {
                    let node = node_pool.round_robin();
                    let compiler = platform
                        .new_compiler(
                            Context::ExecuteTests(Box::new(args.clone())),
                            mode.version.clone().map(Into::into),
                        )
                        .await
                        .inspect_err(|err| {
                            error!(
                                ?err,
                                platform_identifier = %platform.platform_identifier(),
                                "Failed to instantiate the compiler"
                            )
                        })
                        .ok()?;

                    let reporter = reporter
                        .execution_specific_reporter(node.id(), platform.platform_identifier());
                    platforms.push((*platform, node, compiler, reporter));
                }

                Some(Test {
                    metadata: metadata_file,
                    metadata_file_path: metadata_file.metadata_file_path.as_path(),
                    mode: mode.clone(),
                    case_idx: CaseIdx::new(case_idx),
                    case,
                    platforms,
                    reporter,
                })
            },
        )
        .filter_map(move |test| async move {
            match test.check_compatibility() {
                Ok(()) => Some(test),
                Err((reason, additional_information)) => {
                    debug!(
                        metadata_file_path = %test.metadata.metadata_file_path.display(),
                        case_idx = %test.case_idx,
                        mode = %test.mode,
                        reason,
                        additional_information =
                            serde_json::to_string(&additional_information).unwrap(),
                        "Ignoring Test Case"
                    );
                    test.reporter
                        .report_test_ignored_event(
                            reason.to_string(),
                            additional_information
                                .into_iter()
                                .map(|(k, v)| (k.into(), v))
                                .collect::<IndexMap<_, _>>(),
                        )
                        .expect("Can't fail");
                    None
                }
            }
        })
}

async fn start_driver_task<'a>(
    context: &TestExecutionContext,
    tests: impl Stream<Item = Test<'a>>,
) -> anyhow::Result<impl Future<Output = ()>> {
    info!("Starting driver task");

    let cached_compiler = Arc::new(
        CachedCompiler::new(
            context
                .working_directory
                .as_path()
                .join("compilation_cache"),
            context
                .compilation_configuration
                .invalidate_compilation_cache,
        )
        .await
        .context("Failed to initialize cached compiler")?,
    );

    Ok(tests.for_each_concurrent(
        context.concurrency_configuration.concurrency_limit(),
        move |test| {
            let cached_compiler = cached_compiler.clone();

            async move {
                for (platform, node, _, _) in test.platforms.iter() {
                    test.reporter
                        .report_node_assigned_event(
                            node.id(),
                            platform.platform_identifier(),
                            node.connection_string(),
                        )
                        .expect("Can't fail");
                }

                let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                    context.wallet_configuration.highest_private_key_exclusive(),
                )));

                let reporter = test.reporter.clone();
                let result =
                    handle_case_driver(&test, cached_compiler, private_key_allocator).await;

                match result {
                    Ok(steps_executed) => reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => reporter
                        .report_test_failed_event(format!("{error:#}"))
                        .expect("Can't fail"),
                }
            }
        },
    ))
}

#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    drop(reporter);

    let start = Instant::now();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const GREY: &str = "\x1B[90m";
    const COLOR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    let mut number_of_successes = 0;
    let mut number_of_failures = 0;

    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };

        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
        for (case_idx, case_status) in case_status.into_iter() {
            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
            let _ = match case_status {
                TestCaseStatus::Succeeded { steps_executed } => {
                    number_of_successes += 1;
                    writeln!(
                        buf,
                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
                    )
                }
                TestCaseStatus::Failed { reason } => {
                    number_of_failures += 1;
                    writeln!(
                        buf,
                        "{}{}Case Failed{} - Reason: {}{}",
                        RED,
                        BOLD,
                        BOLD_RESET,
                        reason.trim(),
                        COLOR_RESET,
                    )
                }
                TestCaseStatus::Ignored { reason, .. } => writeln!(
                    buf,
                    "{}{}Case Ignored{} - Reason: {}{}",
                    GREY,
                    BOLD,
                    BOLD_RESET,
                    reason.trim(),
                    COLOR_RESET,
                ),
            };
        }
        let _ = writeln!(buf);
    }

    // Summary at the end.
    let _ = writeln!(
        buf,
        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
        number_of_successes + number_of_failures,
        GREEN,
        number_of_successes,
        COLOR_RESET,
        RED,
        number_of_failures,
        COLOR_RESET,
        start.elapsed().as_secs()
    );
}

#[allow(clippy::too_many_arguments)]
#[instrument(
    level = "info",
    name = "Handling Case",
    skip_all,
    fields(
        metadata_file_path = %test.metadata.relative_path().display(),
        mode = %test.mode,
        case_idx = %test.case_idx,
        case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
    )
)]
async fn handle_case_driver<'a>(
    test: &Test<'a>,
    cached_compiler: Arc<CachedCompiler<'a>>,
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> anyhow::Result<usize> {
    let platform_state = stream::iter(test.platforms.iter())
        // Compiling the pre-link contracts.
        .filter_map(|(platform, node, compiler, reporter)| {
            let cached_compiler = cached_compiler.clone();

            async move {
                let compiler_output = cached_compiler
                    .compile_contracts(
                        test.metadata,
                        test.metadata_file_path,
                        test.mode.clone(),
                        None,
                        compiler.as_ref(),
                        *platform,
                        reporter,
                    )
                    .await
                    .inspect_err(|err| {
                        error!(
                            ?err,
                            platform_identifier = %platform.platform_identifier(),
                            "Pre-linking compilation failed"
                        )
                    })
                    .ok()?;
                Some((test, platform, node, compiler, reporter, compiler_output))
            }
        })
        // Deploying the libraries for the platform.
        .filter_map(
            |(test, platform, node, compiler, reporter, compiler_output)| async move {
                let mut deployed_libraries = None::<HashMap<_, _>>;
                let mut contract_sources = test
                    .metadata
                    .contract_sources()
                    .inspect_err(|err| {
                        error!(
                            ?err,
                            platform_identifier = %platform.platform_identifier(),
                            "Failed to retrieve contract sources from metadata"
                        )
                    })
                    .ok()?;
                for library_instance in test
                    .metadata
                    .libraries
                    .iter()
                    .flatten()
                    .flat_map(|(_, map)| map.values())
                {
                    debug!(%library_instance, "Deploying Library Instance");

                    let ContractPathAndIdent {
                        contract_source_path: library_source_path,
                        contract_ident: library_ident,
                    } = contract_sources.remove(library_instance)?;

                    let (code, abi) = compiler_output
                        .contracts
                        .get(&library_source_path)
                        .and_then(|contracts| contracts.get(library_ident.as_str()))?;

                    let code = alloy::hex::decode(code).ok()?;

                    // Getting the deployer address from the cases themselves. This is to ensure
                    // that we're doing the deployments from different accounts and therefore we're
                    // not slowed down by the nonce.
                    let deployer_address = test
                        .case
                        .steps
                        .iter()
                        .filter_map(|step| match step {
                            Step::FunctionCall(input) => input.caller.as_address().copied(),
                            Step::BalanceAssertion(..) => None,
                            Step::StorageEmptyAssertion(..) => None,
                            Step::Repeat(..) => None,
                            Step::AllocateAccount(..) => None,
                        })
                        .next()
                        .unwrap_or(FunctionCallStep::default_caller_address());
                    let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
                        TransactionRequest::default().from(deployer_address),
                        code,
                    );
                    let receipt = node
                        .execute_transaction(tx)
                        .await
                        .inspect_err(|err| {
                            error!(
                                ?err,
                                %library_instance,
                                platform_identifier = %platform.platform_identifier(),
                                "Failed to deploy the library"
                            )
                        })
                        .ok()?;

                    debug!(
                        ?library_instance,
                        platform_identifier = %platform.platform_identifier(),
                        "Deployed library"
                    );

                    let library_address = receipt.contract_address?;

                    deployed_libraries.get_or_insert_default().insert(
                        library_instance.clone(),
                        (library_ident.clone(), library_address, abi.clone()),
                    );
                }

                Some((
                    test,
                    platform,
                    node,
                    compiler,
                    reporter,
                    compiler_output,
                    deployed_libraries,
                ))
            },
        )
        // Compiling the post-link contracts.
        .filter_map(
            |(test, platform, node, compiler, reporter, _, deployed_libraries)| {
                let cached_compiler = cached_compiler.clone();
                let private_key_allocator = private_key_allocator.clone();

                async move {
                    let compiler_output = cached_compiler
                        .compile_contracts(
                            test.metadata,
                            test.metadata_file_path,
                            test.mode.clone(),
                            deployed_libraries.as_ref(),
                            compiler.as_ref(),
                            *platform,
                            reporter,
                        )
                        .await
                        .inspect_err(|err| {
                            error!(
                                ?err,
                                platform_identifier = %platform.platform_identifier(),
                                "Post-linking compilation failed"
                            )
                        })
                        .ok()?;

                    let case_state = CaseState::new(
                        compiler.version().clone(),
                        compiler_output.contracts,
                        deployed_libraries.unwrap_or_default(),
                        reporter.clone(),
                        private_key_allocator,
                    );

                    Some((*node, platform.platform_identifier(), case_state))
                }
            },
        )
        // Collect
        .collect::<Vec<_>>()
        .await;

    let mut driver = CaseDriver::new(test.metadata, test.case, platform_state);
    driver
        .execute()
        .await
        .inspect(|steps_executed| info!(steps_executed, "Case succeeded"))
}

async fn execute_corpus(
    context: TestExecutionContext,
    tests: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
    let platforms = context
        .platforms
        .iter()
        .copied()
        .collect::<BTreeSet<_>>()
        .into_iter()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    run_driver(context, tests, reporter, report_aggregator_task, platforms).await?;

    Ok(())
}

/// This represents a single "test": a mode, a metadata file path, and a single case.
#[allow(clippy::type_complexity)]
struct Test<'a> {
    metadata: &'a MetadataFile,
    metadata_file_path: &'a Path,
    mode: Cow<'a, Mode>,
    case_idx: CaseIdx,
    case: &'a Case,
    platforms: Vec<(
        &'a dyn Platform,
        &'a dyn EthereumNode,
        Box<dyn SolidityCompiler>,
        ExecutionSpecificReporter,
    )>,
    reporter: TestSpecificReporter,
}

impl<'a> Test<'a> {
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (platform, ..) in self.platforms.iter() {
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(targets) => targets
                    .iter()
                    .any(|target| &platform.vm_identifier() == target),
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms does not support the targets allowed by the test.",
                error_map,
            ))
        }
    }

    /// Checks the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };

        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (platform, node, ..) in self.platforms.iter() {
            let is_allowed_for_platform = evm_version_requirement.matches(&node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible with the platforms specified",
                error_map,
            ))
        }
    }

    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_mode" => json!(self.mode.to_string()),
        };
        let mut is_allowed = true;
        for (platform, _, compiler, ..) in self.platforms.iter() {
            let is_allowed_for_platform =
                compiler.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "The compilers for the provided platforms do not support this mode.",
                error_map,
            ))
        }
    }
}

type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
@@ -0,0 +1,52 @@
//! This crate implements concurrent handling of testing nodes.

use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode;

/// The node pool starts one or more nodes which can then be accessed
/// in a round-robin fashion.
pub struct NodePool {
    next: AtomicUsize,
    nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
}

impl NodePool {
    /// Creates a new pool. This starts as many nodes as are configured in `context`.
    pub fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let nodes = concurrency_configuration.number_of_nodes;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            handles.push(platform.new_node(context)?);
        }

        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
                    .context("Node failed to spawn")?,
            );
        }

        Ok(Self {
            nodes,
            next: Default::default(),
        })
    }

    /// Returns a handle to the next node in round-robin order.
    pub fn round_robin(&self) -> &dyn EthereumNode {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap().as_ref()
    }
}
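
// Minimal usage sketch (the `platform` and `context` values are assumed to come
// from the caller, as in `run_driver` above): the pool spawns all nodes up front
// and `round_robin` then spreads callers across them, wrapping around at the end.
//
// let pool = NodePool::new(context, platform)?;
// let first = pool.round_robin();
// let second = pool.round_robin(); // the next node, wrapping modulo the pool size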
@@ -0,0 +1,32 @@
[package]
name = "revive-dt-format"
description = "declarative test definition format"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }

revive-common = { workspace = true }

alloy = { workspace = true }
alloy-primitives = { workspace = true }
alloy-sol-types = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
tracing = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }

[dev-dependencies]
tokio = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,117 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use revive_dt_common::{macros::define_wrapper_type, types::Mode};

use crate::{
    mode::ParsedMode,
    steps::{Expected, RepeatStep, Step},
};

#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
    /// An optional name of the test case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// An optional comment on the case which has no impact on the execution in any way.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// This represents a mode that has been parsed from test metadata.
    ///
    /// Mode strings can take the following form (in pseudo-regex):
    ///
    /// ```text
    /// [YEILV][+-]? (M[0123sz])? <semver>?
    /// ```
    ///
    /// If this is provided then it takes higher priority than the modes specified in the metadata
    /// file.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub modes: Option<Vec<ParsedMode>>,

    /// The set of steps to run as part of this test case.
    #[serde(rename = "inputs")]
    pub steps: Vec<Step>,

    /// An optional name of the group of tests that this test belongs to.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group: Option<String>,

    /// An optional set of expectations and assertions to make about the transaction after it ran.
    ///
    /// If this is not specified then the only assertion that will be run is that the transaction
    /// was successful.
    ///
    /// This expectation that's on the case itself will be attached to the final step of the case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expected: Option<Expected>,

    /// An optional boolean which defines if the case as a whole should be ignored. If null then
    /// the case will not be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ignore: Option<bool>,
}
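
// Illustrative JSON for the `Case` struct above (a sketch: the field names are the
// real serde names, including the `inputs` rename of `steps`, but the concrete
// values are made up and the step objects are elided):
//
// {
//     "name": "example case",
//     "modes": ["Y+"],
//     "inputs": [ /* step objects */ ],
//     "ignore": false
// }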

impl Case {
    pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
        let steps_len = self.steps.len();
        self.steps
            .clone()
            .into_iter()
            .enumerate()
            .map(move |(idx, mut step)| {
                let Step::FunctionCall(ref mut input) = step else {
                    return step;
                };

                if idx + 1 == steps_len {
                    if input.expected.is_none() {
                        input.expected = self.expected.clone();
                    }

                    // TODO: What does it mean for us to have an `expected` field on the case itself
                    // but the final input also has an expected field that doesn't match the one on
                    // the case? What are we supposed to do with that final expected field on the
                    // case?

                    step
                } else {
                    step
                }
            })
    }

    pub fn steps_iterator_for_benchmarks(
        &self,
        default_repeat_count: usize,
    ) -> Box<dyn Iterator<Item = Step> + '_> {
        let contains_repeat = self
            .steps_iterator()
            .any(|step| matches!(&step, Step::Repeat(..)));
        if contains_repeat {
            Box::new(self.steps_iterator()) as Box<_>
        } else {
            Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                comment: None,
                repeat: default_repeat_count,
                steps: self.steps_iterator().collect(),
            })))) as Box<_>
        }
    }

    pub fn solc_modes(&self) -> Vec<Mode> {
        match &self.modes {
            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
            None => Mode::all().cloned().collect(),
        }
    }
}

define_wrapper_type!(
    /// A wrapper type for the index of test cases found in a metadata file.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CaseIdx(usize) impl Display, FromStr;
);
@@ -0,0 +1,131 @@
use std::{
    fs::File,
    path::{Path, PathBuf},
};

use revive_dt_common::iterators::FilesWithExtensionIterator;
use serde::{Deserialize, Serialize};
use tracing::{debug, info};

use crate::metadata::{Metadata, MetadataFile};
use anyhow::Context as _;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Corpus {
    SinglePath { name: String, path: PathBuf },
    MultiplePaths { name: String, paths: Vec<PathBuf> },
}
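
// Illustrative corpus files accepted by the untagged enum above (the field names
// are real, the concrete names and paths are made up). `try_from_path` below
// resolves every path relative to the directory containing the corpus file:
//
// {"name": "example", "path": "tests"}
// {"name": "example", "paths": ["tests/erc20", "tests/single_case.json"]}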

impl Corpus {
    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
        let mut corpus = File::open(file_path.as_ref())
            .map_err(anyhow::Error::from)
            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
            .with_context(|| {
                format!(
                    "Failed to open and deserialize corpus file at {}",
                    file_path.as_ref().display()
                )
            })?;

        let corpus_directory = file_path
            .as_ref()
            .canonicalize()
            .context("Failed to canonicalize the path to the corpus file")?
            .parent()
            .context("Corpus file has no parent")?
            .to_path_buf();

        for path in corpus.paths_iter_mut() {
            *path = corpus_directory.join(path.as_path())
        }

        Ok(corpus)
    }

    pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
        let mut tests = self
            .paths_iter()
            .flat_map(|root_path| {
                if !root_path.is_dir() {
                    Box::new(std::iter::once(root_path.to_path_buf()))
                        as Box<dyn Iterator<Item = _>>
                } else {
                    Box::new(
                        FilesWithExtensionIterator::new(root_path)
                            .with_use_cached_fs(true)
                            .with_allowed_extension("sol")
                            .with_allowed_extension("json"),
                    )
                }
                .map(move |metadata_file_path| (root_path, metadata_file_path))
            })
            .filter_map(|(root_path, metadata_file_path)| {
                Metadata::try_from_file(&metadata_file_path)
                    .or_else(|| {
                        debug!(
                            discovered_from = %root_path.display(),
                            metadata_file_path = %metadata_file_path.display(),
                            "Skipping file since it doesn't contain valid metadata"
                        );
                        None
                    })
                    .map(|metadata| MetadataFile {
                        metadata_file_path,
                        corpus_file_path: root_path.to_path_buf(),
                        content: metadata,
                    })
                    .inspect(|metadata_file| {
                        debug!(
                            metadata_file_path = %metadata_file.relative_path().display(),
                            "Loaded metadata file"
                        )
                    })
            })
            .collect::<Vec<_>>();
        tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
        tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
        info!(
            len = tests.len(),
            corpus_name = self.name(),
            "Found tests in Corpus"
        );
        tests
    }

    pub fn name(&self) -> &str {
        match self {
            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
        }
    }

    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
        match self {
            Corpus::SinglePath { path, .. } => {
                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
            }
            Corpus::MultiplePaths { paths, .. } => {
                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
            }
        }
    }

    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
        match self {
            Corpus::SinglePath { path, .. } => {
                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
            }
            Corpus::MultiplePaths { paths, .. } => {
                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
            }
        }
    }

    pub fn path_count(&self) -> usize {
        match self {
            Corpus::SinglePath { .. } => 1,
            Corpus::MultiplePaths { paths, .. } => paths.len(),
        }
    }
}
@@ -0,0 +1,8 @@
//! The revive differential tests case format.

pub mod case;
pub mod corpus;
pub mod metadata;
pub mod mode;
pub mod steps;
pub mod traits;
@@ -0,0 +1,623 @@
use std::{
    cmp::Ordering,
    collections::BTreeMap,
    fmt::Display,
    fs::File,
    ops::Deref,
    path::{Path, PathBuf},
    str::FromStr,
};

use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use revive_common::EVMVersion;
use revive_dt_common::{
    cached_fs::read_to_string,
    iterators::FilesWithExtensionIterator,
    macros::define_wrapper_type,
    types::{Mode, VmIdentifier},
};
use tracing::error;

use crate::{case::Case, mode::ParsedMode};

pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!";

#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
pub struct MetadataFile {
    /// The path of the metadata file. This will either be a JSON or a Solidity file.
    pub metadata_file_path: PathBuf,

    /// This is the path contained within the corpus file. This could either be the path of some
    /// directory or the actual metadata file path.
    pub corpus_file_path: PathBuf,

    /// The metadata contained within the file.
    pub content: Metadata,
}

impl MetadataFile {
    pub fn relative_path(&self) -> &Path {
        if self.corpus_file_path.is_file() {
            &self.corpus_file_path
        } else {
            self.metadata_file_path
                .strip_prefix(&self.corpus_file_path)
                .unwrap()
        }
    }
}

impl Deref for MetadataFile {
    type Target = Metadata;

    fn deref(&self) -> &Self::Target {
        &self.content
    }
}

/// A MatterLabs metadata file.
///
/// This defines the structure that the MatterLabs metadata files follow for defining the tests or
/// the workloads.
///
/// Each metadata file is composed of multiple test cases where each test case is isolated from the
/// others and runs in a completely different address space. Each test case is composed of a number
/// of steps and assertions that should be performed as part of the test case.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub struct Metadata {
    /// This is an optional comment on the metadata file which has no impact on the execution in
    /// any way.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// An optional boolean which defines if the metadata file as a whole should be ignored. If
    /// null then the metadata file will not be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ignore: Option<bool>,

    /// An optional vector of targets that this Metadata file's cases can be executed on. As an
    /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
    /// specify a target of "PolkaVM" in here.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub targets: Option<Vec<VmIdentifier>>,

    /// A vector of the test cases and workloads contained within the metadata file. This is their
    /// primary description.
    pub cases: Vec<Case>,

    /// A map of all of the contracts that the test requires to run.
    ///
    /// This is a map where the key is the name of the contract instance and the value is the
    /// contract's path and ident in the file.
    ///
    /// If any contract is to be used by the test then it must be included in here first so that
    /// the framework is aware of its path, compiles it, and prepares it.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,

    /// The set of libraries that this metadata file requires.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,

    /// This represents a mode that has been parsed from test metadata.
    ///
    /// Mode strings can take the following form (in pseudo-regex):
    ///
    /// ```text
    /// [YEILV][+-]? (M[0123sz])? <semver>?
    /// ```
    #[serde(skip_serializing_if = "Option::is_none")]
    pub modes: Option<Vec<ParsedMode>>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[schemars(skip)]
    pub file_path: Option<PathBuf>,

    /// This field specifies an EVM version requirement for the test case: the test will only be
    /// run if the EVM version of the node matches the EVM version requirement specified here.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub required_evm_version: Option<EvmVersionRequirement>,

    /// A set of compilation directives that will be passed to the compiler whenever the contracts
    /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
    /// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
    /// compiler.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub compiler_directives: Option<CompilationDirectives>,
}

impl Metadata {
    /// Returns the modes that we should test from this metadata.
    pub fn solc_modes(&self) -> Vec<Mode> {
        match &self.modes {
            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
            None => Mode::all().cloned().collect(),
        }
    }

    /// Returns the base directory of this metadata.
    pub fn directory(&self) -> anyhow::Result<PathBuf> {
        Ok(self
            .file_path
            .as_ref()
            .and_then(|path| path.parent())
            .ok_or_else(|| anyhow::anyhow!("metadata invalid file path: {:?}", self.file_path))?
            .to_path_buf())
    }

    /// Returns the contract sources with canonicalized paths for the files.
    pub fn contract_sources(
        &self,
    ) -> anyhow::Result<BTreeMap<ContractInstance, ContractPathAndIdent>> {
        let directory = self.directory()?;
        let mut sources = BTreeMap::new();
        let Some(contracts) = &self.contracts else {
            return Ok(sources);
        };

        for (
            alias,
            ContractPathAndIdent {
                contract_source_path,
                contract_ident,
            },
        ) in contracts
        {
            let alias = alias.clone();
            let absolute_path = directory
                .join(contract_source_path)
                .canonicalize()
                .map_err(|error| {
                    anyhow::anyhow!(
                        "Failed to canonicalize contract source path '{}': {error}",
                        directory.join(contract_source_path).display()
                    )
                })?;
            let contract_ident = contract_ident.clone();

            sources.insert(
                alias,
                ContractPathAndIdent {
                    contract_source_path: absolute_path,
                    contract_ident,
                },
            );
        }

        Ok(sources)
    }

    /// Tries to parse the test metadata struct from the given file at `path`.
    ///
    /// Returns `None` if `path` didn't contain a test metadata or case definition.
    ///
    /// # Panics
    /// Expects the supplied `path` to be a file.
    pub fn try_from_file(path: &Path) -> Option<Self> {
        assert!(path.is_file(), "not a file: {}", path.display());

        let file_extension = path.extension()?;

        if file_extension == METADATA_FILE_EXTENSION {
            return Self::try_from_json(path);
        }

        if file_extension == SOLIDITY_CASE_FILE_EXTENSION {
            return Self::try_from_solidity(path);
        }

        None
    }

    fn try_from_json(path: &Path) -> Option<Self> {
        let file = File::open(path)
            .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file"))
            .ok()?;

        match serde_json::from_reader::<_, Metadata>(file) {
            Ok(mut metadata) => {
                metadata.file_path = Some(path.to_path_buf());
                Some(metadata)
            }
            Err(err) => {
                error!(path = %path.display(), %err, "Deserialization of metadata failed");
                None
            }
        }
    }

    fn try_from_solidity(path: &Path) -> Option<Self> {
        let spec = read_to_string(path)
            .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content"))
            .ok()?
            .lines()
            .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER))
            .fold(String::new(), |mut buf, string| {
                buf.push_str(string);
                buf
            });

        if spec.is_empty() {
            return None;
        }

        match serde_json::from_str::<Self>(&spec) {
            Ok(mut metadata) => {
                metadata.file_path = Some(path.to_path_buf());
                metadata.contracts = Some(
                    [(
                        ContractInstance::new("Test"),
                        ContractPathAndIdent {
                            contract_source_path: path.to_path_buf(),
                            contract_ident: ContractIdent::new("Test"),
                        },
                    )]
                    .into(),
                );
                Some(metadata)
            }
            Err(err) => {
                error!(path = %path.display(), %err, "Failed to deserialize metadata");
                None
            }
        }
    }
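
    // Sketch of the embedded-metadata convention handled by `try_from_solidity`
    // above: every line of the `.sol` file starting with `//!` is stripped of the
    // marker and the remainders are concatenated into one JSON document, which is
    // then parsed as `Metadata` (the contents below are made up):
    //
    // //! {"cases": [{"inputs": []}]}
    // contract Test { /* ... */ }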

    /// Returns an iterator over all of the Solidity files that need to be compiled for this
    /// [`Metadata`] object.
    ///
    /// Note: if the metadata is contained within a Solidity file then this is the only file that
    /// we wish to compile since this is a self-contained test. Otherwise, if it's a JSON file
    /// then we need to compile all of the contracts that are in the directory since imports are
    /// allowed in there.
    pub fn files_to_compile(&self) -> anyhow::Result<Box<dyn Iterator<Item = PathBuf>>> {
        let Some(ref metadata_file_path) = self.file_path else {
            anyhow::bail!("The metadata file path is not defined");
        };
        if metadata_file_path
            .extension()
            .is_some_and(|extension| extension.eq_ignore_ascii_case("sol"))
        {
            Ok(Box::new(std::iter::once(metadata_file_path.clone())))
        } else {
            Ok(Box::new(
                FilesWithExtensionIterator::new(self.directory()?)
                    .with_allowed_extension("sol")
                    .with_use_cached_fs(true),
            ))
        }
    }
}

define_wrapper_type!(
    /// Represents a contract instance found in a metadata file.
    ///
    /// Typically, this is used as the key to the "contracts" field of metadata files.
    #[derive(
        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
    )]
    #[serde(transparent)]
    pub struct ContractInstance(String) impl Display;
);

define_wrapper_type!(
    /// Represents a contract identifier found in a metadata file.
    ///
    /// A contract identifier is the name of the contract in the source code.
    #[derive(
        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
    )]
    #[serde(transparent)]
    pub struct ContractIdent(String) impl Display;
);

/// Represents an identifier used for contracts.
///
/// The type supports serialization from and into the following string format:
///
/// ```text
/// ${path}:${contract_ident}
/// ```
#[derive(
    Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
)]
#[serde(try_from = "String", into = "String")]
pub struct ContractPathAndIdent {
    /// The path of the contract source code relative to the directory containing the metadata
    /// file.
    pub contract_source_path: PathBuf,

    /// The identifier of the contract.
    pub contract_ident: ContractIdent,
}

impl Display for ContractPathAndIdent {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}:{}",
            self.contract_source_path.display(),
            self.contract_ident.as_ref()
        )
    }
}

impl FromStr for ContractPathAndIdent {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split(":").peekable();
        let mut path = None::<String>;
        let mut identifier = None::<String>;
        loop {
            let Some(next_item) = parts.next() else {
                break;
            };
            if parts.peek().is_some() {
                match path {
                    Some(ref mut path) => {
                        path.push(':');
                        path.push_str(next_item);
                    }
                    None => path = Some(next_item.to_owned()),
                }
            } else {
                identifier = Some(next_item.to_owned())
            }
        }
        match (path, identifier) {
            (Some(path), Some(identifier)) => Ok(Self {
                contract_source_path: PathBuf::from(path),
                contract_ident: ContractIdent::new(identifier),
            }),
            (None, Some(path)) | (Some(path), None) => {
                let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else {
                    anyhow::bail!("Failed to find identifier");
                };
                Ok(Self {
                    contract_source_path: PathBuf::from(path),
                    contract_ident: ContractIdent::new(identifier),
                })
            }
            (None, None) => anyhow::bail!("Failed to find the path and identifier"),
        }
    }
}

impl TryFrom<String> for ContractPathAndIdent {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        Self::from_str(&value)
    }
}

impl From<ContractPathAndIdent> for String {
    fn from(value: ContractPathAndIdent) -> Self {
        value.to_string()
    }
}
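
// A minimal test sketch of the `${path}:${contract_ident}` parsing implemented
// above: the last `:`-separated segment is the identifier and everything before it
// (colons included) is the path. The concrete path and name below are made-up
// examples.
#[cfg(test)]
mod contract_path_and_ident_tests {
    use super::*;

    #[test]
    fn splits_on_the_last_colon() {
        let parsed = ContractPathAndIdent::from_str("contracts/Main.sol:Main").unwrap();
        assert_eq!(parsed.contract_source_path, PathBuf::from("contracts/Main.sol"));
        assert_eq!(parsed.contract_ident, ContractIdent::new("Main"));
    }
}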
|
||||
|
||||
/// An EVM version requirement that the test case has. This gets serialized and deserialized from
|
||||
/// and into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the
|
||||
/// EVM version.
|
||||
///
|
||||
/// When specified, the framework will only run the test if the node's EVM version matches that
|
||||
/// required by the metadata file.
|
||||
#[derive(
|
||||
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
|
||||
)]
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct EvmVersionRequirement {
|
||||
ordering: Ordering,
|
||||
or_equal: bool,
|
||||
evm_version: EVMVersion,
|
||||
}
|
||||
|
||||
impl EvmVersionRequirement {
|
||||
pub fn new_greater_than_or_equals(version: EVMVersion) -> Self {
|
||||
Self {
|
||||
ordering: Ordering::Greater,
|
||||
or_equal: true,
|
||||
evm_version: version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_greater_than(version: EVMVersion) -> Self {
|
||||
Self {
|
||||
ordering: Ordering::Greater,
|
||||
or_equal: false,
|
||||
evm_version: version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_equals(version: EVMVersion) -> Self {
|
||||
Self {
|
||||
ordering: Ordering::Equal,
|
||||
or_equal: false,
|
||||
evm_version: version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_less_than(version: EVMVersion) -> Self {
|
||||
Self {
|
||||
ordering: Ordering::Less,
|
||||
or_equal: false,
|
||||
evm_version: version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_less_than_or_equals(version: EVMVersion) -> Self {
|
||||
Self {
|
||||
ordering: Ordering::Less,
|
||||
or_equal: true,
|
||||
evm_version: version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn matches(&self, other: &EVMVersion) -> bool {
|
||||
let ordering = other.cmp(&self.evm_version);
|
||||
ordering == self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal))
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for EvmVersionRequirement {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let Self {
|
||||
ordering,
|
||||
or_equal,
|
||||
evm_version,
|
||||
} = self;
|
||||
match ordering {
|
||||
Ordering::Less => write!(f, "<")?,
|
||||
Ordering::Equal => write!(f, "=")?,
|
||||
Ordering::Greater => write!(f, ">")?,
|
||||
}
|
||||
if *or_equal && !matches!(ordering, Ordering::Equal) {
|
||||
write!(f, "=")?;
|
||||
}
|
||||
write!(f, "{evm_version}")
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for EvmVersionRequirement {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s.as_bytes() {
|
||||
[b'>', b'=', remaining @ ..] => Ok(Self {
|
||||
ordering: Ordering::Greater,
|
||||
or_equal: true,
|
||||
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||
}),
|
||||
[b'>', remaining @ ..] => Ok(Self {
|
||||
ordering: Ordering::Greater,
|
||||
or_equal: false,
|
||||
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||
}),
|
||||
[b'<', b'=', remaining @ ..] => Ok(Self {
|
||||
ordering: Ordering::Less,
|
||||
or_equal: true,
|
||||
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||
}),
|
||||
[b'<', remaining @ ..] => Ok(Self {
|
||||
ordering: Ordering::Less,
|
||||
or_equal: false,
|
||||
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||
}),
|
||||
[b'=', remaining @ ..] => Ok(Self {
|
||||
ordering: Ordering::Equal,
|
||||
or_equal: false,
|
||||
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||
}),
|
||||
_ => anyhow::bail!("Invalid EVM version requirement {s}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for EvmVersionRequirement {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
value.parse()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<EvmVersionRequirement> for String {
|
||||
fn from(value: EvmVersionRequirement) -> Self {
|
||||
value.to_string()
|
||||
}
|
||||
}
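
// Editorial sketch (not part of the original diff): parsing a requirement and checking a node's
// EVM version against it. This assumes `EVMVersion: TryFrom<&str>` accepts lower-case names such
// as "shanghai" and that the `Ord` ordering of `EVMVersion` places Cancun after Shanghai.
#[cfg(test)]
mod evm_version_requirement_sketch {
    use super::*;

    #[test]
    fn greater_than_or_equal_requirement_matches_later_versions() {
        let requirement = EvmVersionRequirement::from_str(">=shanghai").unwrap();
        assert!(requirement.matches(&EVMVersion::Shanghai)); // equal: accepted via `or_equal`
        assert!(requirement.matches(&EVMVersion::Cancun)); // strictly greater: accepted
    }
}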

/// A set of compilation directives that will be passed to the compiler whenever the contracts for
/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is
/// just a filter for when a test can run whereas this is an instruction to the compiler.
#[derive(
    Clone,
    Debug,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    Serialize,
    Deserialize,
    JsonSchema,
)]
pub struct CompilationDirectives {
    /// Defines how the revert strings should be handled.
    pub revert_string_handling: Option<RevertString>,
}

/// Defines how the compiler should handle revert strings.
#[derive(
    Clone,
    Debug,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    Serialize,
    Deserialize,
    JsonSchema,
)]
#[serde(rename_all = "camelCase")]
pub enum RevertString {
    /// The default handling of the revert strings.
    #[default]
    Default,
    /// The debug handling of the revert strings.
    Debug,
    /// Strip the revert strings.
    Strip,
    /// Provide verbose debug strings for the revert string.
    VerboseDebug,
}
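
// Editorial sketch (not part of the original diff): how the directives round-trip through JSON.
// The `RevertString` variants are renamed to camelCase, while the struct field keeps its Rust
// name since no `rename_all` attribute is applied to `CompilationDirectives`.
#[cfg(test)]
mod compilation_directives_sketch {
    use super::*;

    #[test]
    fn revert_string_serializes_as_camel_case() {
        let directives = CompilationDirectives {
            revert_string_handling: Some(RevertString::VerboseDebug),
        };
        let json = serde_json::to_string(&directives).unwrap();
        assert_eq!(json, r#"{"revert_string_handling":"verboseDebug"}"#);
    }
}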

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn contract_identifier_respects_roundtrip_property() {
        // Arrange
        let string = "ERC20/ERC20.sol:ERC20";

        // Act
        let identifier = ContractPathAndIdent::from_str(string);

        // Assert
        let identifier = identifier.expect("Failed to parse");
        assert_eq!(
            identifier.contract_source_path.display().to_string(),
            "ERC20/ERC20.sol"
        );
        assert_eq!(identifier.contract_ident, "ERC20".to_owned().into());

        // Act
        let reserialized = identifier.to_string();

        // Assert
        assert_eq!(string, reserialized);
    }

    #[test]
    fn complex_metadata_file_can_be_deserialized() {
        // Arrange
        const JSON: &str = include_str!("../../../assets/test_metadata.json");

        // Act
        let metadata = serde_json::from_str::<Metadata>(JSON);

        // Assert
        metadata.expect("Failed to deserialize metadata");
    }
}
@@ -0,0 +1,257 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;

/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
                ^
                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use, e.g. Y, E+, E-
                \s*
                (?P<optimize_setting>M[a-zA-Z0-9])?                # Optimizer setting, e.g. M0, Ms, Mz
                \s*
                (?P<version>[>=<]*\d+(?:\.\d+)*)?                  # Optional semver version, e.g. >=0.8.0, 0.7, <0.8
                $
            ").unwrap()
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };

        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");

        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}
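
// Editorial sketch (not part of the original diff): how a '+' or '-' flag pins the optimizer
// while leaving unspecified axes free to fan out, mirroring the expansions asserted in the
// tests below.
#[cfg(test)]
mod parsed_mode_expansion_sketch {
    use super::*;

    #[test]
    fn plus_flag_pins_the_optimizer_to_m3() {
        let parsed = ParsedMode::from_str("Y+").unwrap();
        let modes: Vec<String> = parsed.to_modes().map(|m| m.to_string()).collect();
        // The pipeline and optimizer are both pinned, so exactly one mode comes out.
        assert_eq!(modes, vec!["Y M3".to_owned()]);
    }
}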

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
File diff suppressed because it is too large
@@ -0,0 +1,176 @@
use std::collections::HashMap;
use std::pin::Pin;

use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use alloy_primitives::TxHash;
use anyhow::Result;

use crate::metadata::{ContractIdent, ContractInstance};

/// The interface that nodes are required to implement so that the resolution logic in this crate
/// can turn string calldata into bytes calldata.
pub trait ResolverApi {
    /// Returns the ID of the chain that the node is on.
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;

    /// Returns the gas price for the specified transaction.
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    // TODO: This is currently a u128 because substrate needs more than 64 bits for its gas limit.
    // Once the gas changes are implemented, this needs to be adjusted to be a u64.
    /// Returns the gas limit of the specified block.
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    /// Returns the coinbase of the specified block.
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;

    /// Returns the difficulty of the specified block.
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the base fee of the specified block.
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;

    /// Returns the hash of the specified block.
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;

    /// Returns the timestamp of the specified block.
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;

    /// Returns the number of the last block.
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
}

/// Contextual information required by the code that's performing the resolution.
#[derive(Clone, Copy, Debug, Default)]
pub struct ResolutionContext<'a> {
    /// When provided, the contracts listed here will be used for resolutions.
    deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,

    /// When provided, the variables in here will be used for performing resolutions.
    variables: Option<&'a HashMap<String, U256>>,

    /// When provided, this block number will be treated as the tip of the chain.
    block_number: Option<&'a BlockNumber>,

    /// When provided, the resolver will use this transaction hash for all of its resolutions.
    transaction_hash: Option<&'a TxHash>,
}

impl<'a> ResolutionContext<'a> {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn new_from_parts(
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
        block_number: impl Into<Option<&'a BlockNumber>>,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        Self {
            deployed_contracts: deployed_contracts.into(),
            variables: variables.into(),
            block_number: block_number.into(),
            transaction_hash: transaction_hash.into(),
        }
    }

    pub fn with_deployed_contracts(
        mut self,
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
    ) -> Self {
        self.deployed_contracts = deployed_contracts.into();
        self
    }

    pub fn with_variables(
        mut self,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
    ) -> Self {
        self.variables = variables.into();
        self
    }

    pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
        self.block_number = block_number.into();
        self
    }

    pub fn with_transaction_hash(
        mut self,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        self.transaction_hash = transaction_hash.into();
        self
    }

    pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
        match self.block_number {
            Some(block_number) => match number {
                BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
                n @ (BlockNumberOrTag::Finalized
                | BlockNumberOrTag::Safe
                | BlockNumberOrTag::Earliest
                | BlockNumberOrTag::Pending
                | BlockNumberOrTag::Number(_)) => n,
            },
            None => number,
        }
    }

    pub fn deployed_contract(
        &self,
        instance: &ContractInstance,
    ) -> Option<&(ContractIdent, Address, JsonAbi)> {
        self.deployed_contracts
            .and_then(|deployed_contracts| deployed_contracts.get(instance))
    }

    pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
        self.deployed_contract(instance).map(|(_, a, _)| a)
    }

    pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
        self.deployed_contract(instance).map(|(_, _, a)| a)
    }

    pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
        self.variables
            .and_then(|variables| variables.get(name.as_ref()))
    }

    pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
        self.block_number
    }

    pub fn transaction_hash(&self) -> Option<&'a TxHash> {
        self.transaction_hash
    }
}
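
// Editorial sketch (not part of the original diff): contexts are assembled with the
// builder-style setters above; any field left unset falls back to live node state, and a pinned
// tip rewrites `Latest` lookups to a concrete block number.
#[cfg(test)]
mod resolution_context_sketch {
    use super::*;

    #[test]
    fn pinned_tip_rewrites_latest() {
        let tip: BlockNumber = 42;
        let context = ResolutionContext::new().with_block_number(&tip);
        assert_eq!(
            context.resolve_block_number(BlockNumberOrTag::Latest),
            BlockNumberOrTag::Number(42)
        );
    }
}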
@@ -0,0 +1,20 @@
[package]
name = "revive-dt-node-interaction"
description = "send and trace transactions to nodes"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-common = { workspace = true }

revive-dt-format = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,53 @@
//! This crate implements all node interactions.

use std::pin::Pin;
use std::sync::Arc;

use alloy::primitives::{Address, StorageKey, TxHash, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;

use revive_common::EVMVersion;
use revive_dt_format::traits::ResolverApi;

/// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)]
pub trait EthereumNode {
    fn id(&self) -> usize;

    /// Returns the node's connection string.
    fn connection_string(&self) -> &str;

    /// Execute the [TransactionRequest] and return a [TransactionReceipt].
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;

    /// Trace the transaction with the given hash and return a [GethTrace].
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;

    /// Returns the state diff of the transaction with the given hash.
    fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;

    /// Returns the balance of the provided [`Address`].
    fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the latest storage proof of the provided [`Address`].
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;

    /// Returns the resolver to be used with this Ethereum node.
    fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;

    /// Returns the EVM version of the node.
    fn evm_version(&self) -> EVMVersion;
}
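
// Editorial sketch (not part of the original diff): a generic helper over any `EthereumNode`
// implementor showing how the boxed futures compose; submit a transaction, then trace it by the
// hash recorded in its receipt.
#[allow(dead_code)]
async fn submit_and_trace(
    node: &dyn EthereumNode,
    transaction: TransactionRequest,
) -> Result<GethTrace> {
    let receipt = node.execute_transaction(transaction).await?;
    node.trace_transaction(receipt.transaction_hash, GethDebugTracingOptions::default())
        .await
}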
@@ -0,0 +1,36 @@
[package]
name = "revive-dt-node"
description = "abstraction over blockchain nodes"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
anyhow = { workspace = true }
alloy = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }

revive-common = { workspace = true }
revive-dt-common = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node-interaction = { workspace = true }

serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
serde_yaml_ng = { workspace = true }

sp-core = { workspace = true }
sp-runtime = { workspace = true }

[dev-dependencies]
temp-dir = { workspace = true }
tokio = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,78 @@
use alloy::{
    network::{Network, TransactionBuilder},
    providers::{
        Provider, SendableTx,
        fillers::{GasFiller, TxFiller},
    },
    transports::TransportResult,
};

#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
    inner: GasFiller,
    default_gas_limit: u64,
    default_max_fee_per_gas: u128,
    default_priority_fee: u128,
}

impl FallbackGasFiller {
    pub fn new(
        default_gas_limit: u64,
        default_max_fee_per_gas: u128,
        default_priority_fee: u128,
    ) -> Self {
        Self {
            inner: GasFiller,
            default_gas_limit,
            default_max_fee_per_gas,
            default_priority_fee,
        }
    }
}

impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
{
    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;

    fn status(
        &self,
        tx: &<N as Network>::TransactionRequest,
    ) -> alloy::providers::fillers::FillerControlFlow {
        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
    }

    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}

    async fn prepare<P: Provider<N>>(
        &self,
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, ...).
        // If it errors (i.e. the tx would revert under eth_estimateGas), swallow the error.
        match self.inner.prepare(provider, tx).await {
            Ok(fill) => Ok(Some(fill)),
            Err(_) => Ok(None),
        }
    }

    async fn fill(
        &self,
        fillable: Self::Fillable,
        mut tx: alloy::providers::SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        if let Some(fill) = fillable {
            // Our inner GasFiller succeeded, so use its values.
            self.inner.fill(fill, tx).await
        } else {
            // Estimation failed; fall back to the configured defaults.
            if let Some(builder) = tx.as_mut_builder() {
                builder.set_gas_limit(self.default_gas_limit);
                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
            }
            Ok(tx)
        }
    }
}
@@ -0,0 +1,5 @@
/// This constant defines how much Wei accounts are pre-seeded with in genesis.
///
/// For scale, 10^37 Wei is 10^19 Ether (1 Ether = 10^18 Wei), comfortably above any realistic
/// test expenditure.
///
/// Note: After changing this number, check that the substrate tests still pass, as we encountered
/// issues with some values of the initial balance on substrate.
pub const INITIAL_BALANCE: u128 = 10u128.pow(37);
@@ -0,0 +1,781 @@
//! The go-ethereum node implementation.

use std::{
    fs::{File, create_dir_all, remove_dir_all},
    io::Read,
    ops::ControlFlow,
    path::PathBuf,
    pin::Pin,
    process::{Command, Stdio},
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    time::Duration,
};

use alloy::{
    eips::BlockNumberOrTag,
    genesis::{Genesis, GenesisAccount},
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
    providers::{
        Provider, ProviderBuilder,
        ext::DebugApi,
        fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
    },
    rpc::types::{
        EIP1186AccountProofResponse, TransactionRequest,
        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
    },
};
use anyhow::Context as _;
use revive_common::EVMVersion;
use tracing::{Instrument, instrument};

use revive_dt_common::{
    fs::clear_directory,
    futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;

use crate::{
    Node,
    common::FallbackGasFiller,
    constants::INITIAL_BALANCE,
    process::{Process, ProcessReadinessWaitBehavior},
};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The go-ethereum node instance implementation.
///
/// Implements helpers to initialize, spawn and wait for the node.
///
/// Assumes dev mode and IPC only (`P2P`, `http`, etc. are kept disabled).
///
/// Prunes the child process and the base directory on drop.
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct GethNode {
    connection_string: String,
    base_directory: PathBuf,
    data_directory: PathBuf,
    logs_directory: PathBuf,
    geth: PathBuf,
    id: u32,
    handle: Option<Process>,
    start_timeout: Duration,
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    chain_id_filler: ChainIdFiller,
}

impl GethNode {
    const BASE_DIRECTORY: &str = "geth";
    const DATA_DIRECTORY: &str = "data";
    const LOGS_DIRECTORY: &str = "logs";

    const IPC_FILE: &str = "geth.ipc";
    const GENESIS_JSON_FILE: &str = "genesis.json";

    const READY_MARKER: &str = "IPC endpoint opened";
    const ERROR_MARKER: &str = "Fatal:";

    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";

    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<GethConfiguration>
        + Clone,
    ) -> Self {
        let working_directory_configuration =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
        let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
        let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);

        let geth_directory = working_directory_configuration
            .as_path()
            .join(Self::BASE_DIRECTORY);
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
        let base_directory = geth_directory.join(id.to_string());

        let wallet = wallet_configuration.wallet();

        Self {
            connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
            data_directory: base_directory.join(Self::DATA_DIRECTORY),
            logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
            base_directory,
            geth: geth_configuration.path.clone(),
            id,
            handle: None,
            start_timeout: geth_configuration.start_timeout_ms,
            wallet: wallet.clone(),
            chain_id_filler: Default::default(),
            nonce_manager: Default::default(),
        }
    }

    /// Create the node directory and call `geth init` to configure the genesis.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

        create_dir_all(&self.base_directory)
            .context("Failed to create base directory for geth node")?;
        create_dir_all(&self.logs_directory)
            .context("Failed to create logs directory for geth node")?;

        for signer_address in
            <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
        {
            // Note, the use of the entry API here means that we only modify the entries for any
            // account that is not in the `alloc` field of the genesis state.
            genesis
                .alloc
                .entry(signer_address)
                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
        }
        let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
        serde_json::to_writer(
            File::create(&genesis_path).context("Failed to create geth genesis file")?,
            &genesis,
        )
        .context("Failed to serialize geth genesis JSON to file")?;

        let mut child = Command::new(&self.geth)
            .arg("--state.scheme")
            .arg("hash")
            .arg("init")
            .arg("--datadir")
            .arg(&self.data_directory)
            .arg(genesis_path)
            .stderr(Stdio::piped())
            .stdout(Stdio::null())
            .spawn()
            .context("Failed to spawn geth --init process")?;

        let mut stderr = String::new();
        child
            .stderr
            .take()
            .expect("should be piped")
            .read_to_string(&mut stderr)
            .context("Failed to read geth --init stderr")?;

        if !child
            .wait()
            .context("Failed waiting for geth --init process to finish")?
            .success()
        {
            anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
        }

        Ok(self)
    }

    /// Spawn the go-ethereum node child process.
    ///
    /// [`Self::init`] must be called first.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
        let process = Process::new(
            None,
            self.logs_directory.as_path(),
            self.geth.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--dev")
                    .arg("--datadir")
                    .arg(&self.data_directory)
                    .arg("--ipcpath")
                    .arg(&self.connection_string)
                    .arg("--nodiscover")
                    .arg("--maxpeers")
                    .arg("0")
                    .arg("--txlookuplimit")
                    .arg("0")
                    .arg("--cache.blocklogs")
                    .arg("512")
                    .arg("--state.scheme")
                    .arg("hash")
                    .arg("--syncmode")
                    .arg("full")
                    .arg("--gcmode")
                    .arg("archive")
                    .stderr(stderr_file)
                    .stdout(stdout_file);
            },
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: self.start_timeout,
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => {
                        if line.contains(Self::ERROR_MARKER) {
                            anyhow::bail!("Failed to start geth {line}");
                        } else if line.contains(Self::READY_MARKER) {
                            Ok(true)
                        } else {
                            Ok(false)
                        }
                    }
                    None => Ok(false),
                }),
            },
        );

        match process {
            Ok(process) => self.handle = Some(process),
            Err(err) => {
                tracing::error!(?err, "Failed to start geth, shutting down gracefully");
                self.shutdown()
                    .context("Failed to gracefully shutdown after geth start error")?;
                return Err(err);
            }
        }

        Ok(self)
    }

    async fn provider(
        &self,
    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
    {
        ProviderBuilder::new()
            .disable_recommended_fillers()
            .filler(FallbackGasFiller::new(
                25_000_000,
                1_000_000_000,
                1_000_000_000,
            ))
            .filler(self.chain_id_filler.clone())
            .filler(NonceFiller::new(self.nonce_manager.clone()))
            .wallet(self.wallet.clone())
            .connect(&self.connection_string)
            .await
            .map_err(Into::into)
    }
}

impl EthereumNode for GethNode {
    fn id(&self) -> usize {
        self.id as _
    }

    fn connection_string(&self) -> &str {
        &self.connection_string
    }

    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::TransactionReceipt>> + '_>>
    {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create provider for transaction submission")?;

            let pending_transaction = provider
                .send_transaction(transaction)
                .await
                .inspect_err(
                    |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
                )
                .context("Failed to submit transaction to geth node")?;
            let transaction_hash = *pending_transaction.tx_hash();

            // The following is a fix for the "transaction indexing is in progress" error that we
            // used to get. You can find more information on this in the following geth GH issue:
            // https://github.com/ethereum/go-ethereum/issues/28877. To summarize, before we can
            // get the receipt of a transaction it needs to have been indexed by the node's
            // indexer, and just because the transaction has been confirmed doesn't mean that it
            // has been indexed. When we call alloy's `get_receipt` it checks if the transaction
            // was confirmed; if it has been, it calls the `eth_getTransactionReceipt` method,
            // which _might_ return the above error if the tx has not been indexed yet. So we
            // retry fetching the receipt until it eventually works, but only if the error we get
            // back is the "transaction indexing is in progress" error or the receipt is None.
            //
            // Indexing the transaction and obtaining a receipt can take a long time, especially
            // when a lot of transactions are being submitted to the node. While we initially
            // allowed 60 seconds of waiting with a 1 second polling delay, we need to allow for a
            // larger wait time: here we allow 5 minutes of waiting, polling at a short constant
            // interval each time we attempt to get the receipt and find that it is not available.
            let provider = Arc::new(provider);
            poll(
                Self::RECEIPT_POLLING_DURATION,
                PollingWaitBehavior::Constant(Duration::from_millis(200)),
                move || {
                    let provider = provider.clone();
                    async move {
                        match provider.get_transaction_receipt(transaction_hash).await {
                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                            Ok(None) => Ok(ControlFlow::Continue(())),
                            Err(error) => {
                                let error_string = error.to_string();
                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
                                    true => Ok(ControlFlow::Continue(())),
                                    false => Err(error.into()),
                                }
                            }
                        }
                    }
                },
            )
            .instrument(tracing::info_span!(
                "Awaiting transaction receipt",
                ?transaction_hash
            ))
            .await
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
    {
        Box::pin(async move {
            let provider = Arc::new(
                self.provider()
                    .await
                    .context("Failed to create provider for tracing")?,
            );
            poll(
                Self::TRACE_POLLING_DURATION,
                PollingWaitBehavior::Constant(Duration::from_millis(200)),
                move || {
                    let provider = provider.clone();
                    let trace_options = trace_options.clone();
                    async move {
                        match provider
                            .debug_trace_transaction(tx_hash, trace_options)
                            .await
                        {
                            Ok(trace) => Ok(ControlFlow::Break(trace)),
                            Err(error) => {
                                let error_string = error.to_string();
                                match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
                                    true => Ok(ControlFlow::Continue(())),
                                    false => Err(error.into()),
                                }
                            }
                        }
                    }
                },
            )
            .await
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn state_diff(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
        Box::pin(async move {
            let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
                diff_mode: Some(true),
                disable_code: None,
                disable_storage: None,
            });
            match self
                .trace_transaction(tx_hash, trace_options)
                .await
                .context("Failed to trace transaction for prestate diff")?
                .try_into_pre_state_frame()
                .context("Failed to convert trace into pre-state frame")?
            {
                PreStateFrame::Diff(diff) => Ok(diff),
                _ => anyhow::bail!("expected a diff mode trace"),
            }
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn balance_of(
        &self,
        address: Address,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the Geth provider")?
                .get_balance(address)
                .await
                .map_err(Into::into)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the Geth provider")?
                .get_proof(address, keys)
                .latest()
                .await
                .map_err(Into::into)
        })
    }

    // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Arc::new(GethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }
}

pub struct GethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
    id: u32,
    provider: FillProvider<F, P, Ethereum>,
}

impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for GethNodeResolver<F, P> {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn chain_id(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_transaction_receipt(tx_hash)
                .await?
                .context("Failed to get the transaction receipt")
                .map(|receipt| receipt.effective_gas_price)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.gas_limit as _)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.beneficiary)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .and_then(|block| {
                    block
                        .header
                        .base_fee_per_gas
                        .context("Failed to get the base fee per gas")
                })
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.hash)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the Geth block")?
                .context("Failed to get the Geth block, perhaps there are no blocks?")
                .map(|block| block.header.timestamp)
        })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
        Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
    }
}

impl Node for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn shutdown(&mut self) -> anyhow::Result<()> {
        drop(self.handle.take());

        // Remove the node's database so that subsequent runs do not run on the same database. We
        // ignore the error just in case the directory didn't exist in the first place and
        // therefore there's nothing to be deleted.
        let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));

        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()?;
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.geth)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()
            .context("Failed to spawn geth --version process")?
            .wait_with_output()
            .context("Failed to wait for geth --version output")?
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }
}

impl Drop for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn drop(&mut self) {
        self.shutdown().expect("Failed to shutdown")
    }
}

#[cfg(test)]
mod tests {
    use std::sync::LazyLock;

    use super::*;

    fn test_config() -> TestExecutionContext {
        TestExecutionContext::default()
    }

    fn new_node() -> (TestExecutionContext, GethNode) {
        let context = test_config();
        let mut node = GethNode::new(&context);
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
            .spawn_process()
            .expect("Failed to spawn the node process");
        (context, node)
    }

    fn shared_node() -> &'static GethNode {
        static NODE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
        &NODE.1
    }

    #[test]
    fn version_works() {
        // Arrange
        let node = shared_node();

        // Act
        let version = node.version();

        // Assert
        let version = version.expect("Failed to get the version");
        assert!(
            version.starts_with("geth version"),
            "expected version string, got: '{version}'"
        );
    }

    #[tokio::test]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let chain_id = node.resolver().await.unwrap().chain_id().await;

        // Assert
        let chain_id = chain_id.expect("Failed to get the chain id");
        assert_eq!(chain_id, 420_420_420);
    }

    #[tokio::test]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let gas_limit = node
            .resolver()
            .await
            .unwrap()
            .block_gas_limit(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = gas_limit.expect("Failed to get the gas limit");
    }

    #[tokio::test]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let coinbase = node
            .resolver()
            .await
            .unwrap()
            .block_coinbase(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = coinbase.expect("Failed to get the coinbase");
    }

    #[tokio::test]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_difficulty = node
            .resolver()
            .await
            .unwrap()
            .block_difficulty(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_difficulty.expect("Failed to get the block difficulty");
    }

    #[tokio::test]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_hash = node
            .resolver()
            .await
            .unwrap()
            .block_hash(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_hash.expect("Failed to get the block hash");
    }

    #[tokio::test]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_timestamp = node
            .resolver()
            .await
            .unwrap()
            .block_timestamp(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_timestamp.expect("Failed to get the block timestamp");
    }

    #[tokio::test]
    async fn can_get_block_number_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_number = node.resolver().await.unwrap().last_block_number().await;

        // Assert
        let _ = block_number.expect("Failed to get the block number");
    }
}
@@ -0,0 +1,27 @@
//! This crate implements the testing nodes.

use alloy::genesis::Genesis;
use revive_dt_node_interaction::EthereumNode;

pub mod common;
pub mod constants;
pub mod geth;
pub mod lighthouse_geth;
pub mod process;
pub mod substrate;

/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
    /// Spawns a node configured according to the genesis JSON.
    ///
    /// Blocks until the node is ready to accept transactions.
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;

    /// Prune the node instance and related data.
    ///
    /// Blocks until the node is completely stopped.
    fn shutdown(&mut self) -> anyhow::Result<()>;

    /// Returns the node version.
    fn version(&self) -> anyhow::Result<String>;
}
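
// Editorial sketch (not part of the original diff): the intended lifecycle of any `Node`
// implementor; `spawn` blocks until the node accepts transactions, `shutdown` blocks until it
// has fully stopped and its data is pruned.
#[allow(dead_code)]
fn run_node_lifecycle(node: &mut dyn Node, genesis: Genesis) -> anyhow::Result<()> {
    node.spawn(genesis)?;
    let version = node.version()?;
    tracing::info!(%version, "node started");
    node.shutdown()
}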
File diff suppressed because it is too large
@@ -0,0 +1,188 @@
use std::{
    fs::{File, OpenOptions},
    io::{BufRead, BufReader, Write},
    path::Path,
    process::{Child, Command},
    time::{Duration, Instant},
};

use anyhow::{Context, Result, bail};

/// A wrapper around processes which allows their stdout and stderr to be logged and flushed when
/// the process is dropped.
#[derive(Debug)]
pub struct Process {
    /// The handle of the child process.
    child: Child,

    /// The file that stdout is being logged to.
    stdout_logs_file: File,

    /// The file that stderr is being logged to.
    stderr_logs_file: File,
}

impl Process {
    pub fn new(
        log_file_prefix: impl Into<Option<&'static str>>,
        logs_directory: impl AsRef<Path>,
        binary_path: impl AsRef<Path>,
        command_building_callback: impl FnOnce(&mut Command, File, File),
        process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
    ) -> Result<Self> {
        let log_file_prefix = log_file_prefix.into();

        let (stdout_file_name, stderr_file_name) = match log_file_prefix {
            Some(prefix) => (
                format!("{prefix}_stdout.log"),
                format!("{prefix}_stderr.log"),
            ),
            None => ("stdout.log".to_string(), "stderr.log".to_string()),
        };

        let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
        let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);

        let stdout_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stdout_logs_file_path.as_path())
            .context("Failed to open the stdout logs file")?;
        let stderr_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stderr_logs_file_path.as_path())
            .context("Failed to open the stderr logs file")?;

        let mut command = {
            let stdout_logs_file = stdout_logs_file
                .try_clone()
                .context("Failed to clone the stdout logs file")?;
            let stderr_logs_file = stderr_logs_file
                .try_clone()
                .context("Failed to clone the stderr logs file")?;

            let mut command = Command::new(binary_path.as_ref());
            command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
            command
        };
        let mut child = command
            .spawn()
            .context("Failed to spawn the built command")?;

        match process_readiness_wait_behavior {
            ProcessReadinessWaitBehavior::NoStartupWait => {}
            ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration,
                mut check_function,
            } => {
                let spawn_time = Instant::now();

                let stdout_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stdout_logs_file_path)
                    .context("Failed to open the stdout logs file")?;
                let stderr_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stderr_logs_file_path)
                    .context("Failed to open the stderr logs file")?;

                let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
                let mut stderr_lines = BufReader::new(stderr_logs_file).lines();

                let mut stdout = String::new();
                let mut stderr = String::new();

                loop {
                    let stdout_line = stdout_lines.next().and_then(Result::ok);
                    let stderr_line = stderr_lines.next().and_then(Result::ok);

                    if let Some(stdout_line) = stdout_line.as_ref() {
                        stdout.push_str(stdout_line);
                        stdout.push('\n');
                    }
                    if let Some(stderr_line) = stderr_line.as_ref() {
                        stderr.push_str(stderr_line);
                        stderr.push('\n');
                    }

                    let check_result =
                        check_function(stdout_line.as_deref(), stderr_line.as_deref())
                            .context("Failed to wait for the process to be ready")?;

                    if check_result {
                        break;
                    }

                    if Instant::now().duration_since(spawn_time) > max_wait_duration {
                        bail!(
                            "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
                        )
                    }
                }
            }
            ProcessReadinessWaitBehavior::WaitForCommandToExit => {
                if !child
                    .wait()
                    .context("Failed waiting for the kurtosis run process to finish")?
                    .success()
                {
                    anyhow::bail!("Failed to initialize the kurtosis network");
                }
            }
        }

        Ok(Self {
            child,
            stdout_logs_file,
            stderr_logs_file,
        })
    }
}

impl Drop for Process {
|
||||
fn drop(&mut self) {
|
||||
self.child.kill().expect("Failed to kill the process");
|
||||
self.stdout_logs_file
|
||||
.flush()
|
||||
.expect("Failed to flush the stdout logs file");
|
||||
self.stderr_logs_file
|
||||
.flush()
|
||||
.expect("Failed to flush the stderr logs file");
|
||||
}
|
||||
}
|
||||
|
||||
pub enum ProcessReadinessWaitBehavior {
|
||||
/// The process does not require any kind of wait after it's been spawned and can be used
|
||||
/// straight away.
|
||||
NoStartupWait,
|
||||
|
||||
/// Waits for the command to exit.
|
||||
WaitForCommandToExit,
|
||||
|
||||
/// The process does require some amount of wait duration after it's been started.
|
||||
WaitDuration(Duration),
|
||||
|
||||
/// The process requires a time bounded wait function which is a function of the lines that
|
||||
/// appear in the log files.
|
||||
TimeBoundedWaitFunction {
|
||||
/// The maximum amount of time to wait for the check function to return true.
|
||||
max_wait_duration: Duration,
|
||||
|
||||
/// The function to use to check if the process spawned is ready to use or not. This
|
||||
/// function should return the following in the following cases:
|
||||
///
|
||||
/// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
|
||||
/// and the wait is completed.
|
||||
/// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
|
||||
/// - `Err`: The process is not ready yet and will not be ready in the future as it appears
|
||||
/// that it has encountered an error when it was being spawned.
|
||||
///
|
||||
/// The first argument is a line from stdout and the second argument is a line from stderr.
|
||||
#[allow(clippy::type_complexity)]
|
||||
check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
|
||||
},
|
||||
}
|
||||
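A minimal usage sketch of the wrapper above. The binary path, logs directory, CLI flag, and readiness log line are all hypothetical; only the `Process::new` signature and the `ProcessReadinessWaitBehavior` variants come from the code itself.

// A sketch, not part of the diff: hypothetical paths, flag, and readiness line.
fn spawn_ready_process() -> anyhow::Result<Process> {
    Process::new(
        "node", // log files become node_stdout.log / node_stderr.log
        "/tmp/logs",
        "/usr/local/bin/substrate-node",
        |command, stdout_file, stderr_file| {
            // Route the child's output into the log files opened by `Process::new`.
            command
                .arg("--dev")
                .stdout(std::process::Stdio::from(stdout_file))
                .stderr(std::process::Stdio::from(stderr_file));
        },
        ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
            max_wait_duration: Duration::from_secs(60),
            // Consider the process ready once the (hypothetical) readiness line
            // appears on stdout.
            check_function: Box::new(|stdout_line, _stderr_line| {
                Ok(stdout_line
                    .map(|line| line.contains("Running JSON-RPC server"))
                    .unwrap_or(false))
            }),
        },
    )
}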
File diff suppressed because it is too large
@@ -0,0 +1,28 @@
[package]
name = "revive-dt-report"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-compiler = { workspace = true }

alloy-primitives = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,538 @@
//! Implementation of the report aggregator task which consumes the events sent by the various
//! reporters and combines them into a single unified report.

use std::{
    collections::{BTreeMap, BTreeSet, HashMap, HashSet},
    fs::OpenOptions,
    path::PathBuf,
    time::{SystemTime, UNIX_EPOCH},
};

use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::Context;
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
    broadcast::{Sender, channel},
    mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::debug;

use crate::*;

pub struct ReportAggregator {
    /* Internal Report State */
    report: Report,
    remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
    /* Channels */
    runner_tx: Option<UnboundedSender<RunnerEvent>>,
    runner_rx: UnboundedReceiver<RunnerEvent>,
    listener_tx: Sender<ReporterEvent>,
}

impl ReportAggregator {
    pub fn new(context: Context) -> Self {
        let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
        let (listener_tx, _) = channel::<ReporterEvent>(1024);
        Self {
            report: Report::new(context),
            remaining_cases: Default::default(),
            runner_tx: Some(runner_tx),
            runner_rx,
            listener_tx,
        }
    }

    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
        let reporter = self
            .runner_tx
            .take()
            .map(Into::into)
            .expect("Can't fail since this can only be called once");
        (reporter, async move { self.aggregate().await })
    }

    async fn aggregate(mut self) -> Result<()> {
        debug!("Starting to aggregate report");

        while let Some(event) = self.runner_rx.recv().await {
            debug!(?event, "Received Event");
            match event {
                RunnerEvent::SubscribeToEvents(event) => {
                    self.handle_subscribe_to_events_event(*event);
                }
                RunnerEvent::CorpusFileDiscovery(event) => {
                    self.handle_corpus_file_discovered_event(*event)
                }
                RunnerEvent::MetadataFileDiscovery(event) => {
                    self.handle_metadata_file_discovery_event(*event);
                }
                RunnerEvent::TestCaseDiscovery(event) => {
                    self.handle_test_case_discovery(*event);
                }
                RunnerEvent::TestSucceeded(event) => {
                    self.handle_test_succeeded_event(*event);
                }
                RunnerEvent::TestFailed(event) => {
                    self.handle_test_failed_event(*event);
                }
                RunnerEvent::TestIgnored(event) => {
                    self.handle_test_ignored_event(*event);
                }
                RunnerEvent::NodeAssigned(event) => {
                    self.handle_node_assigned_event(*event);
                }
                RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
                    self.handle_pre_link_contracts_compilation_succeeded_event(*event)
                }
                RunnerEvent::PostLinkContractsCompilationSucceeded(event) => {
                    self.handle_post_link_contracts_compilation_succeeded_event(*event)
                }
                RunnerEvent::PreLinkContractsCompilationFailed(event) => {
                    self.handle_pre_link_contracts_compilation_failed_event(*event)
                }
                RunnerEvent::PostLinkContractsCompilationFailed(event) => {
                    self.handle_post_link_contracts_compilation_failed_event(*event)
                }
                RunnerEvent::LibrariesDeployed(event) => {
                    self.handle_libraries_deployed_event(*event);
                }
                RunnerEvent::ContractDeployed(event) => {
                    self.handle_contract_deployed_event(*event);
                }
            }
        }
        debug!("Report aggregation completed");

        let file_name = {
            let current_timestamp = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
                .as_secs();
            let mut file_name = current_timestamp.to_string();
            file_name.push_str(".json");
            file_name
        };
        let file_path = self
            .report
            .context
            .working_directory_configuration()
            .as_path()
            .join(file_name);
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .read(false)
            .open(&file_path)
            .with_context(|| {
                format!(
                    "Failed to open report file for writing: {}",
                    file_path.display()
                )
            })?;
        serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
            format!("Failed to serialize report JSON to {}", file_path.display())
        })?;

        Ok(())
    }

    fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
        let _ = event.tx.send(self.listener_tx.subscribe());
    }

    fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
        self.report.corpora.push(event.corpus);
    }

    fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
        self.report.metadata_files.insert(event.path.clone());
    }

    fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .insert(event.test_specifier.case_idx);
    }

    fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Record in the report that the case succeeded.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Succeeded {
            steps_executed: event.steps_executed,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }

    fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Record in the report that the case failed.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Failed {
            reason: event.reason,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }

    fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Record in the report that the case was ignored.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Ignored {
            reason: event.reason,
            additional_fields: event.additional_fields,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }

    fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
        let remaining_cases = self
            .remaining_cases
            .entry(specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(specifier.solc_mode.clone())
            .or_default();
        if !remaining_cases.is_empty() {
            return;
        }

        let case_status = self
            .report
            .test_case_information
            .entry(specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(specifier.solc_mode.clone())
            .or_default()
            .iter()
            .map(|(case_idx, case_report)| {
                (
                    *case_idx,
                    case_report.status.clone().expect("Can't be uninitialized"),
                )
            })
            .collect::<BTreeMap<_, _>>();
        let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path: specifier.metadata_file_path.clone().into(),
            mode: specifier.solc_mode.clone(),
            case_status,
        };

        // According to the documentation on send, sending fails if there are no more receiver
        // handles. Therefore, this isn't an error that we want to bubble up. If we fail to send
        // then we ignore the error.
        let _ = self.listener_tx.send(event);
    }

    fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            platform_identifier: event.platform_identifier,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform_identifier: event.platform_identifier,
            connection_string: event.connection_string,
        });
    }

    fn handle_pre_link_contracts_compilation_succeeded_event(
        &mut self,
        event: PreLinkContractsCompilationSucceededEvent,
    ) {
        let include_input = self
            .report
            .context
            .report_configuration()
            .include_compiler_input;
        let include_output = self
            .report
            .context
            .report_configuration()
            .include_compiler_output;

        let execution_information = self.execution_information(&event.execution_specifier);

        let compiler_input = if include_input {
            event.compiler_input
        } else {
            None
        };
        let compiler_output = if include_output {
            Some(event.compiler_output)
        } else {
            None
        };

        execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
            is_cached: event.is_cached,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input,
            compiler_output,
        });
    }

    fn handle_post_link_contracts_compilation_succeeded_event(
        &mut self,
        event: PostLinkContractsCompilationSucceededEvent,
    ) {
        let include_input = self
            .report
            .context
            .report_configuration()
            .include_compiler_input;
        let include_output = self
            .report
            .context
            .report_configuration()
            .include_compiler_output;

        let execution_information = self.execution_information(&event.execution_specifier);

        let compiler_input = if include_input {
            event.compiler_input
        } else {
            None
        };
        let compiler_output = if include_output {
            Some(event.compiler_output)
        } else {
            None
        };

        execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
            is_cached: event.is_cached,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input,
            compiler_output,
        });
    }

    fn handle_pre_link_contracts_compilation_failed_event(
        &mut self,
        event: PreLinkContractsCompilationFailedEvent,
    ) {
        let execution_information = self.execution_information(&event.execution_specifier);

        execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
            reason: event.reason,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input: event.compiler_input,
        });
    }

    fn handle_post_link_contracts_compilation_failed_event(
        &mut self,
        event: PostLinkContractsCompilationFailedEvent,
    ) {
        let execution_information = self.execution_information(&event.execution_specifier);

        execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
            reason: event.reason,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input: event.compiler_input,
        });
    }

    fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
        self.execution_information(&event.execution_specifier)
            .deployed_libraries = Some(event.libraries);
    }

    fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
        self.execution_information(&event.execution_specifier)
            .deployed_contracts
            .get_or_insert_default()
            .insert(event.contract_instance, event.address);
    }

    fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
        self.report
            .test_case_information
            .entry(specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(specifier.solc_mode.clone())
            .or_default()
            .entry(specifier.case_idx)
            .or_default()
    }

    fn execution_information(
        &mut self,
        specifier: &ExecutionSpecifier,
    ) -> &mut ExecutionInformation {
        let test_case_report = self.test_case_report(&specifier.test_specifier);
        test_case_report
            .platform_execution
            .entry(specifier.platform_identifier)
            .or_default()
            .get_or_insert_default()
    }
}

#[serde_as]
#[derive(Clone, Debug, Serialize)]
pub struct Report {
    /// The context that the tool was started up with.
    pub context: Context,
    /// The list of corpus files that the tool found.
    pub corpora: Vec<Corpus>,
    /// The list of metadata files that were found by the tool.
    pub metadata_files: BTreeSet<MetadataFilePath>,
    /// Information relating to each test case.
    #[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
    pub test_case_information:
        BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
}

impl Report {
    pub fn new(context: Context) -> Self {
        Self {
            context,
            corpora: Default::default(),
            metadata_files: Default::default(),
            test_case_information: Default::default(),
        }
    }
}

#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
    /// Information related to the execution on one of the platforms.
    pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
    /// The test case succeeded.
    Succeeded {
        /// The number of steps of the case that were executed.
        steps_executed: usize,
    },
    /// The test case failed.
    Failed {
        /// The reason for the failure of the test case.
        reason: String,
    },
    /// The test case was ignored. This variant carries information related to why it was ignored.
    Ignored {
        /// The reason behind the test case being ignored.
        reason: String,
        /// Additional fields that describe more information on why the test case is ignored.
        #[serde(flatten)]
        additional_fields: IndexMap<String, serde_json::Value>,
    },
}

/// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
pub struct TestCaseNodeInformation {
    /// The ID of the node that this case is being executed on.
    pub id: usize,
    /// The platform of the node.
    pub platform_identifier: PlatformIdentifier,
    /// The connection string of the node.
    pub connection_string: String,
}

/// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize)]
pub struct ExecutionInformation {
    /// Information related to the node assigned to this test case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node: Option<TestCaseNodeInformation>,
    /// Information on the pre-link compiled contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pre_link_compilation_status: Option<CompilationStatus>,
    /// Information on the post-link compiled contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub post_link_compilation_status: Option<CompilationStatus>,
    /// Information on the deployed libraries.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
    /// Information on the deployed contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
}

/// Information related to compilation.
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
    /// The compilation was successful.
    Success {
        /// A flag with information on whether the compilation artifacts were cached or not.
        is_cached: bool,
        /// The version of the compiler used to compile the contracts.
        compiler_version: Version,
        /// The path of the compiler used to compile the contracts.
        compiler_path: PathBuf,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
        /// The output of the compiler. This is only included if the appropriate flag is set in the
        /// CLI context.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_output: Option<CompilerOutput>,
    },
    /// The compilation failed.
    Failure {
        /// The failure reason.
        reason: String,
        /// The version of the compiler used to compile the contracts.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_version: Option<Version>,
        /// The path of the compiler used to compile the contracts.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_path: Option<PathBuf>,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
    },
}
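A minimal sketch of driving the aggregator above, assuming a `Context` has already been constructed from the CLI arguments; the aggregation future must be polled (here via `tokio::spawn`) for events to be consumed and the report written.

// A sketch, not part of the diff.
async fn run_with_reporting(context: Context) -> anyhow::Result<()> {
    let (reporter, aggregation_task) = ReportAggregator::new(context).into_task();
    let aggregation_handle = tokio::spawn(aggregation_task);

    // Runners would hold clones of `reporter` and feed it events here, e.g.
    // reporter.report_corpus_file_discovery_event(corpus)?;

    // Dropping the last `reporter` clone closes the channel, letting the
    // aggregator drain remaining events and write the timestamped JSON report.
    drop(reporter);
    aggregation_handle.await??;
    Ok(())
}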
@@ -0,0 +1,37 @@
//! Common types and functions used throughout the crate.

use std::{path::PathBuf, sync::Arc};

use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, steps::StepPath};
use serde::{Deserialize, Serialize};

define_wrapper_type!(
    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct MetadataFilePath(PathBuf);
);

/// An absolute specifier for a test.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestSpecifier {
    pub solc_mode: Mode,
    pub metadata_file_path: PathBuf,
    pub case_idx: CaseIdx,
}

/// An absolute specifier for a test that also includes information about the node that it's
/// assigned to and what platform it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExecutionSpecifier {
    pub test_specifier: Arc<TestSpecifier>,
    pub node_id: usize,
    pub platform_identifier: PlatformIdentifier,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier {
    pub execution_specifier: Arc<ExecutionSpecifier>,
    pub step_idx: StepPath,
}
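A small sketch of how these specifier types nest; the metadata path below is a hypothetical placeholder, and the `Mode`, `CaseIdx`, and `PlatformIdentifier` values are taken as parameters.

// A sketch, not part of the diff.
fn make_execution_specifier(
    solc_mode: Mode,
    case_idx: CaseIdx,
    platform_identifier: PlatformIdentifier,
) -> ExecutionSpecifier {
    let test_specifier = Arc::new(TestSpecifier {
        solc_mode,
        metadata_file_path: PathBuf::from("tests/erc20/metadata.json"), // hypothetical
        case_idx,
    });
    ExecutionSpecifier {
        test_specifier,
        node_id: 0,
        platform_identifier,
    }
}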
@@ -0,0 +1,11 @@
//! This crate implements the reporting infrastructure for the differential testing tool.

mod aggregator;
mod common;
mod reporter_event;
mod runner_event;

pub use aggregator::*;
pub use common::*;
pub use reporter_event::*;
pub use runner_event::*;
@@ -0,0 +1,22 @@
//! A reporter event sent by the report aggregator to the various listeners.

use std::collections::BTreeMap;

use revive_dt_compiler::Mode;
use revive_dt_format::case::CaseIdx;

use crate::{MetadataFilePath, TestCaseStatus};

#[derive(Clone, Debug)]
pub enum ReporterEvent {
    /// An event sent by the reporter once an entire metadata file and solc mode combination has
    /// finished execution.
    MetadataFileSolcModeCombinationExecutionCompleted {
        /// The path of the metadata file.
        metadata_file_path: MetadataFilePath,
        /// The Solc mode that this metadata file was executed in.
        mode: Mode,
        /// The status of each one of the cases.
        case_status: BTreeMap<CaseIdx, TestCaseStatus>,
    },
}
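A sketch of a listener loop over this event, assuming a `broadcast::Receiver<ReporterEvent>` obtained through the runner-event `subscribe` helper defined later in this diff; the handling shown is hypothetical.

// A sketch, not part of the diff.
async fn listen(mut rx: tokio::sync::broadcast::Receiver<ReporterEvent>) {
    while let Ok(event) = rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event;
        // Hypothetical handling: log how many cases this combination finished with.
        println!(
            "{metadata_file_path:?} ({mode:?}): {} case(s) completed",
            case_status.len()
        );
    }
}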
@@ -0,0 +1,631 @@
//! The types associated with the events sent by the runner to the reporter.
#![allow(dead_code)]

use std::{collections::BTreeMap, path::PathBuf, sync::Arc};

use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};

use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};

macro_rules! __report_gen_emit_test_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.test_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_test_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_test_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        test_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_test_specific_by_parse!(
            $ident, $variant_ident, test_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

macro_rules! __report_gen_emit_execution_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.execution_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_execution_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_execution_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before_exec {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        execution_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_execution_specific_by_parse!(
            $ident, $variant_ident, execution_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before_exec!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant_exec {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before_exec!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

macro_rules! __report_gen_emit_step_execution_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.step_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_step_execution_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_step_execution_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before_step {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        step_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_step_execution_specific_by_parse!(
            $ident, $variant_ident, step_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before_step!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant_step {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before_step!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

/// Defines the runner event which is sent from the test runners to the report aggregator.
///
/// This macro defines a number of things related to the reporting infrastructure and the interface
/// used. First of all, it defines the enum of all of the possible events that the runners can send
/// to the aggregator. For each of the variants it defines a separate struct so that the variant's
/// payload in the enum can be put in a [`Box`].
///
/// In addition, it defines [`From`] implementations for the various event types for the
/// [`RunnerEvent`] enum, essentially allowing events such as [`CorpusFileDiscoveryEvent`] to be
/// converted into a [`RunnerEvent`].
///
/// It also defines the [`RunnerEventReporter`], which is a wrapper around an [`UnboundedSender`]
/// allowing events to be sent to the report aggregator.
///
/// Taken together, this macro defines almost all of the interface of the reporting infrastructure,
/// from the enum itself, to its associated types, to the reporter that's used to report events to
/// the aggregator.
///
/// [`UnboundedSender`]: tokio::sync::mpsc::UnboundedSender
macro_rules! define_event {
    (
        $(#[$enum_meta: meta])*
        $vis: vis enum $ident: ident {
            $(
                $(#[$variant_meta: meta])*
                $variant_ident: ident {
                    $(
                        $(#[$field_meta: meta])*
                        $field_ident: ident: $field_ty: ty
                    ),* $(,)?
                }
            ),* $(,)?
        }
    ) => {
        paste::paste! {
            $(#[$enum_meta])*
            #[derive(Debug)]
            $vis enum $ident {
                $(
                    $(#[$variant_meta])*
                    $variant_ident(Box<[<$variant_ident Event>]>)
                ),*
            }

            $(
                #[derive(Debug)]
                $(#[$variant_meta])*
                $vis struct [<$variant_ident Event>] {
                    $(
                        $(#[$field_meta])*
                        $vis $field_ident: $field_ty
                    ),*
                }
            )*

            $(
                impl From<[<$variant_ident Event>]> for $ident {
                    fn from(value: [<$variant_ident Event>]) -> Self {
                        Self::$variant_ident(Box::new(value))
                    }
                }
            )*

            /// Provides a way to report events to the aggregator.
            ///
            /// Under the hood, this is a wrapper around an [`UnboundedSender`] which abstracts away
            /// the fact that channels are used and that implements high-level methods for reporting
            /// various events to the aggregator.
            #[derive(Clone, Debug)]
            pub struct [< $ident Reporter >]($vis tokio::sync::mpsc::UnboundedSender<$ident>);

            impl From<tokio::sync::mpsc::UnboundedSender<$ident>> for [< $ident Reporter >] {
                fn from(value: tokio::sync::mpsc::UnboundedSender<$ident>) -> Self {
                    Self(value)
                }
            }

            impl [< $ident Reporter >] {
                pub fn test_specific_reporter(
                    &self,
                    test_specifier: impl Into<std::sync::Arc<crate::common::TestSpecifier>>
                ) -> [< $ident TestSpecificReporter >] {
                    [< $ident TestSpecificReporter >] {
                        reporter: self.clone(),
                        test_specifier: test_specifier.into(),
                    }
                }

                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.0.send(event.into()).map_err(Into::into)
                }

                $(
                    pub fn [< report_ $variant_ident:snake _event >](&self, $($field_ident: impl Into<$field_ty>),*) -> anyhow::Result<()> {
                        self.report([< $variant_ident Event >] {
                            $($field_ident: $field_ident.into()),*
                        })
                    }
                )*
            }

            /// A reporter that's tied to a specific test case.
            #[derive(Clone, Debug)]
            pub struct [< $ident TestSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis test_specifier: std::sync::Arc<crate::common::TestSpecifier>,
            }

            impl [< $ident TestSpecificReporter >] {
                pub fn execution_specific_reporter(
                    &self,
                    node_id: impl Into<usize>,
                    platform_identifier: impl Into<PlatformIdentifier>
                ) -> [< $ident ExecutionSpecificReporter >] {
                    [< $ident ExecutionSpecificReporter >] {
                        reporter: self.reporter.clone(),
                        execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
                            test_specifier: self.test_specifier.clone(),
                            node_id: node_id.into(),
                            platform_identifier: platform_identifier.into(),
                        })
                    }
                }

                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }

            /// A reporter that's tied to a specific execution of the test case such as execution on
            /// a specific node from a specific platform.
            #[derive(Clone, Debug)]
            pub struct [< $ident ExecutionSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis execution_specifier: std::sync::Arc<$crate::common::ExecutionSpecifier>,
            }

            impl [< $ident ExecutionSpecificReporter >] {
                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant_exec! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }

            /// A reporter that's tied to a specific step execution.
            #[derive(Clone, Debug)]
            pub struct [< $ident StepExecutionSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis step_specifier: std::sync::Arc<$crate::common::StepExecutionSpecifier>,
            }

            impl [< $ident StepExecutionSpecificReporter >] {
                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant_step! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }
        }
    };
}

define_event! {
    /// An event type that's sent by the test runners/drivers to the report aggregator.
    pub(crate) enum RunnerEvent {
        /// An event emitted by the reporter when it wishes to listen to events emitted by the
        /// aggregator.
        SubscribeToEvents {
            /// The channel that the aggregator is to send the receive side of the channel on.
            tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
        },
        /// An event emitted by runners when they've discovered a corpus file.
        CorpusFileDiscovery {
            /// The contents of the corpus file.
            corpus: Corpus
        },
        /// An event emitted by runners when they've discovered a metadata file.
        MetadataFileDiscovery {
            /// The path of the metadata file discovered.
            path: MetadataFilePath,
            /// The content of the metadata file.
            metadata: Metadata
        },
        /// An event emitted by the runners when they discover a test case.
        TestCaseDiscovery {
            /// A specifier for the test that was discovered.
            test_specifier: Arc<TestSpecifier>,
        },
        /// An event emitted by the runners when a test case is ignored.
        TestIgnored {
            /// A specifier for the test that's been ignored.
            test_specifier: Arc<TestSpecifier>,
            /// A reason for the test to be ignored.
            reason: String,
            /// Additional fields that describe more information on why the test was ignored.
            additional_fields: IndexMap<String, serde_json::Value>
        },
        /// An event emitted by the runners when a test case has succeeded.
        TestSucceeded {
            /// A specifier for the test that succeeded.
            test_specifier: Arc<TestSpecifier>,
            /// The number of steps of the case that were executed by the driver.
            steps_executed: usize,
        },
        /// An event emitted by the runners when a test case has failed.
        TestFailed {
            /// A specifier for the test that failed.
            test_specifier: Arc<TestSpecifier>,
            /// A reason for the failure of the test.
            reason: String,
        },
        /// An event emitted when the test case is assigned a platform node.
        NodeAssigned {
            /// A specifier for the test that the assignment is for.
            test_specifier: Arc<TestSpecifier>,
            /// The ID of the node that this case is being executed on.
            id: usize,
            /// The identifier of the platform used.
            platform_identifier: PlatformIdentifier,
            /// The connection string of the node.
            connection_string: String,
        },
        /// An event emitted by the runners when the compilation of the pre-link contracts has
        /// succeeded.
        PreLinkContractsCompilationSucceeded {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Version,
            /// The path of the compiler used to compile the contracts.
            compiler_path: PathBuf,
            /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
            /// anew.
            is_cached: bool,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The output of the compiler.
            compiler_output: CompilerOutput
        },
        /// An event emitted by the runners when the compilation of the post-link contracts has
        /// succeeded.
        PostLinkContractsCompilationSucceeded {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Version,
            /// The path of the compiler used to compile the contracts.
            compiler_path: PathBuf,
            /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
            /// anew.
            is_cached: bool,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The output of the compiler.
            compiler_output: CompilerOutput
        },
        /// An event emitted by the runners when the compilation of the pre-link contracts has
        /// failed.
        PreLinkContractsCompilationFailed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Option<Version>,
            /// The path of the compiler used to compile the contracts.
            compiler_path: Option<PathBuf>,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The failure reason.
            reason: String,
        },
        /// An event emitted by the runners when the compilation of the post-link contracts has
        /// failed.
        PostLinkContractsCompilationFailed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Option<Version>,
            /// The path of the compiler used to compile the contracts.
            compiler_path: Option<PathBuf>,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The failure reason.
            reason: String,
        },
        /// An event emitted by the runners when libraries have been deployed.
        LibrariesDeployed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The addresses of the libraries that were deployed.
            libraries: BTreeMap<ContractInstance, Address>
        },
        /// An event emitted by the runners when they've deployed a new contract.
        ContractDeployed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The instance name of the contract.
            contract_instance: ContractInstance,
            /// The address of the contract.
            address: Address
        },
    }
}

/// An extension to the [`Reporter`] implemented by the macro.
impl RunnerEventReporter {
    pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
        let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
        self.report_subscribe_to_events_event(tx)
            .context("Failed to send subscribe request to reporter task")?;
        rx.await.map_err(Into::into)
    }
}

pub type Reporter = RunnerEventReporter;
pub type TestSpecificReporter = RunnerEventTestSpecificReporter;
pub type ExecutionSpecificReporter = RunnerEventExecutionSpecificReporter;
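A minimal sketch of the interface the macro expands to, assuming a `Reporter` obtained from `ReportAggregator::into_task`; the node id, platform, step count, and library map are hypothetical placeholder values.

// A sketch, not part of the diff.
async fn report_one_case(
    reporter: Reporter,
    test_specifier: Arc<TestSpecifier>,
    platform: PlatformIdentifier,
) -> anyhow::Result<()> {
    // Narrow the reporter to one test, then to one (node, platform) execution.
    let test_reporter = reporter.test_specific_reporter(test_specifier);
    let exec_reporter = test_reporter.execution_specific_reporter(0usize, platform);

    // Generated methods fill the specifier in automatically; only the remaining
    // event fields are passed, each as `impl Into<FieldTy>`.
    exec_reporter.report_libraries_deployed_event(BTreeMap::<ContractInstance, Address>::new())?;
    test_reporter.report_test_succeeded_event(3usize)?;

    // Listeners can also subscribe to the aggregator's broadcast side.
    let mut listener = reporter.subscribe().await?;
    if let Ok(event) = listener.try_recv() {
        println!("{event:?}");
    }
    Ok(())
}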
@@ -0,0 +1,24 @@
[package]
name = "revive-dt-solc-binaries"
description = "Download and cache solc binaries"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }

anyhow = { workspace = true }
hex = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }
reqwest = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
sha2 = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,108 @@
//! Helper for caching the solc binaries.

use std::{
    collections::HashSet,
    fs::{File, create_dir_all},
    io::{BufWriter, Write},
    os::unix::fs::PermissionsExt,
    path::{Path, PathBuf},
    sync::LazyLock,
};

use semver::Version;
use tokio::sync::Mutex;

use crate::download::SolcDownloader;
use anyhow::Context as _;

pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

pub(crate) async fn get_or_download(
    working_directory: &Path,
    downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> {
    let target_directory = working_directory
        .join(SOLC_CACHE_DIRECTORY)
        .join(downloader.version.to_string());
    let target_file = target_directory.join(downloader.target);

    let mut cache = SOLC_CACHER.lock().await;
    if cache.contains(&target_file) {
        tracing::debug!("using cached solc: {}", target_file.display());
        return Ok((downloader.version.clone(), target_file));
    }

    create_dir_all(&target_directory).with_context(|| {
        format!(
            "Failed to create solc cache directory: {}",
            target_directory.display()
        )
    })?;
    download_to_file(&target_file, downloader)
        .await
        .with_context(|| {
            format!(
                "Failed to write downloaded solc to {}",
                target_file.display()
            )
        })?;
    cache.insert(target_file.clone());

    Ok((downloader.version.clone(), target_file))
}

async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
    let Ok(file) = File::create_new(path) else {
        return Ok(());
    };

    #[cfg(unix)]
    {
        let mut permissions = file
            .metadata()
            .with_context(|| format!("Failed to read metadata for {}", path.display()))?
            .permissions();
        permissions.set_mode(permissions.mode() | 0o111);
        file.set_permissions(permissions).with_context(|| {
            format!("Failed to set executable permissions on {}", path.display())
        })?;
    }

    let mut file = BufWriter::new(file);
    file.write_all(
        &downloader
            .download()
            .await
            .context("Failed to download solc binary bytes")?,
    )
    .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
    file.flush()
        .with_context(|| format!("Failed to flush file {}", path.display()))?;
    drop(file);

    #[cfg(target_os = "macos")]
    std::process::Command::new("xattr")
        .arg("-d")
        .arg("com.apple.quarantine")
        .arg(path)
        .stderr(std::process::Stdio::null())
        .stdout(std::process::Stdio::null())
        .spawn()
        .with_context(|| {
            format!(
                "Failed to spawn xattr to remove quarantine attribute on {}",
                path.display()
            )
        })?
        .wait()
        .with_context(|| {
            format!(
                "Failed waiting for xattr operation to complete on {}",
                path.display()
            )
        })?;

    Ok(())
}
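A minimal sketch of the cache in use (crate-internal, since `get_or_download` is `pub(crate)`); the working directory and the requested version are hypothetical. The on-disk layout implied by the code above is `<working_directory>/solc/<version>/<target>`.

// A sketch, not part of the diff; assumes a tokio runtime.
async fn fetch_solc() -> anyhow::Result<std::path::PathBuf> {
    let downloader = SolcDownloader::linux(Version::new(0, 8, 26)).await?;
    let (version, path) = get_or_download(Path::new("/tmp/revive-dt"), &downloader).await?;
    tracing::info!("solc {version} available at {}", path.display());
    Ok(path)
}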
@@ -0,0 +1,212 @@
|
||||
//! This module downloads solc binaries.
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{LazyLock, Mutex},
|
||||
};
|
||||
|
||||
use revive_dt_common::types::VersionOrRequirement;
|
||||
|
||||
use semver::Version;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use crate::list::List;
|
||||
use anyhow::Context as _;
|
||||
|
||||
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
|
||||
LazyLock::new(Default::default);
|
||||
|
||||
impl List {
|
||||
pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json";
|
||||
pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json";
|
||||
pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json";
|
||||
pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json";
|
||||
|
||||
/// Try to downloads the list from the given URL.
|
||||
///
|
||||
/// Caches the list retrieved from the `url` into [LIST_CACHE],
|
||||
/// subsequent calls with the same `url` will return the cached list.
|
||||
pub async fn download(url: &'static str) -> anyhow::Result<Self> {
|
||||
if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
|
||||
return Ok(list.clone());
|
||||
}
|
||||
|
||||
let body: List = reqwest::get(url)
|
||||
.await
|
||||
.with_context(|| format!("Failed to GET solc list from {url}"))?
|
||||
.json()
|
||||
.await
|
||||
.with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
|
||||
|
||||
LIST_CACHE.lock().unwrap().insert(url, body.clone());
|
||||
|
||||
Ok(body)
|
||||
}
|
||||
}

/// Download solc binaries from the official SolidityLang site
#[derive(Clone, Debug)]
pub struct SolcDownloader {
    pub version: Version,
    pub target: &'static str,
    pub list: &'static str,
}

impl SolcDownloader {
    pub const BASE_URL: &str = "https://binaries.soliditylang.org";

    pub const LINUX_NAME: &str = "linux-amd64";
    pub const MACOSX_NAME: &str = "macosx-amd64";
    pub const WINDOWS_NAME: &str = "windows-amd64";
    pub const WASM_NAME: &str = "wasm";

    async fn new(
        version: impl Into<VersionOrRequirement>,
        target: &'static str,
        list: &'static str,
    ) -> anyhow::Result<Self> {
        let version_or_requirement = version.into();
        match version_or_requirement {
            VersionOrRequirement::Version(version) => Ok(Self {
                version,
                target,
                list,
            }),
            VersionOrRequirement::Requirement(requirement) => {
                let Some(version) = List::download(list)
                    .await
                    .with_context(|| format!("Failed to download solc builds list from {list}"))?
                    .builds
                    .into_iter()
                    .map(|build| build.version)
                    .filter(|version| requirement.matches(version))
                    .max()
                else {
                    anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
                };
                Ok(Self {
                    version,
                    target,
                    list,
                })
            }
        }
    }

    pub async fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await
    }

    pub async fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await
    }

    pub async fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await
    }

    pub async fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WASM_NAME, List::WASM_URL).await
    }

    /// Download the solc binary.
    ///
    /// Errors out if the download fails or if the digest of the downloaded file
    /// does not match the expected digest from the release [List].
    pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
        let builds = List::download(self.list)
            .await
            .with_context(|| format!("Failed to download solc builds list from {}", self.list))?
            .builds;
        let build = builds
            .iter()
            .find(|build| build.version == self.version)
            .ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
            .with_context(|| {
                format!(
                    "Requested solc version {} was not found in the builds list fetched from {}",
                    self.version, self.list
                )
            })?;

        let path = build.path.clone();
        let expected_digest = build
            .sha256
            .strip_prefix("0x")
            .unwrap_or(&build.sha256)
            .to_string();
        let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());

        let file = reqwest::get(&url)
            .await
            .with_context(|| format!("Failed to GET solc binary from {url}"))?
            .bytes()
            .await
            .with_context(|| format!("Failed to read solc binary bytes from {url}"))?
            .to_vec();

        if hex::encode(Sha256::digest(&file)) != expected_digest {
            anyhow::bail!("sha256 mismatch for solc version {}", self.version);
        }

        Ok(file)
    }
}
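The `VersionOrRequirement::Requirement` arm of `new` above resolves to the highest published version satisfying a semver requirement. A minimal sketch, assuming `VersionOrRequirement` (from `revive_dt_common`) converts from a `semver::VersionReq`, a conversion this diff does not show:

// Hedged sketch: resolve "newest 0.8.x" to a concrete linux build.
#[tokio::test]
async fn resolve_requirement_sketch() {
    let requirement: semver::VersionReq = "^0.8".parse().unwrap();
    let downloader = SolcDownloader::linux(requirement).await.unwrap();
    // `new` resolved the requirement against the builds list, so `version` is concrete.
    assert!(semver::VersionReq::parse("^0.8").unwrap().matches(&downloader.version));
}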

#[cfg(test)]
mod tests {
    use crate::{download::SolcDownloader, list::List};

    #[tokio::test]
    async fn try_get_windows() {
        let version = List::download(List::WINDOWS_URL)
            .await
            .unwrap()
            .latest_release;
        SolcDownloader::windows(version)
            .await
            .unwrap()
            .download()
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn try_get_macosx() {
        let version = List::download(List::MACOSX_URL)
            .await
            .unwrap()
            .latest_release;
        SolcDownloader::macosx(version)
            .await
            .unwrap()
            .download()
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn try_get_linux() {
        let version = List::download(List::LINUX_URL)
            .await
            .unwrap()
            .latest_release;
        SolcDownloader::linux(version)
            .await
            .unwrap()
            .download()
            .await
            .unwrap();
    }

    #[tokio::test]
    async fn try_get_wasm() {
        let version = List::download(List::WASM_URL).await.unwrap().latest_release;
        SolcDownloader::wasm(version)
            .await
            .unwrap()
            .download()
            .await
            .unwrap();
    }
}
@@ -0,0 +1,43 @@
//! This crate provides serializable Rust type definitions for the [solc binary lists][0]
//! and download helpers.
//!
//! [0]: https://binaries.soliditylang.org

use std::path::{Path, PathBuf};

use anyhow::Context as _;
use cache::get_or_download;
use download::SolcDownloader;

use revive_dt_common::types::VersionOrRequirement;
use semver::Version;

pub mod cache;
pub mod download;
pub mod list;

/// Downloads the solc binary for Wasm if `wasm` is set, otherwise for
/// the target platform.
///
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
    cache_directory: &Path,
    version: impl Into<VersionOrRequirement>,
    wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> {
    let downloader = if wasm {
        SolcDownloader::wasm(version).await
    } else if cfg!(target_os = "linux") {
        SolcDownloader::linux(version).await
    } else if cfg!(target_os = "macos") {
        SolcDownloader::macosx(version).await
    } else if cfg!(target_os = "windows") {
        SolcDownloader::windows(version).await
    } else {
        unimplemented!()
    }
    .context("Failed to initialize the Solc Downloader")?;

    get_or_download(cache_directory, &downloader).await
}
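A minimal caller sketch for `download_solc` (not part of the diff; it assumes a tokio entry point, a writable cache directory, and the same `VersionReq` conversion assumption as above):

// Hedged sketch: fetch, or reuse a cached copy of, a native solc matching ^0.8.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let requirement: semver::VersionReq = "^0.8".parse()?;
    let (version, path) =
        download_solc(std::path::Path::new("./solc-cache"), requirement, false).await?;
    println!("solc {version} cached at {}", path.display());
    Ok(())
}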

@@ -0,0 +1,26 @@
//! Rust type definitions for the solc binary lists.

use std::{collections::HashMap, path::PathBuf};

use semver::Version;
use serde::Deserialize;

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct List {
    pub builds: Vec<Build>,
    pub releases: HashMap<Version, String>,
    #[serde(rename = "latestRelease")]
    pub latest_release: Version,
}

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct Build {
    pub path: PathBuf,
    pub version: Version,
    pub build: String,
    #[serde(rename = "longVersion")]
    pub long_version: String,
    pub keccak256: String,
    pub sha256: String,
    pub urls: Vec<String>,
}
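The `#[serde(rename = ...)]` attributes track the camelCase keys of the upstream list.json. A deserialization sketch over an abridged fragment (not part of the diff; it assumes `serde_json` is available and elides the digests as placeholders):

// Hedged sketch: an abridged list.json fragment deserializing into `List`.
let json = r#"{
    "builds": [{
        "path": "solc-linux-amd64-v0.8.26+commit.8a97fa7a",
        "version": "0.8.26",
        "build": "commit.8a97fa7a",
        "longVersion": "0.8.26+commit.8a97fa7a",
        "keccak256": "0x<elided>",
        "sha256": "0x<elided>",
        "urls": []
    }],
    "releases": { "0.8.26": "solc-linux-amd64-v0.8.26+commit.8a97fa7a" },
    "latestRelease": "0.8.26"
}"#;
let list: List = serde_json::from_str(json).unwrap();
assert_eq!(list.latest_release, semver::Version::new(0, 8, 26));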

Submodule polkadot-sdk added at dc3d0e5ab7

Executable
@@ -0,0 +1,104 @@
#!/bin/bash

# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool

set -e # Exit on any error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"

# Optional positional argument: path to polkadot-sdk directory
POLKADOT_SDK_DIR="${1:-}"

# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"

echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""

# Check if the test repo already exists
if [ -d "$TEST_REPO_DIR" ]; then
    echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}"
    cd "$TEST_REPO_DIR"
    git pull
    cd ..
else
    echo -e "${GREEN}Cloning test repository...${NC}"
    git clone "$TEST_REPO_URL"
fi

# If a polkadot-sdk path is provided, verify and use binaries from there; build if needed
if [ -n "$POLKADOT_SDK_DIR" ]; then
    if [ ! -d "$POLKADOT_SDK_DIR" ]; then
        echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}"
        exit 1
    fi

    POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR")
    echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}"

    REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
    ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
    SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"

    if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
        echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
        (cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
    fi

    for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
        if [ ! -x "$bin" ]; then
            echo -e "${RED}Expected binary not found after build: $bin${NC}"
            exit 1
        fi
    done
else
    echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from \$PATH.${NC}"
fi

# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")

cat > "$CORPUS_FILE" << EOF
{
    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
    "path": "$ABSOLUTE_PATH"
}
EOF

echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"

# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"

echo -e "${GREEN}Starting differential tests...${NC}"
echo "This may take a while..."
echo ""

# Run the tool
RUST_LOG="info" cargo run --release -- execute-tests \
    --platform geth-evm-solc \
    --platform revive-dev-node-polkavm-resolc \
    --corpus "$CORPUS_FILE" \
    --working-directory "$WORKDIR" \
    --concurrency.number-of-nodes 5 \
    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
    2> output.log

echo -e "${GREEN}=== Test run completed! ===${NC}"

@@ -0,0 +1,583 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "title": "Metadata",
  "description": "A MatterLabs metadata file.\n\nThis defines the structure that the MatterLabs metadata files follow for defining the tests or\nthe workloads.\n\nEach metadata file is composed of multiple test cases where each test case is isolated from the\nothers and runs in a completely different address space. Each test case is composed of a number\nof steps and assertions that should be performed as part of the test case.",
  "type": "object",
  "properties": {
    "comment": {
      "description": "This is an optional comment on the metadata file which has no impact on the execution in any\nway.",
      "type": [
        "string",
        "null"
      ]
    },
    "ignore": {
      "description": "An optional boolean which defines if the metadata file as a whole should be ignored. If null\nthen the metadata file will not be ignored.",
      "type": [
        "boolean",
        "null"
      ]
    },
    "targets": {
      "description": "An optional vector of targets that this Metadata file's cases can be executed on. As an\nexample, if we wish for the metadata file's cases to only be run on PolkaVM then we'd\nspecify a target of \"PolkaVM\" in here.",
      "type": [
        "array",
        "null"
      ],
      "items": {
        "$ref": "#/$defs/VmIdentifier"
      }
    },
    "cases": {
      "description": "A vector of the test cases and workloads contained within the metadata file. This is their\nprimary description.",
      "type": "array",
      "items": {
        "$ref": "#/$defs/Case"
      }
    },
    "contracts": {
      "description": "A map of all of the contracts that the test requires to run.\n\nThis is a map where the key is the name of the contract instance and the value is the\ncontract's path and ident in the file.\n\nIf any contract is to be used by the test then it must be included in here first so that the\nframework is aware of its path, compiles it, and prepares it.",
      "type": [
        "object",
        "null"
      ],
      "additionalProperties": {
        "$ref": "#/$defs/ContractPathAndIdent"
      }
    },
    "libraries": {
      "description": "The set of libraries that this metadata file requires.",
      "type": [
        "object",
        "null"
      ],
      "additionalProperties": {
        "type": "object",
        "additionalProperties": {
          "$ref": "#/$defs/ContractInstance"
        }
      }
    },
    "modes": {
      "description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```",
      "type": [
        "array",
        "null"
      ],
      "items": {
        "$ref": "#/$defs/ParsedMode"
      }
    },
    "required_evm_version": {
      "description": "This field specifies an EVM version requirement for the test case: the test will only be\nrun if the EVM version of the nodes matches the EVM version specified here.",
      "anyOf": [
        {
          "$ref": "#/$defs/EvmVersionRequirement"
        },
        {
          "type": "null"
        }
      ]
    },
    "compiler_directives": {
      "description": "A set of compilation directives that will be passed to the compiler whenever the contracts\nfor the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]\nis just a filter for when a test can run whereas this is an instruction to the compiler.",
      "anyOf": [
        {
          "$ref": "#/$defs/CompilationDirectives"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "required": [
    "cases"
  ],
"$defs": {
|
||||
"VmIdentifier": {
|
||||
"description": "An enum representing the identifiers of the supported VMs.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "The ethereum virtual machine.",
|
||||
"type": "string",
|
||||
"const": "evm"
|
||||
},
|
||||
{
|
||||
"description": "The EraVM virtual machine.",
|
||||
"type": "string",
|
||||
"const": "eravm"
|
||||
},
|
||||
{
|
||||
"description": "Polkadot's PolaVM Risc-v based virtual machine.",
|
||||
"type": "string",
|
||||
"const": "polkavm"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Case": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "An optional name of the test case.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"comment": {
|
||||
"description": "An optional comment on the case which has no impact on the execution in any way.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"modes": {
|
||||
"description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nIf this is provided then it takes higher priority than the modes specified in the metadata\nfile.",
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/$defs/ParsedMode"
|
||||
}
|
||||
},
|
||||
"inputs": {
|
||||
"description": "The set of steps to run as part of this test case.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/$defs/Step"
|
||||
}
|
||||
},
|
||||
"group": {
|
||||
"description": "An optional name of the group of tests that this test belongs to.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"expected": {
|
||||
"description": "An optional set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be ran is that the transaction\nwas successful.\n\nThis expectation that's on the case itself will be attached to the final step of the case.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/Expected"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"ignore": {
|
||||
"description": "An optional boolean which defines if the case as a whole should be ignored. If null then the\ncase will not be ignored.",
|
||||
"type": [
|
||||
"boolean",
|
||||
"null"
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"inputs"
|
||||
]
|
||||
},
|
||||
"ParsedMode": {
|
||||
"description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nWe can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].",
|
||||
"type": "string"
|
||||
},
|
||||
"Step": {
|
||||
"description": "A test step.\n\nA test step can be anything. It could be an invocation to a function, an assertion, or any other\naction that needs to be run or executed on the nodes used in the tests.",
|
||||
"anyOf": [
|
||||
{
|
||||
"description": "A function call or an invocation to some function on some smart contract.",
|
||||
"$ref": "#/$defs/FunctionCallStep"
|
||||
},
|
||||
{
|
||||
"description": "A step for performing a balance assertion on some account or contract.",
|
||||
"$ref": "#/$defs/BalanceAssertionStep"
|
||||
},
|
||||
{
|
||||
"description": "A step for asserting that the storage of some contract or account is empty.",
|
||||
"$ref": "#/$defs/StorageEmptyAssertionStep"
|
||||
},
|
||||
{
|
||||
"description": "A special step for repeating a bunch of steps a certain number of times.",
|
||||
"$ref": "#/$defs/RepeatStep"
|
||||
},
|
||||
{
|
||||
"description": "A step type that allows for a new account address to be allocated and to later on be used\nas the caller in another step.",
|
||||
"$ref": "#/$defs/AllocateAccountStep"
|
||||
}
|
||||
]
|
||||
},
|
||||
"FunctionCallStep": {
|
||||
"description": "This is an input step which is a transaction description that the framework translates into a\ntransaction and executes on the nodes.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"caller": {
|
||||
"description": "The address of the account performing the call and paying the fees for it.",
|
||||
"type": "string",
|
||||
"default": "0x90f8bf6a479f320ead074411a4b0e7944ea8c9c1"
|
||||
},
|
||||
"comment": {
|
||||
"description": "An optional comment on the step which has no impact on the execution in any way.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"instance": {
|
||||
"description": "The contract instance that's being called in this transaction step.",
|
||||
"$ref": "#/$defs/ContractInstance",
|
||||
"default": "Test"
|
||||
},
|
||||
"method": {
|
||||
"description": "The method that's being called in this step.",
|
||||
"$ref": "#/$defs/Method"
|
||||
},
|
||||
"calldata": {
|
||||
"description": "The calldata that the function should be invoked with.",
|
||||
"$ref": "#/$defs/Calldata",
|
||||
"default": []
|
||||
},
|
||||
"expected": {
|
||||
"description": "A set of assertions and expectations to have for the transaction.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/Expected"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"value": {
|
||||
"description": "An optional value to provide as part of the transaction.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/EtherValue"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"variable_assignments": {
|
||||
"description": "Variable assignment to perform in the framework allowing us to reference them again later on\nduring the execution.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/VariableAssignments"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"method"
|
||||
]
|
||||
},
|
||||
"ContractInstance": {
|
||||
"description": "Represents a contract instance found a metadata file.\n\nTypically, this is used as the key to the \"contracts\" field of metadata files.",
|
||||
"type": "string"
|
||||
},
|
||||
"Method": {
|
||||
"description": "Specify how the contract is called.",
|
||||
"anyOf": [
|
||||
{
|
||||
"description": "Initiate a deploy transaction, calling contracts constructor.\n\nIndicated by `#deployer`.",
|
||||
"type": "string",
|
||||
"const": "#deployer"
|
||||
},
|
||||
{
|
||||
"description": "Does not calculate and insert a function selector.\n\nIndicated by `#fallback`.",
|
||||
"type": "string",
|
||||
"const": "#fallback"
|
||||
},
|
||||
{
|
||||
"description": "Call the public function with the given name.",
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
},
|
||||
"Calldata": {
|
||||
"description": "A type definition for the calldata supported by the testing framework.\n\nWe choose to document all of the types used in [`Calldata`] in this one doc comment to elaborate\non why they exist and consolidate all of the documentation for calldata in a single place where\nit can be viewed and understood.\n\nThe [`Single`] variant of this enum is quite simple and straightforward: it's a hex-encoded byte\narray of the calldata.\n\nThe [`Compound`] type is more intricate and allows for capabilities such as resolution and some\nsimple arithmetic operations. It houses a vector of [`CalldataItem`]s which is just a wrapper\naround an owned string.\n\nA [`CalldataItem`] could be a simple hex string of a single calldata argument, but it could also\nbe something that requires resolution such as `MyContract.address` which is a variable that is\nunderstood by the resolution logic to mean \"Lookup the address of this particular contract\ninstance\".\n\nIn addition to the above, the format supports some simple arithmetic operations like add, sub,\ndivide, multiply, bitwise AND, bitwise OR, and bitwise XOR. Our parser understands the [reverse\npolish notation] simply because it's easy to write a calculator for that notation and since we\ndo not have plans to use arithmetic too often in tests. In reverse polish notation a typical\n`2 + 4` would be written as `2 4 +` which makes this notation very simple to implement through\na stack.\n\nCombining the above, a single [`CalldataItem`] could employ both resolution and arithmetic at\nthe same time. For example, a [`CalldataItem`] of `$BLOCK_NUMBER $BLOCK_NUMBER +` means that\nthe block number should be retrieved and then it should be added to itself.\n\nInternally, we split the [`CalldataItem`] by spaces. Therefore, `$BLOCK_NUMBER $BLOCK_NUMBER+`\nis invalid but `$BLOCK_NUMBER $BLOCK_NUMBER +` is valid and can be understood by the parser and\ncalculator. After the split is done, each token is parsed into a [`CalldataToken<&str>`] forming\nan [`Iterator`] over [`CalldataToken<&str>`]. A [`CalldataToken<&str>`] can then be resolved\ninto a [`CalldataToken<U256>`] through the resolution logic. Finally, after resolution is done,\nthis iterator of [`CalldataToken<U256>`] is collapsed into the final result by applying the\narithmetic operations requested.\n\nFor example, supplying a [`Compound`] calldata of `0xdeadbeef` produces an iterator of a single\n[`CalldataToken<&str>`] items of the value [`CalldataToken::Item`] of the string value 12 which\nwe can then resolve into the appropriate [`U256`] value and convert into calldata.\n\nIn summary, the various types used in [`Calldata`] represent the following:\n- [`CalldataItem`]: A calldata string from the metadata files.\n- [`CalldataToken<&str>`]: Typically used in an iterator of items from the space splitted\n [`CalldataItem`] and represents a token that has not yet been resolved into its value.\n- [`CalldataToken<U256>`]: Represents a token that's been resolved from being a string and into\n the word-size calldata argument on which we can perform arithmetic.\n\n[`Single`]: Calldata::Single\n[`Compound`]: Calldata::Compound\n[reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/$defs/CalldataItem"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"CalldataItem": {
|
||||
"description": "This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved\naccording to the resolution rules of the tool.",
|
||||
"type": "string"
|
||||
},
|
||||
"Expected": {
|
||||
"description": "A set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be ran is that the transaction\nwas successful.",
|
||||
"anyOf": [
|
||||
{
|
||||
"description": "An assertion that the transaction succeeded and returned the provided set of data.",
|
||||
"$ref": "#/$defs/Calldata"
|
||||
},
|
||||
{
|
||||
"description": "A more complex assertion.",
|
||||
"$ref": "#/$defs/ExpectedOutput"
|
||||
},
|
||||
{
|
||||
"description": "A set of assertions.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/$defs/ExpectedOutput"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"ExpectedOutput": {
|
||||
"description": "A set of assertions to run on the transaction.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"compiler_version": {
|
||||
"description": "An optional compiler version that's required in order for this assertion to run.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"return_data": {
|
||||
"description": "An optional field of the expected returns from the invocation.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/Calldata"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"events": {
|
||||
"description": "An optional set of assertions to run on the emitted events from the transaction.",
|
||||
"type": [
|
||||
"array",
|
||||
"null"
|
||||
],
|
||||
"items": {
|
||||
"$ref": "#/$defs/Event"
|
||||
}
|
||||
},
|
||||
"exception": {
|
||||
"description": "A boolean which defines whether we expect the transaction to succeed or fail.",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"Event": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"address": {
|
||||
"description": "An optional field of the address of the emitter of the event.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/StepAddress"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
},
|
||||
"topics": {
|
||||
"description": "The set of topics to expect the event to have.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"values": {
|
||||
"description": "The set of values to expect the event to have.",
|
||||
"$ref": "#/$defs/Calldata"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"topics",
|
||||
"values"
|
||||
]
|
||||
},
|
||||
"StepAddress": {
|
||||
"description": "An address type that might either be an address literal or a resolvable address.",
|
||||
"type": "string"
|
||||
},
|
||||
"EtherValue": {
|
||||
"description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
|
||||
"type": "string"
|
||||
},
|
||||
"VariableAssignments": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"return_data": {
|
||||
"description": "A vector of the variable names to assign to the return data.\n\nExample: `UniswapV3PoolAddress`",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"return_data"
|
||||
]
|
||||
},
|
||||
"BalanceAssertionStep": {
|
||||
"description": "This represents a balance assertion step where the framework needs to query the balance of some\naccount or contract and assert that it's some amount.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"comment": {
|
||||
"description": "An optional comment on the balance assertion.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"address": {
|
||||
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
|
||||
"$ref": "#/$defs/StepAddress"
|
||||
},
|
||||
"expected_balance": {
|
||||
"description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"address",
|
||||
"expected_balance"
|
||||
]
|
||||
},
|
||||
"StorageEmptyAssertionStep": {
|
||||
"description": "This represents an assertion for the storage of some contract or account and whether it's empty\nor not.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"comment": {
|
||||
"description": "An optional comment on the storage empty assertion.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"address": {
|
||||
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
|
||||
"$ref": "#/$defs/StepAddress"
|
||||
},
|
||||
"is_storage_empty": {
|
||||
"description": "A boolean of whether the storage of the address is empty or not.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"address",
|
||||
"is_storage_empty"
|
||||
]
|
||||
},
|
||||
"RepeatStep": {
|
||||
"description": "This represents a repetition step which is a special step type that allows for a sequence of\nsteps to be repeated (on different drivers) a certain number of times.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"comment": {
|
||||
"description": "An optional comment on the repetition step.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"repeat": {
|
||||
"description": "The number of repetitions that the steps should be repeated for.",
|
||||
"type": "integer",
|
||||
"format": "uint",
|
||||
"minimum": 0
|
||||
},
|
||||
"steps": {
|
||||
"description": "The sequence of steps to repeat for the above defined number of repetitions.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/$defs/Step"
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"repeat",
|
||||
"steps"
|
||||
]
|
||||
},
|
||||
"AllocateAccountStep": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"comment": {
|
||||
"description": "An optional comment on the account allocation step.",
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"allocate_account": {
|
||||
"description": "An instruction to allocate a new account with the value being the variable name of that\naccount. This must start with `$VARIABLE:` and then be followed by the variable name of the\naccount.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"allocate_account"
|
||||
]
|
||||
},
|
||||
"ContractPathAndIdent": {
|
||||
"description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
|
||||
"type": "string"
|
||||
},
|
||||
"EvmVersionRequirement": {
|
||||
"description": "An EVM version requirement that the test case has. This gets serialized and deserialized from\nand into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the\nEVM version.\n\nWhen specified, the framework will only run the test if the node's EVM version matches that\nrequired by the metadata file.",
|
||||
"type": "string"
|
||||
},
|
||||
"CompilationDirectives": {
|
||||
"description": "A set of compilation directives that will be passed to the compiler whenever the contracts for\nthe test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is\njust a filter for when a test can run whereas this is an instruction to the compiler.\nDefines how the compiler should handle revert strings.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"revert_string_handling": {
|
||||
"description": "Defines how the revert strings should be handled.",
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/RevertString"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"RevertString": {
|
||||
"description": "Defines how the compiler should handle revert strings.",
|
||||
"oneOf": [
|
||||
{
|
||||
"description": "The default handling of the revert strings.",
|
||||
"type": "string",
|
||||
"const": "default"
|
||||
},
|
||||
{
|
||||
"description": "The debug handling of the revert strings.",
|
||||
"type": "string",
|
||||
"const": "debug"
|
||||
},
|
||||
{
|
||||
"description": "Strip the revert strings.",
|
||||
"type": "string",
|
||||
"const": "strip"
|
||||
},
|
||||
{
|
||||
"description": "Provide verbose debug strings for the revert string.",
|
||||
"type": "string",
|
||||
"const": "verboseDebug"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
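To ground the schema, here is a small illustrative metadata file that validates against it; the contract path, method name, and values are invented for the example, not taken from the real corpus:

{
  "comment": "Illustrative example only",
  "contracts": {
    "Test": "contracts/Test.sol:Test"
  },
  "cases": [
    {
      "name": "deploy_then_call",
      "inputs": [
        {
          "instance": "Test",
          "method": "#deployer"
        },
        {
          "instance": "Test",
          "method": "get",
          "calldata": ["Test.address"],
          "expected": {
            "return_data": ["0x01"],
            "exception": false
          }
        }
      ]
    }
  ]
}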