Mirror of https://github.com/pezkuwichain/revive-differential-tests.git
Synced 2026-04-22 20:47:58 +00:00

Compare commits (30 commits)
| SHA1 |
|---|
| 49cbc51546 |
| c2526e48e7 |
| 7878f68c26 |
| 8b1afc36a3 |
| 60328cd493 |
| eb264fcc7b |
| 84b139d3b4 |
| d93824d973 |
| bec5a7e390 |
| 85033cfead |
| 76d6a154c1 |
| c58551803d |
| 185edcfad9 |
| 09d56f5177 |
| a59e287fa1 |
| f2045db0e9 |
| 5a11f44673 |
| 46aea0890d |
| 9b40c9b9e3 |
| f67a9bf643 |
| 67d767ffde |
| f7fbe094ec |
| 90b2dd4cfe |
| 64d63ef999 |
| 757bfbe116 |
| 8619e7feb0 |
| edba49b301 |
| 9980926d40 |
| ff993d44a5 |
| 8cbb1a9f77 |
```diff
@@ -7,3 +7,9 @@ node_modules
 # We do not want to commit any log files that we produce from running the code locally so this is
 # added to the .gitignore file.
 *.log
+
+profile.json.gz
+resolc-compiler-tests
+workdir
+
+!/schema.json
```
Generated file: +676 −15 (file diff suppressed because it is too large)

+16 −2
```diff
@@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT/Apache-2.0"
 edition = "2024"
 repository = "https://github.com/paritytech/revive-differential-testing.git"
-rust-version = "1.85.0"
+rust-version = "1.87.0"
 
 [workspace.dependencies]
 revive-dt-common = { version = "0.1.0", path = "crates/common" }
@@ -25,30 +25,42 @@ revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
 alloy-primitives = "1.2.1"
 alloy-sol-types = "1.2.1"
 anyhow = "1.0"
+bson = { version = "2.15.0" }
+cacache = { version = "13.1.0" }
 clap = { version = "4", features = ["derive"] }
+dashmap = { version = "6.1.0" }
 foundry-compilers-artifacts = { version = "0.18.0" }
 futures = { version = "0.3.31" }
 hex = "0.4.3"
+regex = "1"
+moka = "0.12.10"
+paste = "1.0.15"
 reqwest = { version = "0.12.15", features = ["json"] }
 once_cell = "1.21"
+schemars = { version = "1.0.4", features = ["semver1"] }
 semver = { version = "1.0", features = ["serde"] }
 serde = { version = "1.0", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0", default-features = false, features = [
     "arbitrary_precision",
     "std",
+    "unbounded_depth",
 ] }
+serde_with = { version = "3.14.0" }
 sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
+strum = { version = "0.27.2", features = ["derive"] }
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
+thiserror = "2"
 tokio = { version = "1.47.0", default-features = false, features = [
     "rt-multi-thread",
     "process",
     "rt",
 ] }
 uuid = { version = "1.8", features = ["v4"] }
-tracing = "0.1.41"
+tracing = { version = "0.1.41" }
+tracing-appender = { version = "0.2.3" }
 tracing-subscriber = { version = "0.3.19", default-features = false, features = [
     "fmt",
     "json",
@@ -83,3 +95,5 @@ features = [
 inherits = "release"
 lto = true
 codegen-units = 1
+
+[workspace.lints.clippy]
```
````diff
@@ -1,34 +1,211 @@
-# revive-differential-tests
-
-The revive differential testing framework allows to define smart contract tests in a declarative manner in order to compile and execute them against different Ethereum-compatible blockchain implmentations. This is useful to:
-
-- Analyze observable differences in contract compilation and execution across different blockchain implementations, including contract storage, account balances, transaction output and emitted events on a per-transaction base.
-- Collect and compare benchmark metrics such as code size, gas usage or transaction throughput per seconds (TPS) of different blockchain implementations.
-- Ensure reproducible contract builds across multiple compiler implementations or multiple host platforms.
-- Implement end-to-end regression tests for Ethereum-compatible smart contract stacks.
-
-# Declarative test format
-
-For now, the format used to write tests is the [matter-labs era compiler format](https://github.com/matter-labs/era-compiler-tests?tab=readme-ov-file#matter-labs-simplecomplex-format). This allows us to re-use many tests from their corpora.
-
-# The `retester` utility
-
-The `retester` helper utilty is used to run the tests. To get an idea of what `retester` can do, please consults its command line help:
-
-```
-cargo run -p revive-dt-core -- --help
-```
-
-For example, to run the [complex Solidity tests](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity/complex), define a corpus structure as follows:
-
-```json
-{
-    "name": "ML Solidity Complex",
-    "path": "/path/to/era-compiler-tests/solidity/complex"
-}
-```
-
-Assuming this to be saved in a `ml-solidity-complex.json` file, the following command will try to compile and execute the tests found inside the corpus:
-
-```bash
-RUST_LOG=debug cargo r --release -p revive-dt-core -- --corpus ml-solidity-complex.json
-```
+<div align="center">
+
+<h1><code>Revive Differential Tests</code></h1>
+
+<p>
+<strong>Differential testing for Ethereum-compatible smart contract stacks</strong>
+</p>
+</div>
+
+This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
+
+- Geth (EVM reference implementation)
+- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
+
+Use it to:
+
+- Detect observable differences between platforms (execution success, logs, state changes)
+- Ensure reproducible builds across compilers/hosts
+- Run end-to-end regression suites
+
+This framework uses the [MatterLabs tests format](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity) for declarative tests, which is composed of the following:
+
+- Metadata files: a metadata file is akin to a module of tests in Rust.
+- Each metadata file contains multiple cases; a case is akin to a Rust test, where a module can contain multiple tests.
+- Each case contains multiple steps and assertions; this is akin to any Rust test that contains multiple statements.
+
+Metadata files are JSON files, but Solidity files can also be metadata files if they include inline metadata provided as a comment at the top of the contract.
+
+All of the steps contained within each test case are either:
+
+- Transactions that need to be submitted and assertions to run on the submitted transactions.
+- Assertions on the state of the chain (e.g., account balances, storage, etc.)
+
+All of the transactions submitted by this tool to the test nodes follow a similar logic to what wallets do: we first use alloy to estimate the transaction fees, then we attach them to the transaction, submit it to the node, and await the transaction receipt.
+
+This repository contains none of the tests; it only contains the testing framework and test runner. The tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository, which is a clone of [MatterLabs' test suite](https://github.com/matter-labs/era-compiler-tests) with some modifications and adjustments made to suit our use case.
+
+## Requirements
+
+This section describes the dependencies that this framework requires to run. Compiling the framework is straightforward; no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
+
+- Stable Rust
+- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- Kitchensink - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
+- Solc - This is actually a transitive dependency: while this tool doesn't require solc (it downloads the versions that it requires), resolc requires that Solc is installed and available in the path.
+- Resolc - This is required to compile the contracts to PolkaVM bytecode.
+
+All of the above need to be installed and available in the path in order for the tool to work.
+
+## Running The Tool
+
+This tool is updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
+
+```bash
+$ cargo run --release -- --help
+Usage: retester [OPTIONS]
+
+Options:
+  -s, --solc <SOLC>
+          The `solc` version to use if the test didn't specify it explicitly
+
+          [default: 0.8.29]
+
+      --wasm
+          Use the Wasm compiler versions
+
+  -r, --resolc <RESOLC>
+          The path to the `resolc` executable to be tested.
+
+          By default it uses the `resolc` binary found in `$PATH`.
+
+          If `--wasm` is set, this should point to the resolc Wasm file.
+
+          [default: resolc]
+
+  -c, --corpus <CORPUS>
+          A list of test corpus JSON files to be tested
+
+  -w, --workdir <WORKING_DIRECTORY>
+          A place to store temporary artifacts during test execution.
+
+          Creates a temporary dir if not specified.
+
+  -g, --geth <GETH>
+          The path to the `geth` executable.
+
+          By default it uses the `geth` binary found in `$PATH`.
+
+          [default: geth]
+
+      --geth-start-timeout <GETH_START_TIMEOUT>
+          The maximum time in milliseconds to wait for geth to start
+
+          [default: 5000]
+
+      --genesis <GENESIS_FILE>
+          Configure nodes according to this genesis.json file
+
+          [default: genesis.json]
+
+  -a, --account <ACCOUNT>
+          The signing account private key
+
+          [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
+
+      --private-keys-count <PRIVATE_KEYS_TO_ADD>
+          This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
+
+          [default: 100000]
+
+  -l, --leader <LEADER>
+          The differential testing leader node implementation
+
+          [default: geth]
+
+          Possible values:
+          - geth:        The go-ethereum reference full node EVM implementation
+          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
+
+  -f, --follower <FOLLOWER>
+          The differential testing follower node implementation
+
+          [default: kitchensink]
+
+          Possible values:
+          - geth:        The go-ethereum reference full node EVM implementation
+          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
+
+      --compile-only <COMPILE_ONLY>
+          Only compile against this testing platform (doesn't execute the tests)
+
+          Possible values:
+          - geth:        The go-ethereum reference full node EVM implementation
+          - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
+
+      --number-of-nodes <NUMBER_OF_NODES>
+          Determines the amount of nodes that will be spawned for each chain
+
+          [default: 1]
+
+      --number-of-threads <NUMBER_OF_THREADS>
+          Determines the amount of tokio worker threads that will be used
+
+          [default: 16]
+
+      --number-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
+          Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes
+
+  -e, --extract-problems
+          Extract problems back to the test corpus
+
+  -k, --kitchensink <KITCHENSINK>
+          The path to the `kitchensink` executable.
+
+          By default it uses the `substrate-node` binary found in `$PATH`.
+
+          [default: substrate-node]
+
+  -p, --eth_proxy <ETH_PROXY>
+          The path to the `eth_proxy` executable.
+
+          By default it uses the `eth-rpc` binary found in `$PATH`.
+
+          [default: eth-rpc]
+
+  -i, --invalidate-compilation-cache
+          Controls if the compilation cache should be invalidated or not
+
+  -h, --help
+          Print help (see a summary with '-h')
+```
+
+To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "path": "resolc-compiler-tests/fixtures/solidity"
+}
+```
+
+> [!NOTE]
+> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
+
+The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
+
+The simplest command to run this tool is the following:
+
+```bash
+RUST_LOG="info" cargo run --release -- execute-tests \
+    --follower geth \
+    --corpus path_to_your_corpus_file.json \
+    --working-directory path_to_a_temporary_directory_to_cache_things_in \
+    --concurrency.number-of-nodes 5 \
+    > logs.log \
+    2> output.log
+```
+
+The above command will run the tool, executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing to the tool then the `logs.log` file will be very valuable.
+
+If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "paths": [
+        "path/to/a/single/metadata/file/I/want/to/run.json",
+        "path/to/a/directory/to/find/all/metadata/files/within"
+    ]
+}
+```
````
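The new README's wallet-like submission flow (estimate fees, attach them, submit, await the receipt) translates roughly into the alloy sketch below. This is an editor's illustration, not code from the repository: the endpoint, recipient, and value are hypothetical, and builder entry points such as `on_http` have shifted names across alloy releases.

```rust
use alloy::network::TransactionBuilder;
use alloy::primitives::{address, U256};
use alloy::providers::{Provider, ProviderBuilder};
use alloy::rpc::types::TransactionRequest;
use anyhow::Result;

async fn submit_and_wait() -> Result<()> {
    // The default ProviderBuilder installs the "recommended fillers", which
    // estimate gas and EIP-1559 fees and attach them before submission --
    // the same estimate-then-attach flow the README describes.
    let provider = ProviderBuilder::new().on_http("http://127.0.0.1:8545".parse()?);

    // Hypothetical recipient and value, purely for illustration.
    let tx = TransactionRequest::default()
        .with_to(address!("deadbeef00000000000000000000000000000042"))
        .with_value(U256::from(1_000u64));

    // Submit the transaction, then await its receipt before asserting on it.
    let receipt = provider.send_transaction(tx).await?.get_receipt().await?;
    println!("mined in block {:?}", receipt.block_number);
    Ok(())
}
```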
```diff
@@ -1,13 +1,24 @@
 {
     "modes": [
         "Y >=0.8.9",
-        "E",
-        "I"
+        "E"
     ],
     "cases": [
         {
             "name": "first",
             "inputs": [
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "expected_balance": "1233"
+                },
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "is_storage_empty": true
+                },
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "is_storage_empty": false
+                },
                 {
                     "instance": "WBTC_1",
                     "method": "#deployer",
```
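The new `inputs` entries above mix two assertion shapes, an expected balance and a storage-emptiness check, both keyed by `address`. A minimal, hypothetical serde model shows how such a union can be deserialized; the framework's actual schema is richer than this two-variant sketch.

```rust
use serde::Deserialize;

// Hypothetical model of the two assertion inputs shown in the hunk above.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum StateAssertion {
    Balance {
        address: String,
        expected_balance: String,
    },
    Storage {
        address: String,
        is_storage_empty: bool,
    },
}

fn main() -> serde_json::Result<()> {
    let raw = r#"{
        "address": "0xdeadbeef00000000000000000000000000000042",
        "expected_balance": "1233"
    }"#;
    // The untagged representation picks the variant whose fields match.
    let parsed: StateAssertion = serde_json::from_str(raw)?;
    println!("{parsed:?}");
    Ok(())
}
```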
```diff
@@ -0,0 +1 @@
+
```
```diff
@@ -10,4 +10,11 @@ rust-version.workspace = true
 
 [dependencies]
 anyhow = { workspace = true }
+moka = { workspace = true, features = ["sync"] }
+once_cell = { workspace = true }
 semver = { workspace = true }
+serde = { workspace = true }
+tokio = { workspace = true, default-features = false, features = ["time"] }
+
+[lints]
+workspace = true
```
```diff
@@ -0,0 +1,49 @@
+//! This module implements a cached file system allowing for results to be stored in-memory rather
+//! than being queried from the file system again.
+
+use std::fs;
+use std::io::{Error, Result};
+use std::path::{Path, PathBuf};
+
+use moka::sync::Cache;
+use once_cell::sync::Lazy;
+
+pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
+    static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));
+
+    let path = path.as_ref().canonicalize()?;
+    match READ_CACHE.get(path.as_path()) {
+        Some(content) => Ok(content),
+        None => {
+            let content = fs::read(path.as_path())?;
+            READ_CACHE.insert(path, content.clone());
+            Ok(content)
+        }
+    }
+}
+
+pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
+    let content = read(path)?;
+    String::from_utf8(content).map_err(|_| {
+        Error::new(
+            std::io::ErrorKind::InvalidData,
+            "The contents of the file are not valid UTF8",
+        )
+    })
+}
+
+pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
+    static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));
+
+    let path = path.as_ref().canonicalize()?;
+    match READ_DIR_CACHE.get(path.as_path()) {
+        Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
+        None => {
+            let entries = fs::read_dir(path.as_path())?
+                .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
+                .collect();
+            READ_DIR_CACHE.insert(path.clone(), entries);
+            Ok(read_dir(path).unwrap())
+        }
+    }
+}
```
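A short usage sketch of the new cached wrappers, assuming the module is consumed as `revive_dt_common::cached_fs` per the `lib.rs` hunk later in this diff; the paths are illustrative. Note that `read_dir` populates the moka cache on a miss and then recurses once, so both code paths hand back the cached listing.

```rust
use revive_dt_common::cached_fs;

fn main() -> std::io::Result<()> {
    // The first read hits the disk; the second is served from the in-memory
    // cache because both calls canonicalize to the same path.
    let first = cached_fs::read_to_string("Cargo.toml")?;
    let second = cached_fs::read_to_string("Cargo.toml")?;
    assert_eq!(first, second);

    // Directory listings are cached the same way, keyed by canonical path.
    for entry in cached_fs::read_dir("crates")? {
        println!("{}", entry?.display());
    }
    Ok(())
}
```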
```diff
@@ -3,19 +3,28 @@ use std::{
     path::Path,
 };
 
-use anyhow::Result;
+use anyhow::{Context, Result};
 
 /// This method clears the passed directory of all of the files and directories contained within
 /// without deleting the directory.
 pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
-    for entry in read_dir(path.as_ref())? {
-        let entry = entry?;
+    for entry in read_dir(path.as_ref())
+        .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
+    {
+        let entry = entry.with_context(|| {
+            format!(
+                "Failed to read an entry in directory: {}",
+                path.as_ref().display()
+            )
+        })?;
         let entry_path = entry.path();
 
         if entry_path.is_file() {
-            remove_file(entry_path)?
+            remove_file(&entry_path)
+                .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
         } else {
-            remove_dir_all(entry_path)?
+            remove_dir_all(&entry_path)
+                .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
        }
    }
    Ok(())
```
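The pattern this hunk introduces, wrapping each fallible filesystem call with the path that failed, is worth reusing elsewhere; a minimal standalone sketch:

```rust
use std::{fs, path::Path};

use anyhow::{Context as _, Result};

// Same shape as the clear_directory change above: the closure defers the
// string formatting until an error actually occurs, so the happy path pays
// nothing for the richer message.
fn remove_file_verbose(path: &Path) -> Result<()> {
    fs::remove_file(path)
        .with_context(|| format!("Failed to remove file: {}", path.display()))
}
```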
```diff
@@ -0,0 +1,3 @@
+mod poll;
+
+pub use poll::*;
```

```diff
@@ -0,0 +1,72 @@
+use std::ops::ControlFlow;
+use std::time::Duration;
+
+use anyhow::{Context as _, Result, anyhow};
+
+const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
+
+/// A function that polls for a fallible future for some period of time and errors if it fails to
+/// get a result after polling.
+///
+/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
+/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
+/// returns an [`Err`] in which case the function stops polling and returns the error.
+///
+/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
+/// the permitted polling duration then this function returns an [`Err`]
+///
+/// [`Break`]: ControlFlow::Break
+/// [`Continue`]: ControlFlow::Continue
+pub async fn poll<F, O>(
+    polling_duration: Duration,
+    polling_wait_behavior: PollingWaitBehavior,
+    mut future: impl FnMut() -> F,
+) -> Result<O>
+where
+    F: Future<Output = Result<ControlFlow<O, ()>>>,
+{
+    let mut retries = 0;
+    let mut total_wait_duration = Duration::ZERO;
+    let max_allowed_wait_duration = polling_duration;
+
+    loop {
+        if total_wait_duration >= max_allowed_wait_duration {
+            break Err(anyhow!(
+                "Polling failed after {} retries and a total of {:?} of wait time",
+                retries,
+                total_wait_duration
+            ));
+        }
+
+        match future()
+            .await
+            .context("Polled future returned an error during polling loop")?
+        {
+            ControlFlow::Continue(()) => {
+                let next_wait_duration = match polling_wait_behavior {
+                    PollingWaitBehavior::Constant(duration) => duration,
+                    PollingWaitBehavior::ExponentialBackoff => {
+                        Duration::from_secs(2u64.pow(retries))
+                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
+                    }
+                };
+                let next_wait_duration =
+                    next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
+                total_wait_duration += next_wait_duration;
+                retries += 1;
+
+                tokio::time::sleep(next_wait_duration).await;
+            }
+            ControlFlow::Break(output) => {
+                break Ok(output);
+            }
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
+pub enum PollingWaitBehavior {
+    Constant(Duration),
+    #[default]
+    ExponentialBackoff,
+}
```
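A sketch of how the new `poll` helper might be driven, for example to wait on a freshly spawned node's RPC port. The readiness probe is hypothetical; the `poll` and `PollingWaitBehavior` paths follow the `futures` module re-exports added above.

```rust
use std::{net::TcpStream, ops::ControlFlow, time::Duration};

use anyhow::Result;
use revive_dt_common::futures::{PollingWaitBehavior, poll};

// Hypothetical readiness check: break with the port once something is
// listening on it, otherwise ask the polling loop to continue waiting.
async fn wait_for_rpc_port() -> Result<u16> {
    poll(
        Duration::from_secs(30),
        PollingWaitBehavior::Constant(Duration::from_millis(250)),
        || async {
            match TcpStream::connect(("127.0.0.1", 8545)) {
                Ok(_) => Ok(ControlFlow::Break(8545u16)),
                Err(_) => Ok(ControlFlow::Continue(())),
            }
        },
    )
    .await
}
```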
```diff
@@ -0,0 +1,21 @@
+/// An iterator that could be either of two iterators.
+#[derive(Clone, Debug)]
+pub enum EitherIter<A, B> {
+    A(A),
+    B(B),
+}
+
+impl<A, B, T> Iterator for EitherIter<A, B>
+where
+    A: Iterator<Item = T>,
+    B: Iterator<Item = T>,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            EitherIter::A(iter) => iter.next(),
+            EitherIter::B(iter) => iter.next(),
+        }
+    }
+}
```
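The point of `EitherIter` is to let two branches return different concrete iterator types behind a single `impl Iterator`, without boxing. A small usage sketch:

```rust
use revive_dt_common::iterators::EitherIter;

// `filter` and `map` produce different concrete adapter types, so a plain
// if/else could not return `impl Iterator` here without this wrapper (or a
// Box<dyn Iterator>).
fn evens_or_doubled(evens: bool) -> impl Iterator<Item = u32> {
    if evens {
        EitherIter::A((0..10).filter(|n| n % 2 == 0))
    } else {
        EitherIter::B((0..10).map(|n| n * 2))
    }
}
```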
```diff
@@ -1,4 +1,8 @@
-use std::{borrow::Cow, collections::HashSet, path::PathBuf};
+use std::{
+    borrow::Cow,
+    collections::HashSet,
+    path::{Path, PathBuf},
+};
 
 /// An iterator that finds files of a certain extension in the provided directory. You can think of
 /// this a glob pattern similar to: `${path}/**/*.md`
@@ -15,14 +19,20 @@ pub struct FilesWithExtensionIterator {
     /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
     /// then we visit one of the next directories to visit.
     files_matching_allowed_extensions: Vec<PathBuf>,
+
+    /// This option controls if the cached file system should be used or not. This could be
+    /// better for certain cases where the entries in the directories do not change and therefore
+    /// caching can be used.
+    use_cached_fs: bool,
 }
 
 impl FilesWithExtensionIterator {
-    pub fn new(root_directory: PathBuf) -> Self {
+    pub fn new(root_directory: impl AsRef<Path>) -> Self {
         Self {
             allowed_extensions: Default::default(),
-            directories_to_search: vec![root_directory],
+            directories_to_search: vec![root_directory.as_ref().to_path_buf()],
             files_matching_allowed_extensions: Default::default(),
+            use_cached_fs: Default::default(),
         }
     }
 
@@ -33,6 +43,11 @@ impl FilesWithExtensionIterator {
         self.allowed_extensions.insert(allowed_extension.into());
         self
     }
+
+    pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
+        self.use_cached_fs = use_cached_fs;
+        self
+    }
 }
 
 impl Iterator for FilesWithExtensionIterator {
@@ -45,16 +60,19 @@ impl Iterator for FilesWithExtensionIterator {
 
         let directory_to_search = self.directories_to_search.pop()?;
 
-        // Read all of the entries in the directory. If we failed to read this dir's entires then we
-        // elect to just ignore it and look in the next directory, we do that by calling the next
-        // method again on the iterator, which is an intentional decision that we made here instead
-        // of panicking.
-        let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
-            return self.next();
-        };
+        let iterator = if self.use_cached_fs {
+            let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
+                return self.next();
+            };
+            Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
+        } else {
+            let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
+                return self.next();
+            };
+            Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
+        };
 
-        for entry in dir_entries.flatten() {
-            let entry_path = entry.path();
+        for entry_path in iterator.flatten() {
             if entry_path.is_dir() {
                 self.directories_to_search.push(entry_path)
             } else if entry_path.is_file()
```
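Putting the new builder surface together: the constructor now takes any `AsRef<Path>`, and the cached file system is opt-in. The extension-adding method's name is not visible in the hunk (it starts mid-method), so `with_allowed_extension` below is an assumption.

```rust
use std::path::PathBuf;

use revive_dt_common::iterators::FilesWithExtensionIterator;

fn solidity_sources(root: &str) -> Vec<PathBuf> {
    FilesWithExtensionIterator::new(root)
        // Assumed method name; the hunk above only shows its body.
        .with_allowed_extension("sol")
        // Safe when the corpus directory does not change while tests run.
        .with_use_cached_fs(true)
        .collect()
}
```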
```diff
@@ -1,3 +1,5 @@
+mod either_iter;
 mod files_with_extension_iterator;
 
+pub use either_iter::*;
 pub use files_with_extension_iterator::*;
```
```diff
@@ -1,7 +1,9 @@
 //! This crate provides common concepts, functionality, types, macros, and more that other crates in
 //! the workspace can benefit from.
 
+pub mod cached_fs;
 pub mod fs;
+pub mod futures;
 pub mod iterators;
 pub mod macros;
 pub mod types;
```
```diff
@@ -1,3 +1,25 @@
+#[macro_export]
+macro_rules! impl_for_wrapper {
+    (Display, $ident: ident) => {
+        #[automatically_derived]
+        impl std::fmt::Display for $ident {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                std::fmt::Display::fmt(&self.0, f)
+            }
+        }
+    };
+    (FromStr, $ident: ident) => {
+        #[automatically_derived]
+        impl std::str::FromStr for $ident {
+            type Err = anyhow::Error;
+
+            fn from_str(s: &str) -> anyhow::Result<Self> {
+                s.parse().map(Self).map_err(Into::into)
+            }
+        }
+    };
+}
+
 /// Defines wrappers around types.
 ///
 /// For example, the macro invocation seen below:
@@ -42,7 +64,13 @@
 macro_rules! define_wrapper_type {
     (
         $(#[$meta: meta])*
-        $vis:vis struct $ident: ident($ty: ty);
+        $vis:vis struct $ident: ident($ty: ty)
+
+        $(
+            impl $($trait_ident: ident),*
+        )?
+
+        ;
     ) => {
         $(#[$meta])*
         $vis struct $ident($ty);
@@ -98,9 +126,15 @@ macro_rules! define_wrapper_type {
                 value.0
             }
         }
+
+        $(
+            $(
+                $crate::macros::impl_for_wrapper!($trait_ident, $ident);
+            )*
+        )?
     };
 }
 
 /// Technically not needed but this allows for the macro to be found in the `macros` module of the
 /// crate in addition to being found in the root of the crate.
-pub use define_wrapper_type;
+pub use {define_wrapper_type, impl_for_wrapper};
```
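An invocation sketch for the extended macro grammar: the optional `impl ...` clause now sits between the tuple struct and the trailing semicolon, and each listed trait is forwarded to `impl_for_wrapper!`. The wrapper type below is invented purely to demonstrate the shape.

```rust
use revive_dt_common::macros::define_wrapper_type;

define_wrapper_type! {
    /// A hypothetical wrapper used only to demonstrate the new grammar.
    pub struct CompilerVersion(semver::Version)

    impl Display, FromStr;
}

fn demo() -> anyhow::Result<()> {
    // FromStr comes from impl_for_wrapper!, delegating to semver's parser.
    let version: CompilerVersion = "0.8.29".parse()?;
    // Display likewise forwards straight to the inner type.
    println!("{version}");
    Ok(())
}
```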
```diff
@@ -1,3 +1,5 @@
+mod mode;
 mod version_or_requirement;
 
+pub use mode::*;
 pub use version_or_requirement::*;
```
```diff
@@ -0,0 +1,173 @@
+use crate::types::VersionOrRequirement;
+use semver::Version;
+use serde::{Deserialize, Serialize};
+use std::fmt::Display;
+use std::str::FromStr;
+use std::sync::LazyLock;
+
+/// This represents a mode that a given test should be run with, if possible.
+///
+/// We obtain this by taking a [`ParsedMode`], which may be looser or more strict
+/// in its requirements, and then expanding it out into a list of [`Mode`]s.
+///
+/// Use [`ParsedMode::to_test_modes()`] to do this.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct Mode {
+    pub pipeline: ModePipeline,
+    pub optimize_setting: ModeOptimizerSetting,
+    pub version: Option<semver::VersionReq>,
+}
+
+impl Display for Mode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.pipeline.fmt(f)?;
+        f.write_str(" ")?;
+        self.optimize_setting.fmt(f)?;
+
+        if let Some(version) = &self.version {
+            f.write_str(" ")?;
+            version.fmt(f)?;
+        }
+
+        Ok(())
+    }
+}
+
+impl Mode {
+    /// Return all of the available mode combinations.
+    pub fn all() -> impl Iterator<Item = &'static Mode> {
+        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
+            ModePipeline::test_cases()
+                .flat_map(|pipeline| {
+                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
+                        pipeline,
+                        optimize_setting,
+                        version: None,
+                    })
+                })
+                .collect::<Vec<_>>()
+        });
+        ALL_MODES.iter()
+    }
+
+    /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
+    /// the requirement is present on the object. Otherwise, the passed default version is used.
+    pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
+        match self.version {
+            Some(ref requirement) => requirement.clone().into(),
+            None => default.into(),
+        }
+    }
+}
+
+/// What do we want the compiler to do?
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum ModePipeline {
+    /// Compile Solidity code via Yul IR
+    ViaYulIR,
+    /// Compile Solidity direct to assembly
+    ViaEVMAssembly,
+}
+
+impl FromStr for ModePipeline {
+    type Err = anyhow::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            // via Yul IR
+            "Y" => Ok(ModePipeline::ViaYulIR),
+            // Don't go via Yul IR
+            "E" => Ok(ModePipeline::ViaEVMAssembly),
+            // Anything else that we see isn't a mode at all
+            _ => Err(anyhow::anyhow!(
+                "Unsupported pipeline '{s}': expected 'Y' or 'E'"
+            )),
+        }
+    }
+}
+
+impl Display for ModePipeline {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ModePipeline::ViaYulIR => f.write_str("Y"),
+            ModePipeline::ViaEVMAssembly => f.write_str("E"),
+        }
+    }
+}
+
+impl ModePipeline {
+    /// Should we go via Yul IR?
+    pub fn via_yul_ir(&self) -> bool {
+        matches!(self, ModePipeline::ViaYulIR)
+    }
+
+    /// An iterator over the available pipelines that we'd like to test,
+    /// when an explicit pipeline was not specified.
+    pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
+        [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
+    }
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum ModeOptimizerSetting {
+    /// 0 / -: Don't apply any optimizations
+    M0,
+    /// 1: Apply less than default optimizations
+    M1,
+    /// 2: Apply the default optimizations
+    M2,
+    /// 3 / +: Apply aggressive optimizations
+    M3,
+    /// s: Optimize for size
+    Ms,
+    /// z: Aggressively optimize for size
+    Mz,
+}
+
+impl FromStr for ModeOptimizerSetting {
+    type Err = anyhow::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "M0" => Ok(ModeOptimizerSetting::M0),
+            "M1" => Ok(ModeOptimizerSetting::M1),
+            "M2" => Ok(ModeOptimizerSetting::M2),
+            "M3" => Ok(ModeOptimizerSetting::M3),
+            "Ms" => Ok(ModeOptimizerSetting::Ms),
+            "Mz" => Ok(ModeOptimizerSetting::Mz),
+            _ => Err(anyhow::anyhow!(
+                "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
+            )),
+        }
+    }
+}
+
+impl Display for ModeOptimizerSetting {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ModeOptimizerSetting::M0 => f.write_str("M0"),
+            ModeOptimizerSetting::M1 => f.write_str("M1"),
+            ModeOptimizerSetting::M2 => f.write_str("M2"),
+            ModeOptimizerSetting::M3 => f.write_str("M3"),
+            ModeOptimizerSetting::Ms => f.write_str("Ms"),
+            ModeOptimizerSetting::Mz => f.write_str("Mz"),
+        }
+    }
+}
+
+impl ModeOptimizerSetting {
+    /// An iterator over the available optimizer settings that we'd like to test,
+    /// when an explicit optimizer setting was not specified.
+    pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
+        [
+            // No optimizations:
+            ModeOptimizerSetting::M0,
+            // Aggressive optimizations:
+            ModeOptimizerSetting::M3,
+        ]
+        .into_iter()
+    }
+
+    /// Are any optimizations enabled?
+    pub fn optimizations_enabled(&self) -> bool {
+        !matches!(self, ModeOptimizerSetting::M0)
+    }
+}
```
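How the new mode types compose, based on the `FromStr` and `Display` implementations above (e.g. a mode string such as `Y M3 >=0.8.9`):

```rust
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};

fn demo() -> anyhow::Result<()> {
    // "Y" selects the via-Yul-IR pipeline, "M3" aggressive optimizations.
    let pipeline: ModePipeline = "Y".parse()?;
    let optimize_setting: ModeOptimizerSetting = "M3".parse()?;

    let mode = Mode {
        pipeline,
        optimize_setting,
        version: Some(semver::VersionReq::parse(">=0.8.9")?),
    };

    // Display glues the parts back together with spaces.
    assert_eq!(mode.to_string(), "Y M3 >=0.8.9");
    assert!(mode.optimize_setting.optimizations_enabled());
    Ok(())
}
```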
```diff
@@ -18,9 +18,13 @@ revive-common = { workspace = true }
 alloy = { workspace = true }
 alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
+dashmap = { workspace = true }
 foundry-compilers-artifacts = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 tracing = { workspace = true }
 tokio = { workspace = true }
+
+[lints]
+workspace = true
```
+79 −55

```diff
@@ -5,56 +5,70 @@
 use std::{
     collections::HashMap,
-    fs::read_to_string,
     hash::Hash,
     path::{Path, PathBuf},
 };
 
 use alloy::json_abi::JsonAbi;
 use alloy_primitives::Address;
+use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
 
 use revive_common::EVMVersion;
+use revive_dt_common::cached_fs::read_to_string;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
+
+// Re-export this as it's a part of the compiler interface.
+pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
 
 pub mod revive_js;
 pub mod revive_resolc;
 pub mod solc;
 
 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler {
-    /// Extra options specific to the compiler.
-    type Options: Default + PartialEq + Eq + Hash;
+pub trait SolidityCompiler: Sized {
+    /// Instantiates a new compiler object.
+    ///
+    /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
+    /// new compiler object. Certain implementations of this trait might choose to cache the
+    /// compiler objects and return the same ones over and over again.
+    fn new(
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> impl Future<Output = Result<Self>>;
+
+    /// Returns the version of the compiler.
+    fn version(&self) -> &Version;
+
+    /// Returns the path of the compiler executable.
+    fn path(&self) -> &Path;
 
     /// The low-level compiler interface.
-    fn build(
-        &self,
-        input: CompilerInput,
-        additional_options: Self::Options,
-    ) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
-
-    fn new(solc_executable: PathBuf) -> Self;
-
-    fn get_compiler_executable(
-        config: &Arguments,
-        version: impl Into<VersionOrRequirement>,
-    ) -> impl Future<Output = anyhow::Result<PathBuf>>;
-
-    fn version(&self) -> anyhow::Result<Version>;
+    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
+
+    /// Does the compiler support the provided mode and version settings.
+    fn supports_mode(
+        &self,
+        optimizer_setting: ModeOptimizerSetting,
+        pipeline: ModePipeline,
+    ) -> bool;
 }
 
 /// The generic compilation input configuration.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct CompilerInput {
-    pub enable_optimization: Option<bool>,
-    pub via_ir: Option<bool>,
+    pub pipeline: Option<ModePipeline>,
+    pub optimization: Option<ModeOptimizerSetting>,
     pub evm_version: Option<EVMVersion>,
     pub allow_paths: Vec<PathBuf>,
     pub base_path: Option<PathBuf>,
     pub sources: HashMap<PathBuf, String>,
     pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
+    pub revert_string_handling: Option<RevertString>,
 }
 
 /// The generic compilation output configuration.
@@ -66,43 +80,34 @@ pub struct CompilerOutput {
 }
 
 /// A generic builder style interface for configuring the supported compiler options.
-pub struct Compiler<T: SolidityCompiler> {
+#[derive(Default)]
+pub struct Compiler {
     input: CompilerInput,
-    additional_options: T::Options,
 }
 
-impl Default for Compiler<solc::Solc> {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl<T> Compiler<T>
-where
-    T: SolidityCompiler,
-{
+impl Compiler {
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
-                enable_optimization: Default::default(),
-                via_ir: Default::default(),
+                pipeline: Default::default(),
+                optimization: Default::default(),
                 evm_version: Default::default(),
                 allow_paths: Default::default(),
                 base_path: Default::default(),
                 sources: Default::default(),
                 libraries: Default::default(),
+                revert_string_handling: Default::default(),
             },
-            additional_options: T::Options::default(),
         }
     }
 
-    pub fn with_optimization(mut self, value: impl Into<Option<bool>>) -> Self {
-        self.input.enable_optimization = value.into();
+    pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
+        self.input.optimization = value.into();
         self
     }
 
-    pub fn with_via_ir(mut self, value: impl Into<Option<bool>>) -> Self {
-        self.input.via_ir = value.into();
+    pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
+        self.input.pipeline = value.into();
         self
     }
 
@@ -121,10 +126,11 @@ where
         self
     }
 
-    pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
-        self.input
-            .sources
-            .insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?);
+    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
+        self.input.sources.insert(
+            path.as_ref().to_path_buf(),
+            read_to_string(path.as_ref()).context("Failed to read the contract source")?,
+        );
         Ok(self)
     }
 
@@ -142,21 +148,39 @@ where
         self
     }
 
-    pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
-        self.additional_options = options.into();
+    pub fn with_revert_string_handling(
+        mut self,
+        revert_string_handling: impl Into<Option<RevertString>>,
+    ) -> Self {
+        self.input.revert_string_handling = revert_string_handling.into();
         self
     }
 
-    pub async fn try_build(
-        self,
-        compiler_path: impl AsRef<Path>,
-    ) -> anyhow::Result<CompilerOutput> {
-        T::new(compiler_path.as_ref().to_path_buf())
-            .build(self.input, self.additional_options)
-            .await
+    pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
+        callback(self)
     }
 
-    pub fn input(&self) -> CompilerInput {
-        self.input.clone()
+    pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
+        callback(self)
+    }
+
+    pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
+        compiler.build(self.input).await
+    }
+
+    pub fn input(&self) -> &CompilerInput {
+        &self.input
     }
 }
+
+/// Defines how the compiler should handle revert strings.
+#[derive(
+    Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
+)]
+pub enum RevertString {
+    #[default]
+    Default,
+    Debug,
+    Strip,
+    VerboseDebug,
+}
```
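With the trait reshaped, a build now goes through a compiler instance rather than a bare executable path. A sketch of the new flow, generic over any `SolidityCompiler`; the crate name `revive_dt_compiler` is assumed, since the hunks only show crate-internal `crate::...` paths.

```rust
use std::path::Path;

use anyhow::Result;
// Assumed crate name; the diff only shows crate-internal imports.
use revive_dt_compiler::{
    Compiler, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler,
};

async fn compile_one<C: SolidityCompiler>(
    compiler: &C,
    source: &Path,
) -> Result<CompilerOutput> {
    // Bail out early if this backend cannot honor the requested mode.
    anyhow::ensure!(
        compiler.supports_mode(ModeOptimizerSetting::M3, ModePipeline::ViaYulIR),
        "compiler {} does not support Y M3",
        compiler.path().display()
    );

    Compiler::new()
        .with_pipeline(ModePipeline::ViaYulIR)
        .with_optimization(ModeOptimizerSetting::M3)
        .with_source(source)?
        .try_build(compiler)
        .await
}
```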
```diff
@@ -3,53 +3,101 @@
 use std::{
     path::PathBuf,
-    process::{Command, Stdio},
+    process::Stdio,
+    sync::{Arc, LazyLock},
 };
 
+use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_solc_json_interface::{
     SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
     SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
     SolcStandardJsonOutput,
 };
 
-use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
+use crate::{
+    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
+};
 
 use alloy::json_abi::JsonAbi;
-use anyhow::Context;
+use anyhow::{Context as _, Result};
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
-// TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
-// specified solc compiler. I believe that currently we completely ignore the specified solc binary
-// when invoking resolc which doesn't seem right if we're using solc as a compiler frontend.
-
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
-#[derive(Debug)]
-pub struct Resolc {
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Resolc(Arc<ResolcInner>);
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct ResolcInner {
+    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
+    solc: Solc,
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
 }
 
 impl SolidityCompiler for Resolc {
-    type Options = Vec<String>;
+    async fn new(
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> Result<Self> {
+        /// This is a cache of all of the resolc compiler objects. Since we do not currently
+        /// support multiple resolc compiler versions, our cache is just keyed by the solc
+        /// compiler and its version to the resolc compiler.
+        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
+
+        let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
+
+        let solc = Solc::new(&context, version)
+            .await
+            .context("Failed to create the solc compiler frontend for resolc")?;
+
+        Ok(COMPILERS_CACHE
+            .entry(solc.clone())
+            .or_insert_with(|| {
+                Self(Arc::new(ResolcInner {
+                    solc,
+                    resolc_path: resolc_configuration.path.clone(),
+                }))
+            })
+            .clone())
+    }
+
+    fn version(&self) -> &Version {
+        // We currently return the solc compiler version since we do not support multiple resolc
+        // compiler versions.
+        self.0.solc.version()
+    }
+
+    fn path(&self) -> &std::path::Path {
+        &self.0.resolc_path
+    }
 
     #[tracing::instrument(level = "debug", ret)]
     async fn build(
         &self,
         CompilerInput {
-            enable_optimization,
-            // Ignored and not honored since this is required for the resolc compilation.
-            via_ir: _via_ir,
+            pipeline,
+            optimization,
             evm_version,
             allow_paths,
             base_path,
             sources,
             libraries,
+            // TODO: this is currently not being handled since there is no way to pass it into
+            // resolc. So, we need to go back to this later once it's supported.
+            revert_string_handling: _,
         }: CompilerInput,
-        additional_options: Self::Options,
-    ) -> anyhow::Result<CompilerOutput> {
+    ) -> Result<CompilerOutput> {
+        if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
+            anyhow::bail!(
+                "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
+            );
+        }
+
         let input = SolcStandardJsonInput {
             language: SolcStandardJsonInputLanguage::Solidity,
             sources: sources
@@ -78,7 +126,9 @@ impl SolidityCompiler for Resolc {
             output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
             via_ir: Some(true),
             optimizer: SolcStandardJsonInputSettingsOptimizer::new(
-                enable_optimization.unwrap_or(false),
+                optimization
+                    .unwrap_or(ModeOptimizerSetting::M0)
+                    .optimizations_enabled(),
                 None,
                 &Version::new(0, 0, 0),
                 false,
@@ -88,7 +138,7 @@ impl SolidityCompiler for Resolc {
             },
         };
 
-        let mut command = AsyncCommand::new(&self.resolc_path);
+        let mut command = AsyncCommand::new(self.path());
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -107,18 +157,28 @@ impl SolidityCompiler for Resolc {
                     .join(","),
             );
         }
-        let mut child = command.spawn()?;
+        let mut child = command
+            .spawn()
+            .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
 
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        let serialized_input = serde_json::to_vec(&input)?;
-        stdin_pipe.write_all(&serialized_input).await?;
+        let serialized_input = serde_json::to_vec(&input)
+            .context("Failed to serialize Standard JSON input for resolc")?;
+        stdin_pipe
+            .write_all(&serialized_input)
+            .await
+            .context("Failed to write Standard JSON to resolc stdin")?;
 
-        let output = child.wait_with_output().await?;
+        let output = child
+            .wait_with_output()
+            .await
+            .context("Failed while waiting for resolc process to finish")?;
         let stdout = output.stdout;
         let stderr = output.stderr;
 
         if !output.status.success() {
-            let json_in = serde_json::to_string_pretty(&input)?;
+            let json_in = serde_json::to_string_pretty(&input)
+                .context("Failed to pretty-print Standard JSON input for logging")?;
             let message = String::from_utf8_lossy(&stderr);
             tracing::error!(
                 status = %output.status,
@@ -129,12 +189,14 @@ impl SolidityCompiler for Resolc {
             anyhow::bail!("Compilation failed with an error: {message}");
         }
 
-        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
-            anyhow::anyhow!(
-                "failed to parse resolc JSON output: {e}\nstderr: {}",
-                String::from_utf8_lossy(&stderr)
-            )
-        })?;
+        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
+            .map_err(|e| {
+                anyhow::anyhow!(
+                    "failed to parse resolc JSON output: {e}\nstderr: {}",
+                    String::from_utf8_lossy(&stderr)
+                )
+            })
+            .context("Failed to parse resolc standard JSON output")?;
 
         tracing::debug!(
             output = %serde_json::to_string(&parsed).unwrap(),
@@ -161,7 +223,10 @@ impl SolidityCompiler for Resolc {
 
         let mut compiler_output = CompilerOutput::default();
         for (source_path, contracts) in contracts.into_iter() {
-            let source_path = PathBuf::from(source_path).canonicalize()?;
+            let src_for_msg = source_path.clone();
+            let source_path = PathBuf::from(source_path)
+                .canonicalize()
+                .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
 
             let map = compiler_output.contracts.entry(source_path).or_default();
             for (contract_name, contract_information) in contracts.into_iter() {
@@ -169,23 +234,41 @@ impl SolidityCompiler for Resolc {
                     .evm
                     .and_then(|evm| evm.bytecode.clone())
                     .context("Unexpected - Contract compiled with resolc has no bytecode")?;
```
|
||||||
let abi = contract_information
|
let abi = {
|
||||||
|
let metadata = contract_information
|
||||||
.metadata
|
.metadata
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.and_then(|metadata| metadata.as_object())
|
.context("No metadata found for the contract")?;
|
||||||
.and_then(|metadata| metadata.get("solc_metadata"))
|
let solc_metadata_str = match metadata {
|
||||||
.and_then(|solc_metadata| solc_metadata.as_str())
|
serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
|
||||||
.and_then(|metadata| serde_json::from_str::<serde_json::Value>(metadata).ok())
|
serde_json::Value::Object(metadata_object) => {
|
||||||
.and_then(|metadata| {
|
let solc_metadata_value = metadata_object
|
||||||
metadata.get("output").and_then(|output| {
|
.get("solc_metadata")
|
||||||
output
|
.context("Contract doesn't have a 'solc_metadata' field")?;
|
||||||
.get("abi")
|
solc_metadata_value
|
||||||
.and_then(|abi| serde_json::from_value::<JsonAbi>(abi.clone()).ok())
|
.as_str()
|
||||||
})
|
.context("The 'solc_metadata' field is not a string")?
|
||||||
})
|
}
|
||||||
.context(
|
serde_json::Value::Null
|
||||||
"Unexpected - Failed to get the ABI for a contract compiled with resolc",
|
| serde_json::Value::Bool(_)
|
||||||
|
| serde_json::Value::Number(_)
|
||||||
|
| serde_json::Value::Array(_) => {
|
||||||
|
anyhow::bail!("Unsupported type of metadata {metadata:?}")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let solc_metadata =
|
||||||
|
serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
|
||||||
|
"Failed to deserialize the solc_metadata as a serde_json generic value",
|
||||||
)?;
|
)?;
|
||||||
|
let output_value = solc_metadata
|
||||||
|
.get("output")
|
||||||
|
.context("solc_metadata doesn't have an output field")?;
|
||||||
|
let abi_value = output_value
|
||||||
|
.get("abi")
|
||||||
|
.context("solc_metadata output doesn't contain an abi field")?;
|
||||||
|
serde_json::from_value::<JsonAbi>(abi_value.clone())
|
||||||
|
.context("ABI found in solc_metadata output is not valid ABI")?
|
||||||
|
};
|
||||||
map.insert(contract_name, (bytecode.object, abi));
|
map.insert(contract_name, (bytecode.object, abi));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -193,61 +276,11 @@ impl SolidityCompiler for Resolc {
|
|||||||
Ok(compiler_output)
|
Ok(compiler_output)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new(resolc_path: PathBuf) -> Self {
|
fn supports_mode(
|
||||||
Resolc { resolc_path }
|
&self,
|
||||||
}
|
optimize_setting: ModeOptimizerSetting,
|
||||||
|
pipeline: ModePipeline,
|
||||||
async fn get_compiler_executable(
|
) -> bool {
|
||||||
config: &Arguments,
|
pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
|
||||||
_version: impl Into<VersionOrRequirement>,
|
|
||||||
) -> anyhow::Result<PathBuf> {
|
|
||||||
if !config.resolc.as_os_str().is_empty() {
|
|
||||||
return Ok(config.resolc.clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(PathBuf::from("resolc"))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn version(&self) -> anyhow::Result<semver::Version> {
|
|
||||||
// Logic for parsing the resolc version from the following string:
|
|
||||||
// Solidity frontend for the revive compiler version 0.3.0+commit.b238913.llvm-18.1.8
|
|
||||||
|
|
||||||
let output = Command::new(self.resolc_path.as_path())
|
|
||||||
.arg("--version")
|
|
||||||
.stdout(Stdio::piped())
|
|
||||||
.spawn()?
|
|
||||||
.wait_with_output()?
|
|
||||||
.stdout;
|
|
||||||
let output = String::from_utf8_lossy(&output);
|
|
||||||
let version_string = output
|
|
||||||
.split("version ")
|
|
||||||
.nth(1)
|
|
||||||
.context("Version parsing failed")?
|
|
||||||
.split("+")
|
|
||||||
.next()
|
|
||||||
.context("Version parsing failed")?;
|
|
||||||
|
|
||||||
Version::parse(version_string).map_err(Into::into)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod test {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn compiler_version_can_be_obtained() {
|
|
||||||
// Arrange
|
|
||||||
let args = Arguments::default();
|
|
||||||
let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
let compiler = Resolc::new(path);
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let version = compiler.version();
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let _ = version.expect("Failed to get version");
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
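Note on the metadata handling introduced above: resolc surfaces the original solc metadata either as a JSON string directly or as an object wrapping that string under a `solc_metadata` key, and the ABI sits at `output.abi` inside the decoded string. A minimal standalone sketch of the same extraction, assuming only the `anyhow` and `serde_json` crates (names and error texts here are illustrative, not the project's):

    use anyhow::Context;

    // Sketch: pull `output.abi` out of a resolc-style metadata value. This mirrors
    // the match in the diff above but returns a generic JSON value instead of JsonAbi.
    fn extract_abi(metadata: &serde_json::Value) -> anyhow::Result<serde_json::Value> {
        // The metadata is either the solc metadata string itself, or an object
        // wrapping that string under a `solc_metadata` key.
        let solc_metadata_str = match metadata {
            serde_json::Value::String(s) => s.as_str(),
            serde_json::Value::Object(object) => object
                .get("solc_metadata")
                .and_then(|value| value.as_str())
                .context("no string `solc_metadata` field")?,
            other => anyhow::bail!("unsupported metadata shape: {other:?}"),
        };
        // The string is itself JSON; the ABI lives at `output.abi` inside it.
        let solc_metadata: serde_json::Value = serde_json::from_str(solc_metadata_str)?;
        solc_metadata
            .pointer("/output/abi")
            .cloned()
            .context("metadata has no `output.abi` field")
    }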
+122 -80
@@ -3,16 +3,18 @@

 use std::{
     path::PathBuf,
-    process::{Command, Stdio},
+    process::Stdio,
+    sync::{Arc, LazyLock},
 };

+use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_dt_solc_binaries::download_solc;

-use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
+use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};

-use anyhow::Context;
+use anyhow::{Context as _, Result};
 use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -23,28 +25,88 @@ use foundry_compilers_artifacts::{
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

-#[derive(Debug)]
-pub struct Solc {
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Solc(Arc<SolcInner>);
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct SolcInner {
+    /// The path of the solidity compiler executable that this object uses.
     solc_path: PathBuf,
+    /// The version of the solidity compiler executable that this object uses.
+    solc_version: Version,
 }

 impl SolidityCompiler for Solc {
-    type Options = ();
+    async fn new(
+        context: impl AsRef<SolcConfiguration>
+        + AsRef<ResolcConfiguration>
+        + AsRef<WorkingDirectoryConfiguration>,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> Result<Self> {
+        // This is a cache for the compiler objects so that whenever the same compiler version is
+        // requested the same object is returned. We do this as we do not want to keep cloning the
+        // compiler around.
+        static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
+            LazyLock::new(Default::default);
+
+        let working_directory_configuration =
+            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
+        let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
+
+        // We attempt to download the solc binary. Note the following: this call does the version
+        // resolution for us. Therefore, even if the download didn't proceed, this function will
+        // resolve the version requirement into a canonical version of the compiler. It's then up
+        // to us to either use the provided path or not.
+        let version = version
+            .into()
+            .unwrap_or_else(|| solc_configuration.version.clone().into());
+        let (version, path) =
+            download_solc(working_directory_configuration.as_path(), version, false)
+                .await
+                .context("Failed to download/get path to solc binary")?;
+
+        Ok(COMPILERS_CACHE
+            .entry((path.clone(), version.clone()))
+            .or_insert_with(|| {
+                Self(Arc::new(SolcInner {
+                    solc_path: path,
+                    solc_version: version,
+                }))
+            })
+            .clone())
+    }
+
+    fn version(&self) -> &Version {
+        &self.0.solc_version
+    }
+
+    fn path(&self) -> &std::path::Path {
+        &self.0.solc_path
+    }

     #[tracing::instrument(level = "debug", ret)]
     async fn build(
         &self,
         CompilerInput {
-            enable_optimization,
-            via_ir,
+            pipeline,
+            optimization,
             evm_version,
             allow_paths,
             base_path,
             sources,
             libraries,
+            revert_string_handling,
         }: CompilerInput,
-        _: Self::Options,
-    ) -> anyhow::Result<CompilerOutput> {
+    ) -> Result<CompilerOutput> {
+        // Be careful to entirely omit the viaIR field if the compiler does not support it,
+        // as it will error if you provide fields it does not know about. Because
+        // `supports_mode` is called prior to instantiating a compiler, we should never
+        // ask for something which is invalid.
+        let via_ir = match (pipeline, self.compiler_supports_yul()) {
+            (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
+            (_pipeline, false) => None,
+        };
+
         let input = SolcInput {
             language: SolcLanguage::Solidity,
             sources: Sources(
@@ -55,7 +117,7 @@ impl SolidityCompiler for Solc {
             ),
             settings: Settings {
                 optimizer: Optimizer {
-                    enabled: enable_optimization,
+                    enabled: optimization.map(|o| o.optimizations_enabled()),
                     details: Some(Default::default()),
                     ..Default::default()
                 },
@@ -87,11 +149,20 @@ impl SolidityCompiler for Solc {
                     })
                     .collect(),
                 },
+                debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
+                    revert_strings: match revert_string_handling {
+                        crate::RevertString::Default => Some(RevertStrings::Default),
+                        crate::RevertString::Debug => Some(RevertStrings::Debug),
+                        crate::RevertString::Strip => Some(RevertStrings::Strip),
+                        crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
+                    },
+                    debug_info: Default::default(),
+                }),
                 ..Default::default()
             },
         };

-        let mut command = AsyncCommand::new(&self.solc_path);
+        let mut command = AsyncCommand::new(self.path());
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -110,15 +181,25 @@ impl SolidityCompiler for Solc {
                     .join(","),
             );
         }
-        let mut child = command.spawn()?;
-        let stdin = child.stdin.as_mut().expect("should be piped");
-        let serialized_input = serde_json::to_vec(&input)?;
-        stdin.write_all(&serialized_input).await?;
-        let output = child.wait_with_output().await?;
+        let mut child = command
+            .spawn()
+            .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
+
+        let stdin = child.stdin.as_mut().expect("should be piped");
+        let serialized_input = serde_json::to_vec(&input)
+            .context("Failed to serialize Standard JSON input for solc")?;
+        stdin
+            .write_all(&serialized_input)
+            .await
+            .context("Failed to write Standard JSON to solc stdin")?;
+        let output = child
+            .wait_with_output()
+            .await
+            .context("Failed while waiting for solc process to finish")?;

         if !output.status.success() {
-            let json_in = serde_json::to_string_pretty(&input)?;
+            let json_in = serde_json::to_string_pretty(&input)
+                .context("Failed to pretty-print Standard JSON input for logging")?;
             let message = String::from_utf8_lossy(&output.stderr);
             tracing::error!(
                 status = %output.status,
@@ -129,12 +210,14 @@ impl SolidityCompiler for Solc {
             anyhow::bail!("Compilation failed with an error: {message}");
         }

-        let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout).map_err(|e| {
+        let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
+            .map_err(|e| {
                 anyhow::anyhow!(
                     "failed to parse resolc JSON output: {e}\nstderr: {}",
                     String::from_utf8_lossy(&output.stdout)
                 )
-        })?;
+            })
+            .context("Failed to parse solc standard JSON output")?;

         // Detecting if the compiler output contained errors and reporting them through logs and
         // errors instead of returning the compiler output that might contain errors.
@@ -154,7 +237,12 @@ impl SolidityCompiler for Solc {
         for (contract_path, contracts) in parsed.contracts {
             let map = compiler_output
                 .contracts
-                .entry(contract_path.canonicalize()?)
+                .entry(contract_path.canonicalize().with_context(|| {
+                    format!(
+                        "Failed to canonicalize contract path {}",
+                        contract_path.display()
+                    )
+                })?)
                 .or_default();
             for (contract_name, contract_info) in contracts.into_iter() {
                 let source_code = contract_info
@@ -175,67 +263,21 @@ impl SolidityCompiler for Solc {
         Ok(compiler_output)
     }

-    fn new(solc_path: PathBuf) -> Self {
-        Self { solc_path }
-    }
-
-    async fn get_compiler_executable(
-        config: &Arguments,
-        version: impl Into<VersionOrRequirement>,
-    ) -> anyhow::Result<PathBuf> {
-        let path = download_solc(config.directory(), version, config.wasm).await?;
-        Ok(path)
-    }
-
-    fn version(&self) -> anyhow::Result<semver::Version> {
-        // The following is the parsing code for the version from the solc version strings which
-        // look like the following:
-        // ```
-        // solc, the solidity compiler commandline interface
-        // Version: 0.8.30+commit.73712a01.Darwin.appleclang
-        // ```
-
-        let child = Command::new(self.solc_path.as_path())
-            .arg("--version")
-            .stdout(Stdio::piped())
-            .spawn()?;
-        let output = child.wait_with_output()?;
-        let output = String::from_utf8_lossy(&output.stdout);
-        let version_line = output
-            .split("Version: ")
-            .nth(1)
-            .context("Version parsing failed")?;
-        let version_string = version_line
-            .split("+")
-            .next()
-            .context("Version parsing failed")?;
-
-        Version::parse(version_string).map_err(Into::into)
+    fn supports_mode(
+        &self,
+        _optimize_setting: ModeOptimizerSetting,
+        pipeline: ModePipeline,
+    ) -> bool {
+        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
+        // (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
+        pipeline == ModePipeline::ViaEVMAssembly
+            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
     }
 }

-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[tokio::test]
-    async fn compiler_version_can_be_obtained() {
-        // Arrange
-        let args = Arguments::default();
-        println!("Getting compiler path");
-        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
-            .await
-            .unwrap();
-        println!("Got compiler path");
-        let compiler = Solc::new(path);
-
-        // Act
-        let version = compiler.version();
-
-        // Assert
-        assert_eq!(
-            version.expect("Failed to get version"),
-            Version::new(0, 7, 6)
-        )
+impl Solc {
+    fn compiler_supports_yul(&self) -> bool {
+        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
+        self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
     }
 }
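The `supports_mode`/`compiler_supports_yul` pair above encodes a single rule: `--via-ir` only exists from solc 0.8.13 onward, so the viaIR field must be dropped entirely for older binaries rather than sent as false. A hedged sketch of the same gate in isolation, assuming only the `semver` crate:

    use semver::Version;

    /// solc gained `--via-ir` in 0.8.13; older binaries reject unknown fields,
    /// so the request is omitted rather than sent as `Some(false)`.
    fn supports_via_ir(solc_version: &Version) -> bool {
        const FIRST_VIA_IR: Version = Version::new(0, 8, 13);
        *solc_version >= FIRST_VIA_IR
    }

    fn via_ir_field(requested: Option<bool>, solc_version: &Version) -> Option<bool> {
        // Mirrors the match in the diff: forward the request only when the
        // compiler understands the flag; otherwise leave the field out.
        if supports_via_ir(solc_version) { requested } else { None }
    }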
@@ -1,25 +1,25 @@
 use std::path::PathBuf;

+use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
-use revive_dt_config::Arguments;
+use revive_dt_config::ExecutionContext;
 use semver::Version;

 #[tokio::test]
 async fn contracts_can_be_compiled_with_solc() {
     // Arrange
-    let args = Arguments::default();
-    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
+    let args = ExecutionContext::default();
+    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
         .await
         .unwrap();
-    println!("About to assert");

     // Act
-    let output = Compiler::<Solc>::new()
+    let output = Compiler::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path)
+        .try_build(&solc)
         .await;

     // Assert
@@ -49,18 +49,18 @@ async fn contracts_can_be_compiled_with_solc() {
 #[tokio::test]
 async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
-    let args = Arguments::default();
-    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
+    let args = ExecutionContext::default();
+    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

     // Act
-    let output = Compiler::<Resolc>::new()
+    let output = Compiler::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path)
+        .try_build(&resolc)
         .await;

     // Assert
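Since both tests now build the frontend first and hand a reference to `try_build`, the same fixture could in principle be compiled generically over any `SolidityCompiler`. A rough sketch of that shape; the helper name is hypothetical and the signatures are approximations reconstructed from the calls visible in this diff, not a confirmed API:

    // Hypothetical helper, approximated from the test bodies above: compile the
    // same two sources with any frontend (Solc or Resolc) and return the output.
    async fn build_fixture<C: SolidityCompiler>(compiler: &C) -> anyhow::Result<CompilerOutput> {
        Compiler::new()
            .with_source("./tests/assets/array_one_element/callable.sol")?
            .with_source("./tests/assets/array_one_element/main.sol")?
            .try_build(compiler)
            .await
    }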
@@ -10,8 +10,13 @@ rust-version.workspace = true

 [dependencies]
 alloy = { workspace = true }
+anyhow = { workspace = true }
 clap = { workspace = true }
 semver = { workspace = true }
 temp-dir = { workspace = true }
 serde = { workspace = true }
+serde_json = { workspace = true }
+strum = { workspace = true }
+
+[lints]
+workspace = true
+500 -118
@@ -2,183 +2,565 @@

 use std::{
     fmt::Display,
+    fs::read_to_string,
+    ops::Deref,
     path::{Path, PathBuf},
-    sync::LazyLock,
+    str::FromStr,
+    sync::{Arc, LazyLock, OnceLock},
+    time::Duration,
 };

-use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
-use clap::{Parser, ValueEnum};
+use alloy::{
+    genesis::Genesis,
+    hex::ToHexExt,
+    network::EthereumWallet,
+    primitives::{FixedBytes, U256},
+    signers::local::PrivateKeySigner,
+};
+use clap::{Parser, ValueEnum, ValueHint};
 use semver::Version;
-use serde::{Deserialize, Serialize};
+use serde::{Serialize, Serializer};
+use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
 use temp_dir::TempDir;

-#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
+#[derive(Clone, Debug, Parser, Serialize)]
 #[command(name = "retester")]
-pub struct Arguments {
-    /// The `solc` version to use if the test didn't specify it explicitly.
-    #[arg(long = "solc", short, default_value = "0.8.29")]
-    pub solc: Version,
-
-    /// Use the Wasm compiler versions.
-    #[arg(long = "wasm")]
-    pub wasm: bool,
-
-    /// The path to the `resolc` executable to be tested.
+pub enum Context {
+    /// Executes tests in the MatterLabs format differentially against a leader and a follower.
+    ExecuteTests(Box<ExecutionContext>),
+    /// Exports the JSON schema of the MatterLabs test format used by the tool.
+    ExportJsonSchema,
+}
+
+impl Context {
+    pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
+        self.as_ref()
+    }
+
+    pub fn report_configuration(&self) -> &ReportConfiguration {
+        self.as_ref()
+    }
+}
+
+impl AsRef<WorkingDirectoryConfiguration> for Context {
+    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
+        match self {
+            Context::ExecuteTests(execution_context) => &execution_context.working_directory,
+            Context::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<ReportConfiguration> for Context {
+    fn as_ref(&self) -> &ReportConfiguration {
+        match self {
+            Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
+            Context::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
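The `AsRef` impls above (and the many on `ExecutionContext` that follow) are what let consumers such as `Solc::new` ask for `impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>` instead of the whole argument struct. A toy, self-contained sketch of the pattern with invented types:

    // Toy illustration of the AsRef-composition pattern; these types are invented.
    struct DbConfig { url: String }
    struct LogConfig { level: String }

    struct AppContext { db: DbConfig, log: LogConfig }

    impl AsRef<DbConfig> for AppContext {
        fn as_ref(&self) -> &DbConfig { &self.db }
    }

    impl AsRef<LogConfig> for AppContext {
        fn as_ref(&self) -> &LogConfig { &self.log }
    }

    // Consumers name only the configuration slices they actually depend on.
    fn connect(context: &(impl AsRef<DbConfig> + AsRef<LogConfig>)) {
        let db: &DbConfig = context.as_ref();
        let log: &LogConfig = context.as_ref();
        println!("connecting to {} with log level {}", db.url, log.level);
    }

    fn main() {
        let context = AppContext {
            db: DbConfig { url: "localhost:5432".into() },
            log: LogConfig { level: "debug".into() },
        };
        connect(&context);
    }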
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ExecutionContext {
+    /// The working directory that the program will use for all of the temporary artifacts needed at
+    /// runtime.
+    ///
-    ///
-    /// By default it uses the `resolc` binary found in `$PATH`.
-    ///
-    /// If `--wasm` is set, this should point to the resolc Wasm ile.
-    #[arg(long = "resolc", short, default_value = "resolc")]
-    pub resolc: PathBuf,
+    /// If not specified, then a temporary directory will be created and used by the program for all
+    /// temporary artifacts.
+    #[clap(
+        short,
+        long,
+        default_value = "",
+        value_hint = ValueHint::DirPath,
+    )]
+    pub working_directory: WorkingDirectoryConfiguration,
+
+    /// The differential testing leader node implementation.
+    #[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
+    pub leader: TestingPlatform,
+
+    /// The differential testing follower node implementation.
+    #[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
+    pub follower: TestingPlatform,

     /// A list of test corpus JSON files to be tested.
     #[arg(long = "corpus", short)]
     pub corpus: Vec<PathBuf>,

-    /// A place to store temporary artifacts during test execution.
-    ///
-    /// Creates a temporary dir if not specified.
-    #[arg(long = "workdir", short)]
-    pub working_directory: Option<PathBuf>,
-
-    /// Add a tempdir manually if `working_directory` was not given.
-    ///
-    /// We attach it here because [TempDir] prunes itself on drop.
+    /// Configuration parameters for the solc compiler.
+    #[clap(flatten, next_help_heading = "Solc Configuration")]
+    pub solc_configuration: SolcConfiguration,
+
+    /// Configuration parameters for the resolc compiler.
+    #[clap(flatten, next_help_heading = "Resolc Configuration")]
+    pub resolc_configuration: ResolcConfiguration,
+
+    /// Configuration parameters for the geth node.
+    #[clap(flatten, next_help_heading = "Geth Configuration")]
+    pub geth_configuration: GethConfiguration,
+
+    /// Configuration parameters for the Kitchensink.
+    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
+    pub kitchensink_configuration: KitchensinkConfiguration,
+
+    /// Configuration parameters for the Revive Dev Node.
+    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
+    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
+
+    /// Configuration parameters for the Eth Rpc.
+    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
+    pub eth_rpc_configuration: EthRpcConfiguration,
+
+    /// Configuration parameters for the genesis.
+    #[clap(flatten, next_help_heading = "Genesis Configuration")]
+    pub genesis_configuration: GenesisConfiguration,
+
+    /// Configuration parameters for the wallet.
+    #[clap(flatten, next_help_heading = "Wallet Configuration")]
+    pub wallet_configuration: WalletConfiguration,
+
+    /// Configuration parameters for concurrency.
+    #[clap(flatten, next_help_heading = "Concurrency Configuration")]
+    pub concurrency_configuration: ConcurrencyConfiguration,
+
+    /// Configuration parameters for the compilers and compilation.
+    #[clap(flatten, next_help_heading = "Compilation Configuration")]
+    pub compilation_configuration: CompilationConfiguration,
+
+    /// Configuration parameters for the report.
+    #[clap(flatten, next_help_heading = "Report Configuration")]
+    pub report_configuration: ReportConfiguration,
+}
+
+impl Default for ExecutionContext {
+    fn default() -> Self {
+        Self::parse_from(["execution-context"])
+    }
+}
+
+impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
+        &self.working_directory
+    }
+}
+
+impl AsRef<SolcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &SolcConfiguration {
+        &self.solc_configuration
+    }
+}
+
+impl AsRef<ResolcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ResolcConfiguration {
+        &self.resolc_configuration
+    }
+}
+
+impl AsRef<GethConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &GethConfiguration {
+        &self.geth_configuration
+    }
+}
+
+impl AsRef<KitchensinkConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &KitchensinkConfiguration {
+        &self.kitchensink_configuration
+    }
+}
+
+impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
+        &self.revive_dev_node_configuration
+    }
+}
+
+impl AsRef<EthRpcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &EthRpcConfiguration {
+        &self.eth_rpc_configuration
+    }
+}
+
+impl AsRef<GenesisConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &GenesisConfiguration {
+        &self.genesis_configuration
+    }
+}
+
+impl AsRef<WalletConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &WalletConfiguration {
+        &self.wallet_configuration
+    }
+}
+
+impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ConcurrencyConfiguration {
+        &self.concurrency_configuration
+    }
+}
+
+impl AsRef<CompilationConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &CompilationConfiguration {
+        &self.compilation_configuration
+    }
+}
+
+impl AsRef<ReportConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ReportConfiguration {
+        &self.report_configuration
+    }
+}
+
+/// A set of configuration parameters for Solc.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct SolcConfiguration {
+    /// Specifies the default version of the Solc compiler that should be used if there is no
+    /// override specified by one of the test cases.
+    #[clap(long = "solc.version", default_value = "0.8.29")]
+    pub version: Version,
+}
+
+/// A set of configuration parameters for Resolc.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ResolcConfiguration {
+    /// Specifies the path of the resolc compiler to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the resolc binary that's
+    /// provided in the user's $PATH.
+    #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
+    pub path: PathBuf,
+}
+
+/// A set of configuration parameters for Geth.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct GethConfiguration {
+    /// Specifies the path of the geth node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the geth binary that's
+    /// provided in the user's $PATH.
+    #[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
+    pub path: PathBuf,
+
+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "geth.start-timeout-ms",
+        long = "geth.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}
+
+/// A set of configuration parameters for Kitchensink.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct KitchensinkConfiguration {
+    /// Specifies the path of the kitchensink node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the kitchensink binary
+    /// that's provided in the user's $PATH.
+    #[clap(
+        id = "kitchensink.path",
+        long = "kitchensink.path",
+        default_value = "substrate-node"
+    )]
+    pub path: PathBuf,
+
+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "kitchensink.start-timeout-ms",
+        long = "kitchensink.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+
+    /// This configures the tool to use Kitchensink instead of using the revive-dev-node.
+    #[clap(long = "kitchensink.dont-use-dev-node")]
+    pub use_kitchensink: bool,
+}
+
+/// A set of configuration parameters for the revive dev node.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ReviveDevNodeConfiguration {
+    /// Specifies the path of the revive dev node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the revive dev node binary
+    /// that's provided in the user's $PATH.
+    #[clap(
+        id = "revive-dev-node.path",
+        long = "revive-dev-node.path",
+        default_value = "revive-dev-node"
+    )]
+    pub path: PathBuf,
+
+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "revive-dev-node.start-timeout-ms",
+        long = "revive-dev-node.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}
+
+/// A set of configuration parameters for the ETH RPC.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct EthRpcConfiguration {
+    /// Specifies the path of the ETH RPC to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the ETH RPC binary
+    /// that's provided in the user's $PATH.
+    #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
+    pub path: PathBuf,
+
+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "eth-rpc.start-timeout-ms",
+        long = "eth-rpc.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}
+
+/// A set of configuration parameters for the genesis.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct GenesisConfiguration {
+    /// Specifies the path of the genesis file to use for the nodes that are started.
+    ///
+    /// This is expected to be the path of a JSON geth genesis file.
+    #[clap(id = "genesis.path", long = "genesis.path")]
+    path: Option<PathBuf>,
+
+    /// The genesis object found at the provided path.
     #[clap(skip)]
     #[serde(skip)]
-    pub temp_dir: Option<&'static TempDir>,
-
-    /// The path to the `geth` executable.
-    ///
-    /// By default it uses `geth` binary found in `$PATH`.
-    #[arg(short, long = "geth", default_value = "geth")]
-    pub geth: PathBuf,
-
-    /// The maximum time in milliseconds to wait for geth to start.
-    #[arg(long = "geth-start-timeout", default_value = "5000")]
-    pub geth_start_timeout: u64,
+    genesis: OnceLock<Genesis>,
+}
+
+impl GenesisConfiguration {
+    pub fn genesis(&self) -> anyhow::Result<&Genesis> {
+        static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
+            let genesis = include_str!("../../../genesis.json");
+            serde_json::from_str(genesis).unwrap()
+        });
+
+        match self.genesis.get() {
+            Some(genesis) => Ok(genesis),
+            None => {
+                let genesis = match self.path.as_ref() {
+                    Some(genesis_path) => {
+                        let genesis_content = read_to_string(genesis_path)?;
+                        serde_json::from_str(genesis_content.as_str())?
+                    }
+                    None => DEFAULT_GENESIS.clone(),
+                };
+                Ok(self.genesis.get_or_init(|| genesis))
+            }
+        }
+    }
+}
+
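`GenesisConfiguration::genesis` above memoizes a fallible load: a `OnceLock` field caches the parsed value while a `LazyLock` holds the built-in default. The same check-then-`get_or_init` shape in isolation (a toy sketch, not the project's types):

    use std::{fs, io, path::PathBuf, sync::OnceLock};

    // Toy sketch of the memoization in GenesisConfiguration::genesis.
    struct CachedFile {
        path: Option<PathBuf>,
        contents: OnceLock<String>,
    }

    impl CachedFile {
        // The first call performs the fallible read; later calls are cache hits.
        fn get(&self) -> io::Result<&String> {
            if let Some(contents) = self.contents.get() {
                return Ok(contents);
            }
            let loaded = match &self.path {
                Some(path) => fs::read_to_string(path)?,
                None => String::from("built-in default"),
            };
            // If a concurrent caller initialized first, its value wins here.
            Ok(self.contents.get_or_init(|| loaded))
        }
    }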
-    /// The test network chain ID.
-    #[arg(short, long = "network-id", default_value = "420420420")]
-    pub network_id: u64,
-
-    /// Configure nodes according to this genesis.json file.
-    #[arg(long = "genesis", default_value = "genesis.json")]
-    pub genesis_file: PathBuf,
-
-    /// The signing account private key.
-    #[arg(
-        short,
-        long = "account",
+/// A set of configuration parameters for the wallet.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct WalletConfiguration {
+    /// The private key of the default signer.
+    #[clap(
+        long = "wallet.default-private-key",
         default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
     )]
-    pub account: String,
+    #[serde(serialize_with = "serialize_private_key")]
+    default_key: PrivateKeySigner,

     /// This argument controls which private keys the nodes should have access to and be added to
     /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
     /// of the node.
-    #[arg(long = "private-keys-count", default_value_t = 15_000)]
-    pub private_keys_to_add: usize,
-
-    /// The differential testing leader node implementation.
-    #[arg(short, long = "leader", default_value = "geth")]
-    pub leader: TestingPlatform,
-
-    /// The differential testing follower node implementation.
-    #[arg(short, long = "follower", default_value = "kitchensink")]
-    pub follower: TestingPlatform,
-
-    /// Only compile against this testing platform (doesn't execute the tests).
-    #[arg(long = "compile-only")]
-    pub compile_only: Option<TestingPlatform>,
+    #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
+    additional_keys: usize,
+
+    /// The wallet object that will be used.
+    #[clap(skip)]
+    #[serde(skip)]
+    wallet: OnceLock<Arc<EthereumWallet>>,
+}
+
+impl WalletConfiguration {
+    pub fn wallet(&self) -> Arc<EthereumWallet> {
+        self.wallet
+            .get_or_init(|| {
+                let mut wallet = EthereumWallet::new(self.default_key.clone());
+                for signer in (1..=self.additional_keys)
+                    .map(|id| U256::from(id))
+                    .map(|id| id.to_be_bytes::<32>())
+                    .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
+                {
+                    wallet.register_signer(signer);
+                }
+                Arc::new(wallet)
+            })
+            .clone()
+    }
+}
+
+fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    value.to_bytes().encode_hex().serialize(serializer)
+}
+
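The wallet above derives its extra signers deterministically: index i, big-endian encoded into 32 bytes, is used directly as a private key, so any node given the same count reconstructs the identical signer set. A sketch of just the byte derivation (no alloy dependency; a u128 index stands in for the U256 used above):

    /// Turn a signer index into the 32-byte big-endian private-key layout used
    /// above. The diff starts its range at 1: an all-zero key (index 0) is not
    /// a valid secp256k1 secret.
    fn key_bytes(index: u128) -> [u8; 32] {
        let mut bytes = [0u8; 32];
        bytes[16..].copy_from_slice(&index.to_be_bytes());
        bytes
    }

    fn main() {
        // The key for signer 1 is 31 zero bytes followed by 0x01.
        let key = key_bytes(1);
        assert_eq!(key[31], 1);
        assert!(key[..31].iter().all(|byte| *byte == 0));
    }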
+/// A set of configuration for concurrency.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ConcurrencyConfiguration {
     /// Determines the amount of nodes that will be spawned for each chain.
-    #[arg(long, default_value = "1")]
+    #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
     pub number_of_nodes: usize,

-    /// Determines the amount of threads that will will be used.
-    #[arg(long, default_value = "12")]
+    /// Determines the amount of tokio worker threads that will will be used.
+    #[arg(
+        long = "concurrency.number-of-threads",
+        default_value_t = std::thread::available_parallelism()
+            .map(|n| n.get())
+            .unwrap_or(1)
+    )]
     pub number_of_threads: usize,

-    /// Extract problems back to the test corpus.
-    #[arg(short, long = "extract-problems")]
-    pub extract_problems: bool,
-
-    /// The path to the `kitchensink` executable.
+    /// Determines the amount of concurrent tasks that will be spawned to run tests.
     ///
-    /// By default it uses `substrate-node` binary found in `$PATH`.
-    #[arg(short, long = "kitchensink", default_value = "substrate-node")]
-    pub kitchensink: PathBuf,
-
-    /// The path to the `eth_proxy` executable.
-    ///
-    /// By default it uses `eth-rpc` binary found in `$PATH`.
-    #[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
-    pub eth_proxy: PathBuf,
+    /// Defaults to 10 x the number of nodes.
+    #[arg(long = "concurrency.number-of-concurrent-tasks")]
+    number_concurrent_tasks: Option<usize>,
+
+    /// Determines if the concurrency limit should be ignored or not.
+    #[arg(long = "concurrency.ignore-concurrency-limit")]
+    ignore_concurrency_limit: bool,
 }

-impl Arguments {
-    /// Return the configured working directory with the following precedence:
-    /// 1. `self.working_directory` if it was provided.
-    /// 2. `self.temp_dir` if it it was provided
-    /// 3. Panic.
-    pub fn directory(&self) -> &Path {
-        if let Some(path) = &self.working_directory {
-            return path.as_path();
-        }
-
-        if let Some(temp_dir) = &self.temp_dir {
-            return temp_dir.path();
-        }
-
-        panic!("should have a workdir configured")
-    }
-
-    /// Try to parse `self.account` into a [PrivateKeySigner],
-    /// panicing on error.
-    pub fn wallet(&self) -> EthereumWallet {
-        let signer = self
-            .account
-            .parse::<PrivateKeySigner>()
-            .unwrap_or_else(|error| {
-                panic!("private key '{}' parsing error: {error}", self.account);
-            });
-        EthereumWallet::new(signer)
+impl ConcurrencyConfiguration {
+    pub fn concurrency_limit(&self) -> Option<usize> {
+        match self.ignore_concurrency_limit {
+            true => None,
+            false => Some(
+                self.number_concurrent_tasks
+                    .unwrap_or(20 * self.number_of_nodes),
+            ),
+        }
     }
 }

-impl Default for Arguments {
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct CompilationConfiguration {
+    /// Controls if the compilation cache should be invalidated or not.
+    #[arg(long = "compilation.invalidate-cache")]
+    pub invalidate_compilation_cache: bool,
+}
+
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ReportConfiguration {
+    /// Controls if the compiler input is included in the final report.
+    #[clap(long = "report.include-compiler-input")]
+    pub include_compiler_input: bool,
+
+    /// Controls if the compiler output is included in the final report.
+    #[clap(long = "report.include-compiler-output")]
+    pub include_compiler_output: bool,
+}
+
+/// Represents the working directory that the program uses.
+#[derive(Debug, Clone)]
+pub enum WorkingDirectoryConfiguration {
+    /// A temporary directory is used as the working directory. This will be removed when dropped.
+    TemporaryDirectory(Arc<TempDir>),
+    /// A directory with a path is used as the working directory.
+    Path(PathBuf),
+}
+
+impl WorkingDirectoryConfiguration {
+    pub fn as_path(&self) -> &Path {
+        self.as_ref()
+    }
+}
+
+impl Deref for WorkingDirectoryConfiguration {
+    type Target = Path;
+
+    fn deref(&self) -> &Self::Target {
+        self.as_path()
+    }
+}
+
+impl AsRef<Path> for WorkingDirectoryConfiguration {
+    fn as_ref(&self) -> &Path {
+        match self {
+            WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
+            WorkingDirectoryConfiguration::Path(path) => path.as_path(),
+        }
+    }
+}
+
+impl Default for WorkingDirectoryConfiguration {
     fn default() -> Self {
-        static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
-
-        let default = Arguments::parse_from(["retester"]);
-
-        Arguments {
-            temp_dir: Some(&TEMP_DIR),
-            ..default
+        TempDir::new()
+            .map(Arc::new)
+            .map(Self::TemporaryDirectory)
+            .expect("Failed to create the temporary directory")
+    }
+}
+
+impl FromStr for WorkingDirectoryConfiguration {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "" => Ok(Default::default()),
+            _ => Ok(Self::Path(PathBuf::from(s))),
         }
     }
 }

+impl Display for WorkingDirectoryConfiguration {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(&self.as_path().display(), f)
+    }
+}
+
+impl Serialize for WorkingDirectoryConfiguration {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        self.as_path().serialize(serializer)
+    }
+}
+
+fn parse_duration(s: &str) -> anyhow::Result<Duration> {
+    u64::from_str(s)
+        .map(Duration::from_millis)
+        .map_err(Into::into)
+}
+
 /// The Solidity compatible node implementation.
 ///
 /// This describes the solutions to be tested against on a high level.
 #[derive(
-    Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize,
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
 )]
-#[clap(rename_all = "lower")]
+#[strum(serialize_all = "kebab-case")]
 pub enum TestingPlatform {
     /// The go-ethereum reference full node EVM implementation.
     Geth,
-    /// The kitchensink runtime provides the PolkaVM (PVM) based node implentation.
+    /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
     Kitchensink,
 }

-impl Display for TestingPlatform {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Self::Geth => f.write_str("geth"),
-            Self::Kitchensink => f.write_str("revive"),
-        }
-    }
-}
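A small point worth calling out in the diff above: clap's `value_parser = parse_duration` hook is what turns `--geth.start-timeout-ms 5000` and the other timeout flags into a `Duration` at parse time. The function is short enough to test standalone (assuming the `anyhow` crate):

    use std::{str::FromStr, time::Duration};

    // Same shape as parse_duration in the diff: milliseconds on the command
    // line, Duration in the configuration struct.
    fn parse_duration(s: &str) -> anyhow::Result<Duration> {
        u64::from_str(s).map(Duration::from_millis).map_err(Into::into)
    }

    fn main() -> anyhow::Result<()> {
        assert_eq!(parse_duration("5000")?, Duration::from_secs(5));
        assert!(parse_duration("not-a-number").is_err());
        Ok(())
    }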
@@ -23,11 +23,19 @@ revive-dt-report = { workspace = true }

 alloy = { workspace = true }
 anyhow = { workspace = true }
+bson = { workspace = true }
+cacache = { workspace = true }
 clap = { workspace = true }
 futures = { workspace = true }
 indexmap = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
+tracing-appender = { workspace = true }
 tracing-subscriber = { workspace = true }
+schemars = { workspace = true }
 semver = { workspace = true }
-temp-dir = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+
+[lints]
+workspace = true
|
@@ -0,0 +1,359 @@
+//! A wrapper around the compiler which allows for caching of compilation artifacts so that they can
+//! be reused between runs.
+
+use std::{
+    borrow::Cow,
+    collections::HashMap,
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+
+use futures::FutureExt;
+use revive_dt_common::iterators::FilesWithExtensionIterator;
+use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
+use revive_dt_config::TestingPlatform;
+use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
+
+use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
+use anyhow::{Context as _, Error, Result};
+use revive_dt_report::ExecutionSpecificReporter;
+use semver::Version;
+use serde::{Deserialize, Serialize};
+use tokio::sync::{Mutex, RwLock};
+use tracing::{Instrument, debug, debug_span, instrument};
+
+use crate::Platform;
+
+pub struct CachedCompiler<'a> {
+    /// The cache that stores the compiled contracts.
+    artifacts_cache: ArtifactsCache,
+
+    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
+    /// come in for the same contract we never compile all of them and only compile it once and all
+    /// other tasks that request this same compilation concurrently get the cached version.
+    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
+}
+
+impl<'a> CachedCompiler<'a> {
+    pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
+        let mut cache = ArtifactsCache::new(path);
+        if invalidate_cache {
+            cache = cache
+                .with_invalidated_cache()
+                .await
+                .context("Failed to invalidate compilation cache directory")?;
+        }
+        Ok(Self {
+            artifacts_cache: cache,
+            cache_key_lock: Default::default(),
+        })
+    }
+
+    /// Compiles or gets the compilation artifacts from the cache.
+    #[allow(clippy::too_many_arguments)]
+    #[instrument(
+        level = "debug",
+        skip_all,
+        fields(
+            metadata_file_path = %metadata_file_path.display(),
+            %mode,
+            platform = P::config_id().to_string()
+        ),
+        err
+    )]
+    pub async fn compile_contracts<P: Platform>(
+        &self,
+        metadata: &'a Metadata,
+        metadata_file_path: &'a Path,
+        mode: Cow<'a, Mode>,
+        deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
+        compiler: &P::Compiler,
+        reporter: &ExecutionSpecificReporter,
+    ) -> Result<CompilerOutput> {
+        let cache_key = CacheKey {
+            platform_key: P::config_id(),
+            compiler_version: compiler.version().clone(),
+            metadata_file_path,
+            solc_mode: mode.clone(),
+        };
+
+        let compilation_callback = || {
+            async move {
+                compile_contracts::<P>(
+                    metadata
+                        .directory()
+                        .context("Failed to get metadata directory while preparing compilation")?,
+                    metadata
+                        .files_to_compile()
+                        .context("Failed to enumerate files to compile from metadata")?,
+                    &mode,
+                    deployed_libraries,
+                    compiler,
+                    reporter,
+                )
+                .map(|compilation_result| compilation_result.map(CacheValue::new))
+                .await
+            }
+            .instrument(debug_span!(
+                "Running compilation for the cache key",
+                cache_key.platform_key = %cache_key.platform_key,
+                cache_key.compiler_version = %cache_key.compiler_version,
+                cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
+                cache_key.solc_mode = %cache_key.solc_mode,
+            ))
+        };
+
+        let compiled_contracts = match deployed_libraries {
+            // If deployed libraries have been specified then we will re-compile the contract as it
+            // means that linking is required in this case.
+            Some(_) => {
+                debug!("Deployed libraries defined, recompilation must take place");
+                debug!("Cache miss");
+                compilation_callback()
+                    .await
+                    .context("Compilation callback for deployed libraries failed")?
+                    .compiler_output
+            }
+            // If no deployed libraries are specified then we can follow the cached flow and attempt
+            // to lookup the compilation artifacts in the cache.
+            None => {
+                debug!("Deployed libraries undefined, attempting to make use of cache");
+
+                // Lock this specific cache key such that we do not get inconsistent state. We want
+                // that when multiple cases come in asking for the compilation artifacts then they
+                // don't all trigger a compilation if there's a cache miss. Hence, the lock here.
+                let read_guard = self.cache_key_lock.read().await;
+                let mutex = match read_guard.get(&cache_key).cloned() {
+                    Some(value) => {
+                        drop(read_guard);
+                        value
+                    }
+                    None => {
+                        drop(read_guard);
+                        self.cache_key_lock
+                            .write()
+                            .await
+                            .entry(cache_key.clone())
+                            .or_default()
+                            .clone()
+                    }
+                };
+                let _guard = mutex.lock().await;
+
+                match self.artifacts_cache.get(&cache_key).await {
+                    Some(cache_value) => {
+                        if deployed_libraries.is_some() {
+                            reporter
+                                .report_post_link_contracts_compilation_succeeded_event(
+                                    compiler.version().clone(),
+                                    compiler.path(),
+                                    true,
+                                    None,
+                                    cache_value.compiler_output.clone(),
+                                )
+                                .expect("Can't happen");
+                        } else {
+                            reporter
+                                .report_pre_link_contracts_compilation_succeeded_event(
+                                    compiler.version().clone(),
+                                    compiler.path(),
+                                    true,
+                                    None,
+                                    cache_value.compiler_output.clone(),
+                                )
+                                .expect("Can't happen");
+                        }
+                        cache_value.compiler_output
+                    }
+                    None => {
+                        compilation_callback()
+                            .await
+                            .context("Compilation callback failed (cache miss path)")?
+                            .compiler_output
+                    }
+                }
+            }
+        };
+
+        Ok(compiled_contracts)
+    }
+}
+
+async fn compile_contracts<P: Platform>(
+    metadata_directory: impl AsRef<Path>,
+    mut files_to_compile: impl Iterator<Item = PathBuf>,
+    mode: &Mode,
+    deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
+    compiler: &P::Compiler,
+    reporter: &ExecutionSpecificReporter,
+) -> Result<CompilerOutput> {
+    let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
+        .with_allowed_extension("sol")
+        .with_use_cached_fs(true)
+        .collect::<Vec<_>>();
+
+    let compilation = Compiler::new()
+        .with_allow_path(metadata_directory)
+        // Handling the modes
+        .with_optimization(mode.optimize_setting)
+        .with_pipeline(mode.pipeline)
+        // Adding the contract sources to the compiler.
+        .try_then(|compiler| {
+            files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
+        })?
+        // Adding the deployed libraries to the compiler.
+        .then(|compiler| {
+            deployed_libraries
+                .iter()
+                .flat_map(|value| value.iter())
+                .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
+                .flat_map(|(_, ident, address, _)| {
+                    all_sources_in_dir
+                        .iter()
+                        .map(move |path| (ident, address, path))
+                })
+                .fold(compiler, |compiler, (ident, address, path)| {
+                    compiler.with_library(path, ident.as_str(), *address)
+                })
+        });
+
+    let input = compilation.input().clone();
+    let output = compilation.try_build(compiler).await;
+
+    match (output.as_ref(), deployed_libraries.is_some()) {
+        (Ok(output), true) => {
+            reporter
+                .report_post_link_contracts_compilation_succeeded_event(
+                    compiler.version().clone(),
+                    compiler.path(),
+                    false,
+                    input,
+                    output.clone(),
+                )
+                .expect("Can't happen");
+        }
+        (Ok(output), false) => {
+            reporter
+                .report_pre_link_contracts_compilation_succeeded_event(
+                    compiler.version().clone(),
+                    compiler.path(),
+                    false,
+                    input,
+                    output.clone(),
+                )
+                .expect("Can't happen");
+        }
+        (Err(err), true) => {
+            reporter
+                .report_post_link_contracts_compilation_failed_event(
+                    compiler.version().clone(),
+                    compiler.path().to_path_buf(),
+                    input,
+                    format!("{err:#}"),
+                )
+                .expect("Can't happen");
+        }
+        (Err(err), false) => {
+            reporter
+                .report_pre_link_contracts_compilation_failed_event(
+                    compiler.version().clone(),
+                    compiler.path().to_path_buf(),
+                    input,
+                    format!("{err:#}"),
+                )
+                .expect("Can't happen");
+        }
+    }
+
+    output
+}
+
+struct ArtifactsCache {
+    path: PathBuf,
+}
+
+impl ArtifactsCache {
+    pub fn new(path: impl AsRef<Path>) -> Self {
+        Self {
+            path: path.as_ref().to_path_buf(),
+        }
+    }
+
+    #[instrument(level = "debug", skip_all, err)]
+    pub async fn with_invalidated_cache(self) -> Result<Self> {
+        cacache::clear(self.path.as_path())
+            .await
+            .map_err(Into::<Error>::into)
+            .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
+        Ok(self)
+    }
+
+    #[instrument(level = "debug", skip_all, err)]
+    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
+        let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
+        let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
+        cacache::write(self.path.as_path(), key.encode_hex(), value)
+            .await
+            .with_context(|| {
+                format!("Failed to write cache entry under {}", self.path.display())
+            })?;
+        Ok(())
+    }
+
+    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
+        let key = bson::to_vec(key).ok()?;
+        let value = cacache::read(self.path.as_path(), key.encode_hex())
+            .await
+            .ok()?;
+        let value = bson::from_slice::<CacheValue>(&value).ok()?;
+        Some(value)
+    }
+
+    #[instrument(level = "debug", skip_all, err)]
+    pub async fn get_or_insert_with(
+        &self,
+        key: &CacheKey<'_>,
+        callback: impl AsyncFnOnce() -> Result<CacheValue>,
+    ) -> Result<CacheValue> {
+        match self.get(key).await {
+            Some(value) => {
+                debug!("Cache hit");
+                Ok(value)
+            }
+            None => {
+                debug!("Cache miss");
+                let value = callback().await?;
+                self.insert(key, &value).await?;
+                Ok(value)
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
+struct CacheKey<'a> {
+    /// The platform name that this artifact was compiled for. For example, this could be EVM or
+    /// PVM.
+    platform_key: &'a TestingPlatform,
+
+    /// The version of the compiler that was used to compile the artifacts.
+    compiler_version: Version,
+
+    /// The path of the metadata file that the compilation artifacts are for.
+    metadata_file_path: &'a Path,
+
+    /// The mode that the compilation artifacts were compiled with.
+    solc_mode: Cow<'a, Mode>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+struct CacheValue {
+    /// The compiler output from the compilation run.
+    compiler_output: CompilerOutput,
+}
+
+impl CacheValue {
+    pub fn new(compiler_output: CompilerOutput) -> Self {
+        Self { compiler_output }
+    }
+}
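A note on the `cache_key_lock` mechanism added above: it is a per-key double-checked lock, so concurrent requests for the same compilation serialize on a single mutex while requests for different keys proceed in parallel. The following standalone sketch shows the same idea in isolation; it is illustrative only (the `PerKeyLock` name and shape are ours, not the crate's), assuming just `std` and `tokio`.

```rust
use std::{collections::HashMap, hash::Hash, sync::Arc};

use tokio::sync::{Mutex, RwLock};

/// Illustrative sketch: one mutex per key, created lazily, so concurrent
/// callers computing the same key wait on each other instead of racing.
struct PerKeyLock<K> {
    locks: RwLock<HashMap<K, Arc<Mutex<()>>>>,
}

impl<K: Hash + Eq + Clone> PerKeyLock<K> {
    async fn lock_for(&self, key: &K) -> Arc<Mutex<()>> {
        // Fast path: a shared read lock checks whether the mutex exists.
        if let Some(mutex) = self.locks.read().await.get(key).cloned() {
            return mutex;
        }
        // Slow path: take the write lock; `or_default` keeps the insertion
        // race-free if two tasks both missed the read check.
        self.locks
            .write()
            .await
            .entry(key.clone())
            .or_default()
            .clone()
    }
}
```

A caller first obtains the `Arc<Mutex<()>>` for its key and only then awaits the inner lock before consulting the cache, which mirrors the shape of the `None` arm in `compile_contracts` above.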
+362 -188
@@ -4,31 +4,37 @@ use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::PathBuf;
 
+use alloy::consensus::EMPTY_ROOT_HASH;
+use alloy::hex;
 use alloy::json_abi::JsonAbi;
 use alloy::network::{Ethereum, TransactionBuilder};
 use alloy::primitives::U256;
 use alloy::rpc::types::TransactionReceipt;
 use alloy::rpc::types::trace::geth::{
-    CallFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingOptions, GethTrace,
-    PreStateConfig,
+    CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
+    GethDebugTracingOptions, GethTrace, PreStateConfig,
 };
 use alloy::{
     primitives::Address,
-    rpc::types::{
-        TransactionRequest,
-        trace::geth::{AccountState, DiffMode},
-    },
+    rpc::types::{TransactionRequest, trace::geth::DiffMode},
 };
-use anyhow::Context;
+use anyhow::Context as _;
+use futures::TryStreamExt;
 use indexmap::IndexMap;
+use revive_dt_format::traits::{ResolutionContext, ResolverApi};
+use revive_dt_report::ExecutionSpecificReporter;
 use semver::Version;
 
-use revive_dt_format::case::{Case, CaseIdx};
-use revive_dt_format::input::{Calldata, EtherValue, Expected, ExpectedOutput, Method};
-use revive_dt_format::metadata::{ContractInstance, ContractPathAndIdent};
-use revive_dt_format::{input::Input, metadata::Metadata};
-use revive_dt_node::Node;
+use revive_dt_format::case::Case;
+use revive_dt_format::input::{
+    BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, StepIdx,
+    StorageEmptyAssertion,
+};
+use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
+use revive_dt_format::{input::Step, metadata::Metadata};
 use revive_dt_node_interaction::EthereumNode;
+use tokio::try_join;
+use tracing::{Instrument, info, info_span, instrument};
 
 use crate::Platform;
@@ -37,7 +43,7 @@ pub struct CaseState<T: Platform> {
     compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 
     /// This map stores the contracts deployments for this case.
-    deployed_contracts: HashMap<ContractInstance, (Address, JsonAbi)>,
+    deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
 
     /// This map stores the variables used for each one of the cases contained in the metadata
     /// file.
@@ -46,6 +52,9 @@ pub struct CaseState<T: Platform> {
     /// Stores the version used for the current case.
     compiler_version: Version,
 
+    /// The execution reporter.
+    execution_reporter: ExecutionSpecificReporter,
+
     phantom: PhantomData<T>,
 }
@@ -56,55 +65,118 @@ where
     pub fn new(
         compiler_version: Version,
         compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
-        deployed_contracts: HashMap<ContractInstance, (Address, JsonAbi)>,
+        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
+        execution_reporter: ExecutionSpecificReporter,
     ) -> Self {
         Self {
             compiled_contracts,
             deployed_contracts,
             variables: Default::default(),
             compiler_version,
+            execution_reporter,
             phantom: PhantomData,
         }
     }
 
+    pub async fn handle_step(
+        &mut self,
+        metadata: &Metadata,
+        step: &Step,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<StepOutput> {
+        match step {
+            Step::FunctionCall(input) => {
+                let (receipt, geth_trace, diff_mode) = self
+                    .handle_input(metadata, input, node)
+                    .await
+                    .context("Failed to handle function call step")?;
+                Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
+            }
+            Step::BalanceAssertion(balance_assertion) => {
+                self.handle_balance_assertion(metadata, balance_assertion, node)
+                    .await
+                    .context("Failed to handle balance assertion step")?;
+                Ok(StepOutput::BalanceAssertion)
+            }
+            Step::StorageEmptyAssertion(storage_empty) => {
+                self.handle_storage_empty(metadata, storage_empty, node)
+                    .await
+                    .context("Failed to handle storage empty assertion step")?;
+                Ok(StepOutput::StorageEmptyAssertion)
+            }
+        }
+        .inspect(|_| info!("Step Succeeded"))
+    }
+
+    #[instrument(level = "info", name = "Handling Input", skip_all)]
     pub async fn handle_input(
         &mut self,
         metadata: &Metadata,
-        case_idx: CaseIdx,
         input: &Input,
         node: &T::Blockchain,
     ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
         let deployment_receipts = self
-            .handle_contract_deployment(metadata, case_idx, input, node)
-            .await?;
+            .handle_input_contract_deployment(metadata, input, node)
+            .await
+            .context("Failed during contract deployment phase of input handling")?;
         let execution_receipt = self
             .handle_input_execution(input, deployment_receipts, node)
-            .await?;
+            .await
+            .context("Failed during transaction execution phase of input handling")?;
         let tracing_result = self
             .handle_input_call_frame_tracing(&execution_receipt, node)
-            .await?;
-        self.handle_input_variable_assignment(input, &tracing_result)?;
-        self.handle_input_expectations(input, &execution_receipt, node, &tracing_result)
-            .await?;
-        self.handle_input_diff(case_idx, execution_receipt, node)
             .await
+            .context("Failed during callframe tracing phase of input handling")?;
+        self.handle_input_variable_assignment(input, &tracing_result)
+            .context("Failed to assign variables from callframe output")?;
+        let (_, (geth_trace, diff_mode)) = try_join!(
+            self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
+            self.handle_input_diff(&execution_receipt, node)
+        )
+        .context("Failed while evaluating expectations and diffs in parallel")?;
+        Ok((execution_receipt, geth_trace, diff_mode))
+    }
+
+    #[instrument(level = "info", name = "Handling Balance Assertion", skip_all)]
+    pub async fn handle_balance_assertion(
+        &mut self,
+        metadata: &Metadata,
+        balance_assertion: &BalanceAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
+            .await
+            .context("Failed to deploy contract for balance assertion")?;
+        self.handle_balance_assertion_execution(balance_assertion, node)
+            .await
+            .context("Failed to execute balance assertion")?;
+        Ok(())
+    }
+
+    #[instrument(level = "info", name = "Handling Storage Assertion", skip_all)]
+    pub async fn handle_storage_empty(
+        &mut self,
+        metadata: &Metadata,
+        storage_empty: &StorageEmptyAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
+            .await
+            .context("Failed to deploy contract for storage empty assertion")?;
+        self.handle_storage_empty_assertion_execution(storage_empty, node)
+            .await
+            .context("Failed to execute storage empty assertion")?;
+        Ok(())
     }
 
     /// Handles the contract deployment for a given input performing it if it needs to be performed.
-    async fn handle_contract_deployment(
+    #[instrument(level = "info", skip_all)]
+    async fn handle_input_contract_deployment(
         &mut self,
         metadata: &Metadata,
-        case_idx: CaseIdx,
         input: &Input,
         node: &T::Blockchain,
     ) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
-        let span = tracing::debug_span!(
-            "Handling contract deployment",
-            ?case_idx,
-            instance = ?input.instance
-        );
-        let _guard = span.enter();
 
         let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
         for instance in input.find_all_contract_instances().into_iter() {
             if !self.deployed_contracts.contains_key(&instance) {
@@ -116,11 +188,6 @@ where
         instances_we_must_deploy.insert(input.instance.clone(), true);
         }
 
-        tracing::debug!(
-            instances_to_deploy = instances_we_must_deploy.len(),
-            "Computed the number of required deployments for input"
-        );
-
         let mut receipts = HashMap::new();
         for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
             let calldata = deploy_with_constructor_arguments.then_some(&input.calldata);
@@ -137,7 +204,8 @@ where
                 value,
                 node,
             )
-            .await?
+            .await
+            .context("Failed to get or deploy contract instance during input execution")?
             {
                 receipts.insert(instance.clone(), receipt);
             }
@@ -147,6 +215,7 @@ where
     }
 
     /// Handles the execution of the input in terms of the calls that need to be made.
+    #[instrument(level = "info", skip_all)]
     async fn handle_input_execution(
         &mut self,
         input: &Input,
@@ -158,39 +227,27 @@ where
             // lookup the transaction receipt in this case and continue on.
             Method::Deployer => deployment_receipts
                 .remove(&input.instance)
-                .context("Failed to find deployment receipt"),
+                .context("Failed to find deployment receipt for constructor call"),
             Method::Fallback | Method::FunctionName(_) => {
                 let tx = match input
-                    .legacy_transaction(&self.deployed_contracts, &self.variables, node)
+                    .legacy_transaction(node, self.default_resolution_context())
                    .await
                 {
-                    Ok(tx) => {
-                        tracing::debug!("Legacy transaction data: {tx:#?}");
-                        tx
-                    }
+                    Ok(tx) => tx,
                     Err(err) => {
-                        tracing::error!("Failed to construct legacy transaction: {err:?}");
                         return Err(err);
                     }
                 };
 
-                tracing::trace!("Executing transaction for input: {input:?}");
-
                 match node.execute_transaction(tx).await {
                     Ok(receipt) => Ok(receipt),
-                    Err(err) => {
-                        tracing::error!(
-                            "Failed to execute transaction when executing the contract: {}, {:?}",
-                            &*input.instance,
-                            err
-                        );
-                        Err(err)
-                    }
+                    Err(err) => Err(err),
                 }
             }
        }
    }
 
+    #[instrument(level = "info", skip_all)]
     async fn handle_input_call_frame_tracing(
         &self,
         execution_receipt: &TransactionReceipt,
@@ -202,6 +259,14 @@ where
                 tracer: Some(GethDebugTracerType::BuiltInTracer(
                     GethDebugBuiltInTracerType::CallTracer,
                 )),
+                tracer_config: GethDebugTracerConfig(serde_json::json! {{
+                    "onlyTopCall": true,
+                    "withLog": false,
+                    "withStorage": false,
+                    "withMemory": false,
+                    "withStack": false,
+                    "withReturnData": true
+                }}),
                 ..Default::default()
             },
         )
@@ -213,6 +278,7 @@ where
         })
     }
 
+    #[instrument(level = "info", skip_all)]
     fn handle_input_variable_assignment(
         &mut self,
         input: &Input,
@@ -233,21 +299,24 @@ where
         ) {
             let value = U256::from_be_slice(output_word);
             self.variables.insert(variable_name.clone(), value);
+            tracing::info!(
+                variable_name,
+                variable_value = hex::encode(value.to_be_bytes::<32>()),
+                "Assigned variable"
+            );
         }
 
         Ok(())
     }
 
+    #[instrument(level = "info", skip_all)]
     async fn handle_input_expectations(
-        &mut self,
+        &self,
         input: &Input,
         execution_receipt: &TransactionReceipt,
-        node: &T::Blockchain,
+        resolver: &impl ResolverApi,
         tracing_result: &CallFrame,
     ) -> anyhow::Result<()> {
-        let span = tracing::info_span!("Handling input expectations");
-        let _guard = span.enter();
-
         // Resolving the `input.expected` into a series of expectations that we can then assert on.
         let mut expectations = match input {
             Input {
@@ -276,24 +345,25 @@ where
             }
         }
 
-        for expectation in expectations.iter() {
+        futures::stream::iter(expectations.into_iter().map(Ok))
+            .try_for_each_concurrent(None, |expectation| async move {
                 self.handle_input_expectation_item(
                     execution_receipt,
-                    node,
+                    resolver,
                     expectation,
                     tracing_result,
                 )
-                .await?;
-        }
-        Ok(())
+                .await
+            })
+            .await
     }
 
+    #[instrument(level = "info", skip_all)]
     async fn handle_input_expectation_item(
-        &mut self,
+        &self,
         execution_receipt: &TransactionReceipt,
-        node: &T::Blockchain,
+        resolver: &impl ResolverApi,
-        expectation: &ExpectedOutput,
+        expectation: ExpectedOutput,
         tracing_result: &CallFrame,
     ) -> anyhow::Result<()> {
         if let Some(ref version_requirement) = expectation.compiler_version {
@@ -302,9 +372,10 @@ where
             }
         }
 
-        let deployed_contracts = &mut self.deployed_contracts;
-        let variables = &mut self.variables;
-        let chain_state_provider = node;
+        let resolution_context = self
+            .default_resolution_context()
+            .with_block_number(execution_receipt.block_number.as_ref())
+            .with_transaction_hash(&execution_receipt.transaction_hash);
 
         // Handling the receipt state assertion.
         let expected = !expectation.exception;
@@ -327,13 +398,9 @@ where
             let expected = expected_calldata;
             let actual = &tracing_result.output.as_ref().unwrap_or_default();
             if !expected
-                .is_equivalent(
-                    actual,
-                    deployed_contracts,
-                    &*variables,
-                    chain_state_provider,
-                )
-                .await?
+                .is_equivalent(actual, resolver, resolution_context)
+                .await
+                .context("Failed to resolve calldata equivalence for return data assertion")?
             {
                 tracing::error!(
                     ?execution_receipt,
@@ -358,14 +425,16 @@ where
         }
 
         // Handling the events assertion.
-        for (expected_event, actual_event) in
-            expected_events.iter().zip(execution_receipt.logs())
+        for (event_idx, (expected_event, actual_event)) in expected_events
+            .iter()
+            .zip(execution_receipt.logs())
+            .enumerate()
         {
             // Handling the emitter assertion.
             if let Some(ref expected_address) = expected_event.address {
                 let expected = Address::from_slice(
                     Calldata::new_compound([expected_address])
-                        .calldata(deployed_contracts, &*variables, node)
+                        .calldata(resolver, resolution_context)
                         .await?
                         .get(12..32)
                         .expect("Can't fail"),
@@ -373,6 +442,7 @@ where
                 let actual = actual_event.address();
                 if actual != expected {
                     tracing::error!(
+                        event_idx,
                         %expected,
                         %actual,
                         "Event emitter assertion failed",
@@ -392,15 +462,12 @@ where
             {
                 let expected = Calldata::new_compound([expected]);
                 if !expected
-                    .is_equivalent(
-                        &actual.0,
-                        deployed_contracts,
-                        &*variables,
-                        chain_state_provider,
-                    )
-                    .await?
+                    .is_equivalent(&actual.0, resolver, resolution_context)
+                    .await
+                    .context("Failed to resolve event topic equivalence")?
                 {
                     tracing::error!(
+                        event_idx,
                         ?execution_receipt,
                         ?expected,
                         ?actual,
@@ -416,15 +483,12 @@ where
             let expected = &expected_event.values;
             let actual = &actual_event.data().data;
             if !expected
-                .is_equivalent(
-                    &actual.0,
-                    deployed_contracts,
-                    &*variables,
-                    chain_state_provider,
-                )
-                .await?
+                .is_equivalent(&actual.0, resolver, resolution_context)
+                .await
+                .context("Failed to resolve event value equivalence")?
             {
                 tracing::error!(
+                    event_idx,
                     ?execution_receipt,
                     ?expected,
                     ?actual,
@@ -440,15 +504,12 @@ where
         Ok(())
     }
 
+    #[instrument(level = "info", skip_all)]
     async fn handle_input_diff(
-        &mut self,
-        _: CaseIdx,
-        execution_receipt: TransactionReceipt,
+        &self,
+        execution_receipt: &TransactionReceipt,
         node: &T::Blockchain,
-    ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
-        let span = tracing::info_span!("Handling input diff");
-        let _guard = span.enter();
-
+    ) -> anyhow::Result<(GethTrace, DiffMode)> {
         let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
             diff_mode: Some(true),
             disable_code: None,
@@ -456,11 +517,141 @@ where
         });
 
         let trace = node
-            .trace_transaction(&execution_receipt, trace_options)
-            .await?;
-        let diff = node.state_diff(&execution_receipt).await?;
+            .trace_transaction(execution_receipt, trace_options)
+            .await
+            .context("Failed to obtain geth prestate tracer output")?;
+        let diff = node
+            .state_diff(execution_receipt)
+            .await
+            .context("Failed to obtain state diff for transaction")?;
 
-        Ok((execution_receipt, trace, diff))
+        Ok((trace, diff))
+    }
+
+    #[instrument(level = "info", skip_all)]
+    pub async fn handle_balance_assertion_contract_deployment(
+        &mut self,
+        metadata: &Metadata,
+        balance_assertion: &BalanceAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        let Some(instance) = balance_assertion
+            .address
+            .strip_suffix(".address")
+            .map(ContractInstance::new)
+        else {
+            return Ok(());
+        };
+        self.get_or_deploy_contract_instance(
+            &instance,
+            metadata,
+            Input::default_caller(),
+            None,
+            None,
+            node,
+        )
+        .await?;
+        Ok(())
+    }
+
+    #[instrument(level = "info", skip_all)]
+    pub async fn handle_balance_assertion_execution(
+        &mut self,
+        BalanceAssertion {
+            address: address_string,
+            expected_balance: amount,
+            ..
+        }: &BalanceAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        let address = Address::from_slice(
+            Calldata::new_compound([address_string])
+                .calldata(node, self.default_resolution_context())
+                .await?
+                .get(12..32)
+                .expect("Can't fail"),
+        );
+
+        let balance = node.balance_of(address).await?;
+
+        let expected = *amount;
+        let actual = balance;
+        if expected != actual {
+            tracing::error!(%expected, %actual, %address, "Balance assertion failed");
+            anyhow::bail!(
+                "Balance assertion failed - Expected {} but got {} for {} resolved to {}",
+                expected,
+                actual,
+                address_string,
+                address,
+            )
+        }
+
+        Ok(())
+    }
+
+    #[instrument(level = "info", skip_all)]
+    pub async fn handle_storage_empty_assertion_contract_deployment(
+        &mut self,
+        metadata: &Metadata,
+        storage_empty_assertion: &StorageEmptyAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        let Some(instance) = storage_empty_assertion
+            .address
+            .strip_suffix(".address")
+            .map(ContractInstance::new)
+        else {
+            return Ok(());
+        };
+        self.get_or_deploy_contract_instance(
+            &instance,
+            metadata,
+            Input::default_caller(),
+            None,
+            None,
+            node,
+        )
+        .await?;
+        Ok(())
+    }
+
+    #[instrument(level = "info", skip_all)]
+    pub async fn handle_storage_empty_assertion_execution(
+        &mut self,
+        StorageEmptyAssertion {
+            address: address_string,
+            is_storage_empty,
+            ..
+        }: &StorageEmptyAssertion,
+        node: &T::Blockchain,
+    ) -> anyhow::Result<()> {
+        let address = Address::from_slice(
+            Calldata::new_compound([address_string])
+                .calldata(node, self.default_resolution_context())
+                .await?
+                .get(12..32)
+                .expect("Can't fail"),
+        );
+
+        let storage = node.latest_state_proof(address, Default::default()).await?;
+        let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;
+
+        let expected = is_storage_empty;
+        let actual = is_empty;
+
+        if *expected != actual {
+            tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed");
+            anyhow::bail!(
+                "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
+                expected,
+                actual,
+                address_string,
+                address,
+            )
+        };
+
+        Ok(())
     }
 
     /// Gets the information of a deployed contract or library from the state. If it's found to not
@@ -478,7 +669,7 @@ where
         value: Option<EtherValue>,
         node: &T::Blockchain,
     ) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
-        if let Some((address, abi)) = self.deployed_contracts.get(contract_instance) {
+        if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
            return Ok((*address, abi.clone(), None));
        }
 
@@ -487,7 +678,6 @@ where
             contract_ident,
         }) = metadata.contract_sources()?.remove(contract_instance)
         else {
-            tracing::error!("Contract source not found for instance");
             anyhow::bail!(
                 "Contract source not found for instance {:?}",
                 contract_instance
@@ -500,11 +690,6 @@ where
             .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
             .cloned()
         else {
-            tracing::error!(
-                contract_source_path = contract_source_path.display().to_string(),
-                contract_ident = contract_ident.as_ref(),
-                "Failed to find information for contract"
-            );
             anyhow::bail!(
                 "Failed to find information for contract {:?}",
                 contract_instance
@@ -526,7 +711,7 @@ where
 
         if let Some(calldata) = calldata {
             let calldata = calldata
-                .calldata(&self.deployed_contracts, None, node)
+                .calldata(node, self.default_resolution_context())
                 .await?;
             code.extend(calldata);
         }
@@ -553,7 +738,6 @@ where
         };
 
         let Some(address) = receipt.contract_address else {
-            tracing::error!("Contract deployment transaction didn't return an address");
             anyhow::bail!("Contract deployment didn't return an address");
         };
         tracing::info!(
@@ -561,18 +745,27 @@ where
             instance_address = ?address,
             "Deployed contract"
         );
+        self.execution_reporter
+            .report_contract_deployed_event(contract_instance.clone(), address)?;
 
-        self.deployed_contracts
-            .insert(contract_instance.clone(), (address, abi.clone()));
+        self.deployed_contracts.insert(
+            contract_instance.clone(),
+            (contract_ident, address, abi.clone()),
+        );
+
         Ok((address, abi, Some(receipt)))
     }
+
+    fn default_resolution_context(&self) -> ResolutionContext<'_> {
+        ResolutionContext::default()
+            .with_deployed_contracts(&self.deployed_contracts)
+            .with_variables(&self.variables)
+    }
 }
 
 pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> {
     metadata: &'a Metadata,
     case: &'a Case,
-    case_idx: CaseIdx,
     leader_node: &'a Leader::Blockchain,
     follower_node: &'a Follower::Blockchain,
     leader_state: CaseState<Leader>,
@@ -588,7 +781,6 @@ where
     pub fn new(
         metadata: &'a Metadata,
         case: &'a Case,
-        case_idx: impl Into<CaseIdx>,
         leader_node: &'a L::Blockchain,
         follower_node: &'a F::Blockchain,
         leader_state: CaseState<L>,
@@ -597,7 +789,6 @@ where
         Self {
             metadata,
             case,
-            case_idx: case_idx.into(),
             leader_node,
             follower_node,
             leader_state,
@@ -605,78 +796,61 @@ where
         }
     }
 
-    pub fn trace_diff_mode(label: &str, diff: &DiffMode) {
-        tracing::trace!("{label} - PRE STATE:");
-        for (addr, state) in &diff.pre {
-            Self::trace_account_state(" [pre]", addr, state);
-        }
-
-        tracing::trace!("{label} - POST STATE:");
-        for (addr, state) in &diff.post {
-            Self::trace_account_state(" [post]", addr, state);
-        }
-    }
-
-    fn trace_account_state(prefix: &str, addr: &Address, state: &AccountState) {
-        tracing::trace!("{prefix} 0x{addr:x}");
-
-        if let Some(balance) = &state.balance {
-            tracing::trace!("{prefix} balance: {balance}");
-        }
-        if let Some(nonce) = &state.nonce {
-            tracing::trace!("{prefix} nonce: {nonce}");
-        }
-        if let Some(code) = &state.code {
-            tracing::trace!("{prefix} code: {code}");
-        }
-    }
-
+    #[instrument(level = "info", name = "Executing Case", skip_all)]
     pub async fn execute(&mut self) -> anyhow::Result<usize> {
-        if !self
-            .leader_node
-            .matches_target(self.metadata.targets.as_deref())
-            || !self
-                .follower_node
-                .matches_target(self.metadata.targets.as_deref())
-        {
-            tracing::warn!(
-                targets = ?self.metadata.targets,
-                "Either the leader or follower node do not support the targets of the file"
-            );
-            return Ok(0);
-        }
-
-        let mut inputs_executed = 0;
-        for (input_idx, input) in self.case.inputs_iterator().enumerate() {
-            let tracing_span = tracing::info_span!("Handling input", input_idx);
-            let _guard = tracing_span.enter();
-
-            let (leader_receipt, _, leader_diff) = self
-                .leader_state
-                .handle_input(self.metadata, self.case_idx, &input, self.leader_node)
-                .await?;
-            let (follower_receipt, _, follower_diff) = self
-                .follower_state
-                .handle_input(self.metadata, self.case_idx, &input, self.follower_node)
-                .await?;
-
-            if leader_diff == follower_diff {
-                tracing::debug!("State diffs match between leader and follower.");
-            } else {
-                tracing::debug!("State diffs mismatch between leader and follower.");
-                Self::trace_diff_mode("Leader", &leader_diff);
-                Self::trace_diff_mode("Follower", &follower_diff);
-            }
-
-            if leader_receipt.logs() != follower_receipt.logs() {
-                tracing::debug!("Log/event mismatch between leader and follower.");
-                tracing::trace!("Leader logs: {:?}", leader_receipt.logs());
-                tracing::trace!("Follower logs: {:?}", follower_receipt.logs());
-            }
-
-            inputs_executed += 1;
+        let mut steps_executed = 0;
+        for (step_idx, step) in self
+            .case
+            .steps_iterator()
+            .enumerate()
+            .map(|(idx, v)| (StepIdx::new(idx), v))
+        {
+            let (leader_step_output, follower_step_output) = try_join!(
+                self.leader_state
+                    .handle_step(self.metadata, &step, self.leader_node)
+                    .instrument(info_span!(
+                        "Handling Step",
+                        %step_idx,
+                        target = "Leader",
+                    )),
+                self.follower_state
+                    .handle_step(self.metadata, &step, self.follower_node)
+                    .instrument(info_span!(
+                        "Handling Step",
+                        %step_idx,
+                        target = "Follower",
+                    ))
+            )?;
+
+            match (leader_step_output, follower_step_output) {
+                (StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => {
+                    // TODO: We need to actually work out how/if we will compare the diff between
+                    // the leader and the follower. The diffs are almost guaranteed to be different
+                    // from leader and follower and therefore without an actual strategy for this
+                    // we have something that's guaranteed to fail. Even a simple call to some
+                    // contract will produce two non-equal diffs because on the leader the contract
+                    // has address X and on the follower it has address Y. On the leader contract X
+                    // contains address A in the state and on the follower it contains address B. So
+                    // this isn't exactly a straightforward thing to do and I'm not even sure that
+                    // it's possible to do. Once we have an actual strategy for doing the diffs we
+                    // will implement it here. Until then, this remains empty.
+                }
+                (StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {}
+                (StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {}
+                _ => unreachable!("The two step outputs can not be of a different kind"),
+            }
+
+            steps_executed += 1;
         }
 
-        Ok(inputs_executed)
+        Ok(steps_executed)
     }
 }
+
+#[derive(Clone, Debug)]
+#[allow(clippy::large_enum_variant)]
+pub enum StepOutput {
+    FunctionCall(TransactionReceipt, GethTrace, DiffMode),
+    BalanceAssertion,
+    StorageEmptyAssertion,
+}
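The reworked `execute` loop above drives the leader and the follower with `tokio::try_join!`, so each step runs against both nodes concurrently and the first error cancels the pair. A minimal sketch of that pattern, with stand-in functions in place of `handle_step` (names here are illustrative, not the crate's API):

```rust
use anyhow::Result;

// Stand-in for `CaseState::handle_step` against one node.
async fn run_step_on(node: &'static str) -> Result<String> {
    Ok(format!("{node}: step ok"))
}

async fn run_step_on_both() -> Result<()> {
    // Both futures are polled concurrently; if either returns `Err`,
    // `try_join!` short-circuits and returns that error.
    let (leader, follower) = tokio::try_join!(
        run_step_on("leader"),
        run_step_on("follower"),
    )?;
    println!("{leader} / {follower}");
    Ok(())
}
```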
@@ -19,7 +19,7 @@ pub trait Platform {
     type Compiler: SolidityCompiler;
 
     /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
-    fn config_id() -> TestingPlatform;
+    fn config_id() -> &'static TestingPlatform;
 }
 
 #[derive(Default)]
@@ -29,8 +29,8 @@ impl Platform for Geth {
     type Blockchain = geth::GethNode;
     type Compiler = solc::Solc;
 
-    fn config_id() -> TestingPlatform {
-        TestingPlatform::Geth
+    fn config_id() -> &'static TestingPlatform {
+        &TestingPlatform::Geth
     }
 }
 
@@ -41,7 +41,7 @@ impl Platform for Kitchensink {
     type Blockchain = KitchensinkNode;
     type Compiler = revive_resolc::Resolc;
 
-    fn config_id() -> TestingPlatform {
-        TestingPlatform::Kitchensink
+    fn config_id() -> &'static TestingPlatform {
+        &TestingPlatform::Kitchensink
     }
 }
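The `config_id` change above compiles because `&TestingPlatform::Geth` is a reference to a constant expression, and Rust promotes such borrows to `'static`. A minimal sketch of the same shape (stand-in types, not the crate's):

```rust
#[derive(Debug, PartialEq, Eq)]
enum TestingPlatform {
    Geth,
    Kitchensink,
}

trait Platform {
    // Previously returned `TestingPlatform` by value; the borrow form
    // works because a fieldless variant is promoted to a static.
    fn config_id() -> &'static TestingPlatform;
}

struct Geth;

impl Platform for Geth {
    fn config_id() -> &'static TestingPlatform {
        &TestingPlatform::Geth
    }
}
```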
+664 -406
File diff suppressed because it is too large
@@ -11,14 +11,22 @@ rust-version.workspace = true
 [dependencies]
 revive-dt-common = { workspace = true }
+
+revive-common = { workspace = true }
+
 alloy = { workspace = true }
 alloy-primitives = { workspace = true }
 alloy-sol-types = { workspace = true }
 anyhow = { workspace = true }
+futures = { workspace = true }
+regex = { workspace = true }
 tracing = { workspace = true }
+schemars = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
 
 [dev-dependencies]
 tokio = { workspace = true }
+
+[lints]
+workspace = true
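The `schemars` dependency added above gives the format types a derivable `JsonSchema`. A minimal, hypothetical sketch of how a schema could be emitted from such a type (this miniature `Case` is ours; the real one follows later in this diff):

```rust
use schemars::{JsonSchema, schema_for};
use serde::{Deserialize, Serialize};

// Hypothetical miniature of a metadata type deriving `JsonSchema`.
#[derive(Serialize, Deserialize, JsonSchema)]
struct Case {
    name: Option<String>,
    comment: Option<String>,
}

fn main() {
    // `schema_for!` builds a root schema; pretty-printing it is one way
    // a committed JSON schema file could be generated.
    let schema = schema_for!(Case);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}
```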
+66 -16
@@ -1,31 +1,73 @@
-use serde::Deserialize;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
 
-use revive_dt_common::macros::define_wrapper_type;
+use revive_dt_common::{macros::define_wrapper_type, types::Mode};
 
 use crate::{
-    input::{Expected, Input},
-    mode::Mode,
+    input::{Expected, Step},
+    mode::ParsedMode,
 };
 
-#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
+#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
 pub struct Case {
+    /// An optional name of the test case.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
+
+    /// An optional comment on the case which has no impact on the execution in any way.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
-    pub modes: Option<Vec<Mode>>,
-    pub inputs: Vec<Input>,
+
+    /// This represents a mode that has been parsed from test metadata.
+    ///
+    /// Mode strings can take the following form (in pseudo-regex):
+    ///
+    /// ```text
+    /// [YEILV][+-]? (M[0123sz])? <semver>?
+    /// ```
+    ///
+    /// If this is provided then it takes higher priority than the modes specified in the metadata
+    /// file.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub modes: Option<Vec<ParsedMode>>,
+
+    /// The set of steps to run as part of this test case.
+    #[serde(rename = "inputs")]
+    pub steps: Vec<Step>,
+
+    /// An optional name of the group of tests that this test belongs to.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub group: Option<String>,
+
+    /// An optional set of expectations and assertions to make about the transaction after it ran.
+    ///
+    /// If this is not specified then the only assertion that will be run is that the transaction
+    /// was successful.
+    ///
+    /// This expectation that's on the case itself will be attached to the final step of the case.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<Expected>,
+
+    /// An optional boolean which defines if the case as a whole should be ignored. If null then the
+    /// case will not be ignored.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ignore: Option<bool>,
 }
 
 impl Case {
-    pub fn inputs_iterator(&self) -> impl Iterator<Item = Input> {
-        let inputs_len = self.inputs.len();
-        self.inputs
+    #[allow(irrefutable_let_patterns)]
+    pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
+        let steps_len = self.steps.len();
+        self.steps
             .clone()
             .into_iter()
             .enumerate()
-            .map(move |(idx, mut input)| {
-                if idx + 1 == inputs_len {
+            .map(move |(idx, mut step)| {
+                let Step::FunctionCall(ref mut input) = step else {
+                    return step;
+                };
+
+                if idx + 1 == steps_len {
                     if input.expected.is_none() {
                         input.expected = self.expected.clone();
                     }
@@ -35,16 +77,24 @@ impl Case {
                 // the case? What are we supposed to do with that final expected field on the
                 // case?
 
-                input
+                step
             } else {
-                input
+                step
             }
         })
     }
+
+    pub fn solc_modes(&self) -> Vec<Mode> {
+        match &self.modes {
+            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
+            None => Mode::all().cloned().collect(),
+        }
+    }
 }
 
 define_wrapper_type!(
     /// A wrapper type for the index of test cases found in metadata file.
-    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-    pub struct CaseIdx(usize);
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+    #[serde(transparent)]
+    pub struct CaseIdx(usize) impl Display, FromStr;
 );
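For the mode-string grammar documented on the `modes` field above, a concrete matcher can be sketched with the `regex` crate that this change adds to the workspace. This is a hypothetical illustration of the documented shape only; the crate's real `ParsedMode` parser is not shown in this diff and may differ:

```rust
use regex::Regex;

/// Hypothetical: accepts strings of the documented shape
/// `[YEILV][+-]? (M[0123sz])? <semver>?`, e.g. "Y+", "E- M3", "V M0 0.8.24".
fn looks_like_mode_string(s: &str) -> bool {
    let pattern = r"^[YEILV][+-]?(\s+M[0123sz])?(\s+\d+\.\d+\.\d+)?$";
    Regex::new(pattern)
        .expect("static pattern is valid")
        .is_match(s)
}
```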
+101 -69
@@ -3,97 +3,129 @@ use std::{
     path::{Path, PathBuf},
 };
 
+use revive_dt_common::iterators::FilesWithExtensionIterator;
 use serde::{Deserialize, Serialize};
+use tracing::{debug, info};
 
-use crate::metadata::MetadataFile;
+use crate::metadata::{Metadata, MetadataFile};
+use anyhow::Context as _;
 
-#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, Hash)]
-pub struct Corpus {
-    pub name: String,
-    pub path: PathBuf,
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Corpus {
+    SinglePath { name: String, path: PathBuf },
+    MultiplePaths { name: String, paths: Vec<PathBuf> },
 }
 
 impl Corpus {
-    /// Try to read and parse the corpus definition file at given `path`.
-    pub fn try_from_path(path: &Path) -> anyhow::Result<Self> {
-        let file = File::open(path)?;
-        let mut corpus: Corpus = serde_json::from_reader(file)?;
-
-        // Ensure that the path mentioned in the corpus is relative to the corpus file.
-        // Canonicalizing also helps make the path in any errors unambiguous.
-        corpus.path = path
-            .parent()
-            .ok_or_else(|| {
-                anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display())
-            })?
-            .canonicalize()
-            .map_err(|error| {
-                anyhow::anyhow!(
-                    "Failed to canonicalize path to corpus '{}': {error}",
-                    path.display()
-                )
-            })?
-            .join(corpus.path);
+    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        let mut corpus = File::open(file_path.as_ref())
+            .map_err(anyhow::Error::from)
+            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
+            .with_context(|| {
+                format!(
+                    "Failed to open and deserialize corpus file at {}",
+                    file_path.as_ref().display()
+                )
+            })?;
+
+        let corpus_directory = file_path
+            .as_ref()
+            .canonicalize()
+            .context("Failed to canonicalize the path to the corpus file")?
+            .parent()
+            .context("Corpus file has no parent")?
+            .to_path_buf();
+
+        for path in corpus.paths_iter_mut() {
+            *path = corpus_directory.join(path.as_path())
+        }
 
         Ok(corpus)
     }
 
-    /// Scan the corpus base directory and return all tests found.
     pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
-        let mut tests = Vec::new();
-        collect_metadata(&self.path, &mut tests);
+        let mut tests = self
+            .paths_iter()
+            .flat_map(|root_path| {
+                if !root_path.is_dir() {
+                    Box::new(std::iter::once(root_path.to_path_buf()))
+                        as Box<dyn Iterator<Item = _>>
+                } else {
+                    Box::new(
+                        FilesWithExtensionIterator::new(root_path)
+                            .with_use_cached_fs(true)
+                            .with_allowed_extension("sol")
+                            .with_allowed_extension("json"),
+                    )
+                }
+                .map(move |metadata_file_path| (root_path, metadata_file_path))
+            })
+            .filter_map(|(root_path, metadata_file_path)| {
+                Metadata::try_from_file(&metadata_file_path)
+                    .or_else(|| {
+                        debug!(
+                            discovered_from = %root_path.display(),
+                            metadata_file_path = %metadata_file_path.display(),
+                            "Skipping file since it doesn't contain valid metadata"
+                        );
+                        None
+                    })
+                    .map(|metadata| MetadataFile {
+                        metadata_file_path,
+                        corpus_file_path: root_path.to_path_buf(),
+                        content: metadata,
+                    })
+                    .inspect(|metadata_file| {
+                        debug!(
+                            metadata_file_path = %metadata_file.relative_path().display(),
+                            "Loaded metadata file"
+                        )
+                    })
+            })
+            .collect::<Vec<_>>();
+        tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
+        tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
+        info!(
+            len = tests.len(),
+            corpus_name = self.name(),
+            "Found tests in Corpus"
+        );
         tests
     }
-}
 
-/// Recursively walks `path` and parses any JSON or Solidity file into a test
-/// definition [Metadata].
-///
-/// Found tests are inserted into `tests`.
-///
-/// `path` is expected to be a directory.
-pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
-    if path.is_dir() {
-        let dir_entry = match std::fs::read_dir(path) {
-            Ok(dir_entry) => dir_entry,
-            Err(error) => {
-                tracing::error!("failed to read dir '{}': {error}", path.display());
-                return;
-            }
-        };
-
-        for entry in dir_entry {
-            let entry = match entry {
-                Ok(entry) => entry,
-                Err(error) => {
-                    tracing::error!("error reading dir entry: {error}");
-                    continue;
-                }
-            };
-
-            let path = entry.path();
-            if path.is_dir() {
-                collect_metadata(&path, tests);
-                continue;
-            }
-
-            if path.is_file() {
-                if let Some(metadata) = MetadataFile::try_from_file(&path) {
-                    tests.push(metadata)
-                }
-            } else {
-                let Some(extension) = path.extension() else {
-                    tracing::error!("Failed to get file extension");
-                    return;
-                };
-                if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") {
-                    if let Some(metadata) = MetadataFile::try_from_file(path) {
-                        tests.push(metadata)
-                    }
-                } else {
-                    tracing::error!(?extension, "Unsupported file extension");
-                }
-            }
-        }
-    }
-}
+    pub fn name(&self) -> &str {
+        match self {
+            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
+        }
+    }
+
+    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
+        match self {
+            Corpus::SinglePath { path, .. } => {
+                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
+            }
+            Corpus::MultiplePaths { paths, .. } => {
+                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
+            }
+        }
+    }
+
+    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
+        match self {
+            Corpus::SinglePath { path, .. } => {
+                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
+            }
+            Corpus::MultiplePaths { paths, .. } => {
+                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
+            }
+        }
+    }
+
+    pub fn path_count(&self) -> usize {
+        match self {
+            Corpus::SinglePath { .. } => 1,
+            Corpus::MultiplePaths { paths, .. } => paths.len(),
+        }
+    }
+}
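Because `Corpus` is now `#[serde(untagged)]`, either corpus-file shape deserializes: a lone `path` selects `SinglePath`, while a `paths` array selects `MultiplePaths`. A sketch with stand-in types (mirroring the enum above, not the crate's own):

```rust
use std::path::PathBuf;

use serde::Deserialize;

// Stand-in for the untagged enum in the diff above; with #[serde(untagged)],
// serde tries each variant in order and the field names decide which matches.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Corpus {
    SinglePath { name: String, path: PathBuf },
    MultiplePaths { name: String, paths: Vec<PathBuf> },
}

fn main() {
    // A single `path` field selects the SinglePath variant...
    let single: Corpus =
        serde_json::from_str(r#"{ "name": "example", "path": "tests" }"#).unwrap();
    // ...while a `paths` array selects MultiplePaths.
    let multi: Corpus = serde_json::from_str(
        r#"{ "name": "example", "paths": ["tests/a", "tests/b"] }"#,
    )
    .unwrap();
    println!("{single:?}\n{multi:?}");
}
```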
+339 -215
@@ -2,59 +2,172 @@ use std::collections::HashMap;
 
 use alloy::{
     eips::BlockNumberOrTag,
-    hex::ToHexExt,
-    json_abi::JsonAbi,
+    json_abi::Function,
     network::TransactionBuilder,
     primitives::{Address, Bytes, U256},
     rpc::types::TransactionRequest,
 };
 use alloy_primitives::{FixedBytes, utils::parse_units};
-use anyhow::Context;
+use anyhow::Context as _;
+use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
+use schemars::JsonSchema;
 use semver::VersionReq;
 use serde::{Deserialize, Serialize};
 
 use revive_dt_common::macros::define_wrapper_type;
+use tracing::{Instrument, info_span, instrument};
 
-use crate::metadata::ContractInstance;
 use crate::traits::ResolverApi;
+use crate::{metadata::ContractInstance, traits::ResolutionContext};
 
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
+/// A test step.
+///
+/// A test step can be anything. It could be an invocation to a function, an assertion, or any other
+/// action that needs to be run or executed on the nodes used in the tests.
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
+#[serde(untagged)]
+pub enum Step {
+    /// A function call or an invocation to some function on some smart contract.
+    FunctionCall(Box<Input>),
+    /// A step for performing a balance assertion on some account or contract.
+    BalanceAssertion(Box<BalanceAssertion>),
+    /// A step for asserting that the storage of some contract or account is empty.
+    StorageEmptyAssertion(Box<StorageEmptyAssertion>),
+}
+
+define_wrapper_type!(
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+    pub struct StepIdx(usize) impl Display;
+);
+
+/// This is an input step which is a transaction description that the framework translates into a
+/// transaction and executes on the nodes.
+#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
 pub struct Input {
+    /// The address of the account performing the call and paying the fees for it.
     #[serde(default = "Input::default_caller")]
+    #[schemars(with = "String")]
     pub caller: Address,
+
+    /// An optional comment on the step which has no impact on the execution in any way.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
+
+    /// The contract instance that's being called in this transaction step.
     #[serde(default = "Input::default_instance")]
     pub instance: ContractInstance,
+
+    /// The method that's being called in this step.
     pub method: Method,
+
+    /// The calldata that the function should be invoked with.
     #[serde(default)]
     pub calldata: Calldata,
+
+    /// A set of assertions and expectations to have for the transaction.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<Expected>,
+
+    /// An optional value to provide as part of the transaction.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub value: Option<EtherValue>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schemars(skip)]
     pub storage: Option<HashMap<String, Calldata>>,
+
+    /// Variable assignment to perform in the framework allowing us to reference them again later on
+    /// during the execution.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub variable_assignments: Option<VariableAssignments>,
 }
 
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
+/// This represents a balance assertion step where the framework needs to query the balance of some
+/// account or contract and assert that it's some amount.
+#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
+pub struct BalanceAssertion {
+    /// An optional comment on the balance assertion.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comment: Option<String>,
+
+    /// The address that the balance assertion should be done on.
+    ///
+    /// This is a string which will be resolved into an address when being processed. Therefore,
+    /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
+    /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
+    /// followed in the calldata.
+    pub address: String,
+
+    /// The amount of balance to assert that the account or contract has. This is a 256 bit string
+    /// that's serialized and deserialized into a decimal string.
+    #[schemars(with = "String")]
+    pub expected_balance: U256,
+}
+
+#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
+pub struct StorageEmptyAssertion {
+    /// An optional comment on the storage empty assertion.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comment: Option<String>,
+
+    /// The address that the balance assertion should be done on.
+    ///
+    /// This is a string which will be resolved into an address when being processed. Therefore,
+    /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
+    /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
+    /// followed in the calldata.
+    pub address: String,
+
+    /// A boolean of whether the storage of the address is empty or not.
+    pub is_storage_empty: bool,
+}
+
+/// A set of expectations and assertions to make about the transaction after it ran.
+///
+/// If this is not specified then the only assertion that will be ran is that the transaction
+/// was successful.
+#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
 #[serde(untagged)]
 pub enum Expected {
+    /// An assertion that the transaction succeeded and returned the provided set of data.
     Calldata(Calldata),
+    /// A more complex assertion.
     Expected(ExpectedOutput),
+    /// A set of assertions.
     ExpectedMany(Vec<ExpectedOutput>),
 }
 
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
+/// A set of assertions to run on the transaction.
+#[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
 pub struct ExpectedOutput {
+    /// An optional compiler version that's required in order for this assertion to run.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schemars(with = "Option<String>")]
     pub compiler_version: Option<VersionReq>,
+
+    /// An optional field of the expected returns from the invocation.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub return_data: Option<Calldata>,
+
+    /// An optional set of assertions to run on the emitted events from the transaction.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub events: Option<Vec<Event>>,
+
+    /// A boolean which defines whether we expect the transaction to succeed or fail.
     #[serde(default)]
     pub exception: bool,
 }
 
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
 pub struct Event {
+    /// An optional field of the address of the emitter of the event.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub address: Option<String>,
+
+    /// The set of topics to expect the event to have.
     pub topics: Vec<String>,
+
+    /// The set of values to expect the event to have.
     pub values: Calldata,
 }
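With `#[serde(untagged)]`, the `Step` variant is chosen by which required fields a JSON object carries. A sketch of that dispatch with simplified stand-in types (the real `Input` has many more defaulted fields):

```rust
use serde::Deserialize;

// Simplified stand-ins for the step types in the hunk above; serde tries the
// untagged variants in order and picks the first whose required fields are
// all present.
#[derive(Debug, Deserialize)]
struct Input {
    method: String,
}

#[derive(Debug, Deserialize)]
struct BalanceAssertion {
    address: String,
    expected_balance: String,
}

#[derive(Debug, Deserialize)]
struct StorageEmptyAssertion {
    address: String,
    is_storage_empty: bool,
}

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Step {
    FunctionCall(Box<Input>),
    BalanceAssertion(Box<BalanceAssertion>),
    StorageEmptyAssertion(Box<StorageEmptyAssertion>),
}

fn main() {
    let steps: Vec<Step> = serde_json::from_str(
        r#"[
            { "method": "transfer" },
            { "address": "Test.address", "expected_balance": "1000" },
            { "address": "Test.address", "is_storage_empty": true }
        ]"#,
    )
    .unwrap();
    // One object per variant: FunctionCall, BalanceAssertion, StorageEmptyAssertion.
    println!("{steps:#?}");
}
```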
@@ -109,18 +222,19 @@ pub struct Event {
 /// [`Single`]: Calldata::Single
 /// [`Compound`]: Calldata::Compound
 /// [reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
 #[serde(untagged)]
 pub enum Calldata {
-    Single(Bytes),
+    Single(#[schemars(with = "String")] Bytes),
     Compound(Vec<CalldataItem>),
 }
 
 define_wrapper_type! {
-    /// This represents an item in the [`Calldata::Compound`] variant.
-    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+    /// This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved
+    /// according to the resolution rules of the tool.
+    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema)]
     #[serde(transparent)]
-    pub struct CalldataItem(String);
+    pub struct CalldataItem(String) impl Display;
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
@@ -143,7 +257,7 @@ enum Operation {
 }
 
 /// Specify how the contract is called.
-#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
+#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
 pub enum Method {
     /// Initiate a deploy transaction, calling contracts constructor.
     ///
@@ -164,11 +278,16 @@ pub enum Method {
 }
 
 define_wrapper_type!(
-    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
-    pub struct EtherValue(U256);
+    /// Defines an Ether value.
+    ///
+    /// This is an unsigned 256 bit integer that's followed by some denomination which can either be
+    /// eth, ether, gwei, or wei.
+    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema)]
+    #[schemars(with = "String")]
+    pub struct EtherValue(U256) impl Display;
 );
 
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
 pub struct VariableAssignments {
     /// A vector of the variable names to assign to the return data.
     ///
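To make the `EtherValue` doc comment above concrete, here is a hand-rolled sketch of the "`<number> <denomination>`" shape it describes (plain `u128` multipliers for illustration; the real parser goes through alloy's `parse_units` helper imported above):

```rust
// Illustrative only: maps the denominations named in the EtherValue docs
// (eth, ether, gwei, wei) onto their wei multipliers.
fn parse_ether_value(s: &str) -> Option<u128> {
    let (value, unit) = s.split_once(' ')?;
    let multiplier: u128 = match unit {
        "wei" => 1,
        "gwei" => 1_000_000_000,
        "eth" | "ether" => 1_000_000_000_000_000_000,
        _ => return None,
    };
    value.parse::<u128>().ok()?.checked_mul(multiplier)
}

fn main() {
    assert_eq!(parse_ether_value("1 ether"), Some(1_000_000_000_000_000_000));
    assert_eq!(parse_ether_value("5 gwei"), Some(5_000_000_000));
    assert_eq!(parse_ether_value("7 parsecs"), None); // unknown denomination
}
```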
@@ -187,50 +306,41 @@ impl Input {
         ContractInstance::new("Test")
     }
 
-    fn instance_to_address(
+    pub async fn encoded_input(
         &self,
-        instance: &ContractInstance,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-    ) -> anyhow::Result<Address> {
-        deployed_contracts
-            .get(instance)
-            .map(|(a, _)| *a)
-            .ok_or_else(|| anyhow::anyhow!("instance {instance:?} not deployed"))
-    }
-
-    pub async fn encoded_input<'a>(
-        &'a self,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<Bytes> {
         match self.method {
             Method::Deployer | Method::Fallback => {
                 let calldata = self
                     .calldata
-                    .calldata(deployed_contracts, variables, chain_state_provider)
-                    .await?;
+                    .calldata(resolver, context)
+                    .await
+                    .context("Failed to produce calldata for deployer/fallback method")?;
 
                 Ok(calldata.into())
             }
             Method::FunctionName(ref function_name) => {
-                let Some(abi) = deployed_contracts.get(&self.instance).map(|(_, a)| a) else {
-                    tracing::error!(
-                        contract_name = self.instance.as_ref(),
-                        available_abis = ?deployed_contracts.keys().collect::<Vec<_>>(),
-                        "Attempted to lookup ABI of contract but it wasn't found"
-                    );
+                let Some(abi) = context.deployed_contract_abi(&self.instance) else {
                     anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
                 };
 
-                tracing::trace!("ABI found for instance: {}", &self.instance.as_ref());
-
                 // We follow the same logic that's implemented in the matter-labs-tester where they resolve
                 // the function name into a function selector and they assume that he function doesn't have
                 // any existing overloads.
+                // Overloads are handled by providing the full function signature in the "function
+                // name".
                 // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
-                let function = abi
-                    .functions()
-                    .find(|function| function.signature().starts_with(function_name))
-                    .ok_or_else(|| {
-                        anyhow::anyhow!(
+                let selector =
+                    if function_name.contains('(') && function_name.contains(')') {
+                        Function::parse(function_name)
+                            .context(
+                                "Failed to parse the provided function name into a function signature",
+                            )?
+                            .selector()
+                    } else {
+                        abi.functions()
+                            .find(|function| function.signature().starts_with(function_name))
+                            .ok_or_else(|| {
+                                anyhow::anyhow!(
@@ -238,14 +348,13 @@ impl Input {
                                     function_name,
                                     &self.instance
                                 )
-                    })?;
-
-                tracing::trace!("Functions found for instance: {}", self.instance.as_ref());
-
-                tracing::trace!(
-                    "Starting encoding ABI's parameters for instance: {}",
-                    self.instance.as_ref()
-                );
+                            })
+                            .with_context(|| format!(
+                                "Failed to resolve function selector for {:?} on instance {:?}",
+                                function_name, &self.instance
+                            ))?
+                            .selector()
+                    };
 
                 // Allocating a vector that we will be using for the calldata. The vector size will be:
                 // 4 bytes for the function selector.
@@ -254,15 +363,11 @@ impl Input {
                 // We're using indices in the following code in order to avoid the need for us to allocate
                 // a new buffer for each one of the resolved arguments.
                 let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement());
-                calldata.extend(function.selector().0);
+                calldata.extend(selector.0);
                 self.calldata
-                    .calldata_into_slice(
-                        &mut calldata,
-                        deployed_contracts,
-                        variables,
-                        chain_state_provider,
-                    )
-                    .await?;
+                    .calldata_into_slice(&mut calldata, resolver, context)
+                    .await
+                    .context("Failed to append encoded argument to calldata buffer")?;
 
                 Ok(calldata.into())
             }
@@ -270,15 +375,15 @@ impl Input {
     }
 
     /// Parse this input into a legacy transaction.
-    pub async fn legacy_transaction<'a>(
-        &'a self,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+    pub async fn legacy_transaction(
+        &self,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<TransactionRequest> {
         let input_data = self
-            .encoded_input(deployed_contracts, variables, chain_state_provider)
-            .await?;
+            .encoded_input(resolver, context)
+            .await
+            .context("Failed to encode input bytes for transaction request")?;
         let transaction_request = TransactionRequest::default().from(self.caller).value(
             self.value
                 .map(|value| value.into_inner())
@@ -287,7 +392,10 @@ impl Input {
         match self.method {
             Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)),
             _ => Ok(transaction_request
-                .to(self.instance_to_address(&self.instance, deployed_contracts)?)
+                .to(context
+                    .deployed_contract_address(&self.instance)
+                    .context("Failed to get the contract address")
+                    .copied()?)
                 .input(input_data.into())),
         }
     }
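The reason the hunks above special-case names containing parentheses: a selector is the first four bytes of keccak256 over the canonical signature, so two overloads that share a name still get distinct selectors, while the prefix-match fallback (`starts_with`) cannot tell them apart. A minimal sketch of that fact (using `alloy_primitives::keccak256` and the `hex` crate, both already dependencies of this workspace):

```rust
use alloy_primitives::keccak256;

// A function selector is keccak256(canonical_signature)[..4]. Providing the
// full signature therefore disambiguates overloads that a bare name cannot.
fn selector(signature: &str) -> [u8; 4] {
    keccak256(signature.as_bytes()).0[..4].try_into().unwrap()
}

fn main() {
    let a = selector("transfer(address,uint256)");
    let b = selector("transfer(address)");
    assert_ne!(a, b); // same name, two distinct selectors
    println!("0x{}", hex::encode(a)); // 0xa9059cbb, the well-known ERC-20 transfer selector
}
```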
@@ -356,49 +464,41 @@ impl Calldata {
         }
     }
 
-    pub async fn calldata<'a>(
-        &'a self,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+    pub async fn calldata(
+        &self,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<Vec<u8>> {
         let mut buffer = Vec::<u8>::with_capacity(self.size_requirement());
-        self.calldata_into_slice(
-            &mut buffer,
-            deployed_contracts,
-            variables,
-            chain_state_provider,
-        )
-        .await?;
+        self.calldata_into_slice(&mut buffer, resolver, context)
+            .await?;
         Ok(buffer)
     }
 
-    pub async fn calldata_into_slice<'a>(
-        &'a self,
+    pub async fn calldata_into_slice(
+        &self,
         buffer: &mut Vec<u8>,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<()> {
         match self {
             Calldata::Single(bytes) => {
                 buffer.extend_from_slice(bytes);
             }
             Calldata::Compound(items) => {
-                for (arg_idx, arg) in items.iter().enumerate() {
-                    match arg
-                        .resolve(deployed_contracts, variables.clone(), chain_state_provider)
-                        .await
-                    {
-                        Ok(resolved) => {
-                            buffer.extend(resolved.to_be_bytes::<32>());
-                        }
-                        Err(error) => {
-                            tracing::error!(?arg, arg_idx, ?error, "Failed to resolve argument");
-                            return Err(error);
-                        }
-                    };
-                }
+                let resolved = stream::iter(items.iter().enumerate())
+                    .map(|(arg_idx, arg)| async move {
+                        arg.resolve(resolver, context)
+                            .instrument(info_span!("Resolving argument", %arg, arg_idx))
+                            .map_ok(|value| value.to_be_bytes::<32>())
+                            .await
+                    })
+                    .buffered(0xFF)
+                    .try_collect::<Vec<_>>()
+                    .await
+                    .context("Failed to resolve one or more calldata arguments")?;
+
+                buffer.extend(resolved.into_iter().flatten());
             }
         };
         Ok(())
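The hunk above swaps a sequential for-loop for the `stream::iter(...).map(...).buffered(0xFF)` pattern, which resolves up to 255 arguments concurrently while keeping their original order and failing fast on the first error. A minimal sketch of that pattern in isolation (a stand-in closure instead of the real per-argument resolution; assumes a tokio runtime, as the crate's tests use):

```rust
use anyhow::Context as _;
use futures::{StreamExt, TryStreamExt, stream};

// Map each item to a future, run up to 0xFF of them concurrently while
// preserving input order, and collect into a Vec, failing on the first error.
async fn resolve_all(items: Vec<u64>) -> anyhow::Result<Vec<u64>> {
    stream::iter(items.into_iter().enumerate())
        .map(|(idx, item)| async move {
            // Stand-in for the real async per-argument resolution.
            Ok::<_, anyhow::Error>(item + idx as u64)
        })
        .buffered(0xFF)
        .try_collect::<Vec<_>>()
        .await
        .context("Failed to resolve one or more calldata arguments")
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    assert_eq!(resolve_all(vec![10, 20, 30]).await?, vec![10, 21, 32]);
    Ok(())
}
```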
@@ -412,23 +512,21 @@ impl Calldata {
     }
 
     /// Checks if this [`Calldata`] is equivalent to the passed calldata bytes.
-    pub async fn is_equivalent<'a>(
-        &'a self,
+    pub async fn is_equivalent(
+        &self,
         other: &[u8],
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<bool> {
         match self {
             Calldata::Single(calldata) => Ok(calldata == other),
             Calldata::Compound(items) => {
-                // Chunking the "other" calldata into 32 byte chunks since each
-                // one of the items in the compound calldata represents 32 bytes
-                for (this, other) in items.iter().zip(other.chunks(32)) {
+                stream::iter(items.iter().zip(other.chunks(32)))
+                    .map(|(this, other)| async move {
                         // The matterlabs format supports wildcards and therefore we
                         // also need to support them.
                         if this.as_ref() == "*" {
-                            continue;
+                            return Ok::<_, anyhow::Error>(true);
                         }
 
                         let other = if other.len() < 32 {
@@ -440,31 +538,33 @@ impl Calldata {
                         };
 
                         let this = this
-                            .resolve(deployed_contracts, variables.clone(), chain_state_provider)
-                            .await?;
+                            .resolve(resolver, context)
+                            .await
+                            .context("Failed to resolve calldata item during equivalence check")?;
                         let other = U256::from_be_slice(&other);
-                        if this != other {
-                            return Ok(false);
-                        }
-                }
-                Ok(true)
+                        Ok(this == other)
+                    })
+                    .buffered(0xFF)
+                    .all(|v| async move { v.is_ok_and(|v| v) })
+                    .map(Ok)
+                    .await
             }
         }
     }
 }
 
 impl CalldataItem {
-    async fn resolve<'a>(
-        &'a self,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+    #[instrument(level = "info", skip_all, err)]
+    async fn resolve(
+        &self,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<U256> {
         let mut stack = Vec::<CalldataToken<U256>>::new();
 
         for token in self
             .calldata_tokens()
-            .map(|token| token.resolve(deployed_contracts, variables.clone(), chain_state_provider))
+            .map(|token| token.resolve(resolver, context))
         {
             let token = token.await?;
             let new_token = match token {
@@ -505,21 +605,14 @@ impl CalldataItem {
         match stack.as_slice() {
             // Empty stack means that we got an empty compound calldata which we resolve to zero.
             [] => Ok(U256::ZERO),
-            [CalldataToken::Item(item)] => {
-                tracing::debug!(
-                    original = self.0,
-                    resolved = item.to_be_bytes::<32>().encode_hex(),
-                    "Resolved a Calldata item"
-                );
-                Ok(*item)
-            }
+            [CalldataToken::Item(item)] => Ok(*item),
             _ => Err(anyhow::anyhow!(
                 "Invalid calldata arithmetic operation - Invalid stack"
             )),
         }
     }
 
-    fn calldata_tokens<'a>(&'a self) -> impl Iterator<Item = CalldataToken<&'a str>> + 'a {
+    fn calldata_tokens(&self) -> impl Iterator<Item = CalldataToken<&str>> {
         self.0.split(' ').map(|item| match item {
             "+" => CalldataToken::Operation(Operation::Addition),
             "-" => CalldataToken::Operation(Operation::Subtraction),
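Since `CalldataItem::resolve` evaluates space-separated tokens in reverse polish notation over a stack, a compact stand-alone sketch of the same evaluation scheme may help (plain `u128` instead of `U256`, and only the two operations visible in these hunks):

```rust
// A minimal reverse-polish evaluator in the spirit of CalldataItem::resolve
// above. Operators pop two operands and push the result; a well-formed item
// leaves exactly one value on the stack.
fn resolve(item: &str) -> Option<u128> {
    let mut stack: Vec<u128> = Vec::new();
    for token in item.split(' ') {
        match token {
            "+" => {
                let (b, a) = (stack.pop()?, stack.pop()?);
                stack.push(a.wrapping_add(b));
            }
            "-" => {
                let (b, a) = (stack.pop()?, stack.pop()?);
                stack.push(a.wrapping_sub(b));
            }
            literal => stack.push(literal.parse().ok()?),
        }
    }
    match stack.as_slice() {
        [value] => Some(*value),
        _ => None, // leftover or missing operands are an invalid item
    }
}

fn main() {
    assert_eq!(resolve("2 3 +"), Some(5)); // "2 3 +" is RPN for 2 + 3
    assert_eq!(resolve("10 4 -"), Some(6));
    assert_eq!(resolve("1 2"), None); // two values left on the stack
}
```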
@@ -543,9 +636,11 @@ impl<T> CalldataToken<T> {
     const GAS_LIMIT_VARIABLE: &str = "$GAS_LIMIT";
     const COINBASE_VARIABLE: &str = "$COINBASE";
     const DIFFICULTY_VARIABLE: &str = "$DIFFICULTY";
+    const BLOCK_BASE_FEE_VARIABLE: &str = "$BASE_FEE";
     const BLOCK_HASH_VARIABLE_PREFIX: &str = "$BLOCK_HASH";
     const BLOCK_NUMBER_VARIABLE: &str = "$BLOCK_NUMBER";
     const BLOCK_TIMESTAMP_VARIABLE: &str = "$BLOCK_TIMESTAMP";
+    const TRANSACTION_GAS_PRICE: &str = "$TRANSACTION_GAS_PRICE";
     const VARIABLE_PREFIX: &str = "$VARIABLE:";
 
     fn into_item(self) -> Option<T> {
@@ -565,24 +660,21 @@ impl<T: AsRef<str>> CalldataToken<T> {
     /// This piece of code is taken from the matter-labs-tester repository which is licensed under
     /// MIT or Apache. The original source code can be found here:
     /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146
-    async fn resolve<'a>(
+    async fn resolve(
         self,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        variables: impl Into<Option<&'a HashMap<String, U256>>> + Clone,
-        chain_state_provider: &impl ResolverApi,
+        resolver: &impl ResolverApi,
+        context: ResolutionContext<'_>,
     ) -> anyhow::Result<CalldataToken<U256>> {
         match self {
             Self::Item(item) => {
                 let item = item.as_ref();
                 let value = if let Some(instance) = item.strip_suffix(Self::ADDRESS_VARIABLE_SUFFIX)
                 {
-                    Ok(U256::from_be_slice(
-                        deployed_contracts
-                            .get(&ContractInstance::new(instance))
-                            .map(|(a, _)| *a)
-                            .ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance))?
-                            .as_ref(),
-                    ))
+                    context
+                        .deployed_contract_address(&ContractInstance::new(instance))
+                        .ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance))
+                        .map(AsRef::as_ref)
+                        .map(U256::from_be_slice)
                 } else if let Some(value) = item.strip_prefix(Self::NEGATIVE_VALUE_PREFIX) {
                     let value = U256::from_str_radix(value, 10).map_err(|error| {
                         anyhow::anyhow!("Invalid decimal literal after `-`: {}", error)
@@ -595,27 +687,36 @@ impl<T: AsRef<str>> CalldataToken<T> {
                         .ok_or_else(|| anyhow::anyhow!("`-0` is invalid literal"))?;
                     Ok(U256::MAX.checked_sub(value).expect("Always valid"))
                 } else if let Some(value) = item.strip_prefix(Self::HEX_LITERAL_PREFIX) {
-                    Ok(U256::from_str_radix(value, 16).map_err(|error| {
-                        anyhow::anyhow!("Invalid hexadecimal literal: {}", error)
-                    })?)
+                    U256::from_str_radix(value, 16)
+                        .map_err(|error| anyhow::anyhow!("Invalid hexadecimal literal: {}", error))
                 } else if item == Self::CHAIN_VARIABLE {
-                    let chain_id = chain_state_provider.chain_id().await?;
-                    Ok(U256::from(chain_id))
+                    resolver.chain_id().await.map(U256::from)
+                } else if item == Self::TRANSACTION_GAS_PRICE {
+                    context
+                        .transaction_hash()
+                        .context("No transaction hash provided to get the transaction gas price")
+                        .map(|tx_hash| resolver.transaction_gas_price(tx_hash))?
+                        .await
+                        .map(U256::from)
                 } else if item == Self::GAS_LIMIT_VARIABLE {
-                    let gas_limit = chain_state_provider
-                        .block_gas_limit(BlockNumberOrTag::Latest)
-                        .await?;
-                    Ok(U256::from(gas_limit))
+                    resolver
+                        .block_gas_limit(context.resolve_block_number(BlockNumberOrTag::Latest))
+                        .await
+                        .map(U256::from)
                 } else if item == Self::COINBASE_VARIABLE {
-                    let coinbase = chain_state_provider
-                        .block_coinbase(BlockNumberOrTag::Latest)
-                        .await?;
-                    Ok(U256::from_be_slice(coinbase.as_ref()))
+                    resolver
+                        .block_coinbase(context.resolve_block_number(BlockNumberOrTag::Latest))
+                        .await
+                        .map(|address| U256::from_be_slice(address.as_ref()))
                 } else if item == Self::DIFFICULTY_VARIABLE {
-                    let block_difficulty = chain_state_provider
-                        .block_difficulty(BlockNumberOrTag::Latest)
-                        .await?;
-                    Ok(block_difficulty)
+                    resolver
+                        .block_difficulty(context.resolve_block_number(BlockNumberOrTag::Latest))
+                        .await
+                } else if item == Self::BLOCK_BASE_FEE_VARIABLE {
+                    resolver
+                        .block_base_fee(context.resolve_block_number(BlockNumberOrTag::Latest))
+                        .await
+                        .map(U256::from)
                 } else if item.starts_with(Self::BLOCK_HASH_VARIABLE_PREFIX) {
                     let offset: u64 = item
                         .split(':')
@@ -623,35 +724,41 @@ impl<T: AsRef<str>> CalldataToken<T> {
                         .and_then(|value| value.parse().ok())
                         .unwrap_or_default();
 
-                    let current_block_number = chain_state_provider.last_block_number().await?;
-                    let desired_block_number = current_block_number - offset;
+                    let current_block_number = match context.tip_block_number() {
+                        Some(block_number) => *block_number,
+                        None => resolver.last_block_number().await.context(
+                            "Failed to query last block number while resolving $BLOCK_HASH",
+                        )?,
+                    };
+                    let desired_block_number = current_block_number.saturating_sub(offset);
 
-                    let block_hash = chain_state_provider
+                    let block_hash = resolver
                         .block_hash(desired_block_number.into())
-                        .await?;
+                        .await
+                        .context("Failed to resolve block hash for desired block number")?;
 
                     Ok(U256::from_be_bytes(block_hash.0))
                 } else if item == Self::BLOCK_NUMBER_VARIABLE {
-                    let current_block_number = chain_state_provider.last_block_number().await?;
+                    let current_block_number = match context.tip_block_number() {
+                        Some(block_number) => *block_number,
+                        None => resolver.last_block_number().await.context(
+                            "Failed to query last block number while resolving $BLOCK_NUMBER",
+                        )?,
+                    };
                     Ok(U256::from(current_block_number))
                 } else if item == Self::BLOCK_TIMESTAMP_VARIABLE {
-                    let timestamp = chain_state_provider
-                        .block_timestamp(BlockNumberOrTag::Latest)
-                        .await?;
-                    Ok(U256::from(timestamp))
+                    resolver
+                        .block_timestamp(context.resolve_block_number(BlockNumberOrTag::Latest))
+                        .await
+                        .map(U256::from)
                 } else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) {
-                    let Some(variables) = variables.into() else {
-                        anyhow::bail!(
-                            "Variable resolution required but no variables were passed in"
-                        );
-                    };
-                    let Some(variable) = variables.get(variable_name) else {
-                        anyhow::bail!("No variable found with the name {}", variable_name)
-                    };
-                    Ok(*variable)
+                    context
+                        .variable(variable_name)
+                        .context("Variable lookup failed")
+                        .copied()
                 } else {
-                    Ok(U256::from_str_radix(item, 10)
-                        .map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error))?)
+                    U256::from_str_radix(item, 10)
+                        .map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error))
                 };
                 value.map(CalldataToken::Item)
             }
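For the `$BLOCK_HASH[:offset]` handling above, the offset after the colon counts blocks back from the tip, defaults to zero, and the new `saturating_sub` keeps a large offset from underflowing. A small sketch (the middle of the `split(':')` chain is truncated out of the visible hunk, so `.nth(1)` here is my stand-in for whichever adapter picks the part after the colon):

```rust
// Illustrative parse of "$BLOCK_HASH[:offset]": take the text after the first
// ':' as a u64 offset, falling back to zero when absent or malformed.
fn block_hash_offset(item: &str) -> u64 {
    item.split(':')
        .nth(1)
        .and_then(|value| value.parse().ok())
        .unwrap_or_default()
}

fn main() {
    let tip: u64 = 100;
    assert_eq!(tip.saturating_sub(block_hash_offset("$BLOCK_HASH")), 100);
    assert_eq!(tip.saturating_sub(block_hash_offset("$BLOCK_HASH:2")), 98);
    // A malformed offset falls back to zero rather than failing the step.
    assert_eq!(tip.saturating_sub(block_hash_offset("$BLOCK_HASH:x")), 100);
}
```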
@@ -689,51 +796,52 @@ impl<'de> Deserialize<'de> for EtherValue {
 #[cfg(test)]
 mod tests {
 
-    use super::*;
-    use alloy::json_abi::JsonAbi;
-    use alloy_primitives::address;
+    use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
+    use alloy_primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
     use alloy_sol_types::SolValue;
     use std::collections::HashMap;
 
+    use super::*;
+    use crate::metadata::ContractIdent;
+
     struct MockResolver;
 
     impl ResolverApi for MockResolver {
-        async fn chain_id(&self) -> anyhow::Result<alloy_primitives::ChainId> {
+        async fn chain_id(&self) -> anyhow::Result<ChainId> {
             Ok(0x123)
         }
 
-        async fn block_gas_limit(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result<u128> {
+        async fn block_gas_limit(&self, _: BlockNumberOrTag) -> anyhow::Result<u128> {
             Ok(0x1234)
         }
 
-        async fn block_coinbase(
-            &self,
-            _: alloy::eips::BlockNumberOrTag,
-        ) -> anyhow::Result<Address> {
+        async fn block_coinbase(&self, _: BlockNumberOrTag) -> anyhow::Result<Address> {
             Ok(Address::ZERO)
         }
 
-        async fn block_difficulty(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result<U256> {
+        async fn block_difficulty(&self, _: BlockNumberOrTag) -> anyhow::Result<U256> {
             Ok(U256::from(0x12345u128))
         }
 
-        async fn block_hash(
-            &self,
-            _: alloy::eips::BlockNumberOrTag,
-        ) -> anyhow::Result<alloy_primitives::BlockHash> {
+        async fn block_base_fee(&self, _: BlockNumberOrTag) -> anyhow::Result<u64> {
+            Ok(0x100)
+        }
+
+        async fn block_hash(&self, _: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
             Ok([0xEE; 32].into())
         }
 
-        async fn block_timestamp(
-            &self,
-            _: alloy::eips::BlockNumberOrTag,
-        ) -> anyhow::Result<alloy_primitives::BlockTimestamp> {
+        async fn block_timestamp(&self, _: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
             Ok(0x123456)
         }
 
-        async fn last_block_number(&self) -> anyhow::Result<alloy_primitives::BlockNumber> {
+        async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
             Ok(0x1234567)
         }
+
+        async fn transaction_gas_price(&self, _: &TxHash) -> anyhow::Result<u128> {
+            Ok(0x200)
+        }
     }
 
     #[tokio::test]
@@ -769,13 +877,12 @@ mod tests {
         let mut contracts = HashMap::new();
         contracts.insert(
             ContractInstance::new("Contract"),
-            (Address::ZERO, parsed_abi),
+            (ContractIdent::new("Contract"), Address::ZERO, parsed_abi),
         );
 
-        let encoded = input
-            .encoded_input(&contracts, None, &MockResolver)
-            .await
-            .unwrap();
+        let resolver = MockResolver;
+        let context = ResolutionContext::default().with_deployed_contracts(&contracts);
+        let encoded = input.encoded_input(&resolver, context).await.unwrap();
         assert!(encoded.0.starts_with(&selector));
 
         type T = (u64,);
@@ -814,13 +921,12 @@ mod tests {
         let mut contracts = HashMap::new();
         contracts.insert(
             ContractInstance::new("Contract"),
-            (Address::ZERO, parsed_abi),
+            (ContractIdent::new("Contract"), Address::ZERO, parsed_abi),
        );
 
-        let encoded = input
-            .encoded_input(&contracts, None, &MockResolver)
-            .await
-            .unwrap();
+        let resolver = MockResolver;
+        let context = ResolutionContext::default().with_deployed_contracts(&contracts);
+        let encoded = input.encoded_input(&resolver, context).await.unwrap();
         assert!(encoded.0.starts_with(&selector));
 
         type T = (alloy_primitives::Address,);
@@ -862,13 +968,12 @@ mod tests {
         let mut contracts = HashMap::new();
         contracts.insert(
             ContractInstance::new("Contract"),
-            (Address::ZERO, parsed_abi),
+            (ContractIdent::new("Contract"), Address::ZERO, parsed_abi),
         );
 
-        let encoded = input
-            .encoded_input(&contracts, None, &MockResolver)
-            .await
-            .unwrap();
+        let resolver = MockResolver;
+        let context = ResolutionContext::default().with_deployed_contracts(&contracts);
+        let encoded = input.encoded_input(&resolver, context).await.unwrap();
         assert!(encoded.0.starts_with(&selector));
 
         type T = (alloy_primitives::Address,);
@@ -881,12 +986,11 @@ mod tests {
 
     async fn resolve_calldata_item(
         input: &str,
-        deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
-        chain_state_provider: &impl ResolverApi,
+        deployed_contracts: &HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
+        resolver: &impl ResolverApi,
     ) -> anyhow::Result<U256> {
-        CalldataItem::new(input)
-            .resolve(deployed_contracts, None, chain_state_provider)
-            .await
+        let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts);
+        CalldataItem::new(input).resolve(resolver, context).await
     }
 
     #[tokio::test]
@@ -964,6 +1068,26 @@ mod tests {
         )
     }
 
+    #[tokio::test]
+    async fn resolver_can_resolve_block_base_fee_variable() {
+        // Arrange
+        let input = "$BASE_FEE";
+
+        // Act
+        let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await;
+
+        // Assert
+        let resolved = resolved.expect("Failed to resolve argument");
+        assert_eq!(
+            resolved,
+            MockResolver
+                .block_base_fee(Default::default())
+                .await
+                .map(U256::from)
+                .unwrap()
+        )
+    }
+
     #[tokio::test]
     async fn resolver_can_resolve_block_hash_variable() {
         // Arrange
+304 -67
@@ -1,20 +1,24 @@
 use std::{
+    cmp::Ordering,
     collections::BTreeMap,
     fmt::Display,
-    fs::{File, read_to_string},
+    fs::File,
     ops::Deref,
     path::{Path, PathBuf},
     str::FromStr,
 };
 
+use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 
-use revive_dt_common::{iterators::FilesWithExtensionIterator, macros::define_wrapper_type};
-
-use crate::{
-    case::Case,
-    mode::{Mode, SolcMode},
-};
+use revive_common::EVMVersion;
+use revive_dt_common::{
+    cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type,
+    types::Mode,
+};
+use tracing::error;
+
+use crate::{case::Case, mode::ParsedMode};
 
 pub const METADATA_FILE_EXTENSION: &str = "json";
 pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
@@ -22,16 +26,26 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!";
 
 #[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
 pub struct MetadataFile {
-    pub path: PathBuf,
+    /// The path of the metadata file. This will either be a JSON or solidity file.
+    pub metadata_file_path: PathBuf,
+
+    /// This is the path contained within the corpus file. This could either be the path of some dir
+    /// or could be the actual metadata file path.
+    pub corpus_file_path: PathBuf,
+
+    /// The metadata contained within the file.
     pub content: Metadata,
 }
 
 impl MetadataFile {
-    pub fn try_from_file(path: &Path) -> Option<Self> {
-        Metadata::try_from_file(path).map(|metadata| Self {
-            path: path.to_owned(),
-            content: metadata,
-        })
+    pub fn relative_path(&self) -> &Path {
+        if self.corpus_file_path.is_file() {
+            &self.corpus_file_path
+        } else {
+            self.metadata_file_path
+                .strip_prefix(&self.corpus_file_path)
+                .unwrap()
+        }
     }
 }
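To clarify the new `relative_path` contract above: when the corpus entry named a directory, the metadata path is reported relative to that directory; when it named the metadata file itself, the corpus path comes back unchanged. A sketch restated with plain values (the real method decides the branch with a filesystem probe, `corpus_file_path.is_file()`; the paths below are made up for illustration):

```rust
use std::path::Path;

// Mirror of MetadataFile::relative_path with the filesystem check replaced
// by an explicit flag so the example stays self-contained.
fn relative_path<'a>(
    metadata_file_path: &'a Path,
    corpus_file_path: &'a Path,
    corpus_entry_is_file: bool,
) -> &'a Path {
    if corpus_entry_is_file {
        corpus_file_path
    } else {
        metadata_file_path.strip_prefix(corpus_file_path).unwrap()
    }
}

fn main() {
    let metadata = Path::new("/corpora/suite/erc20/test.json");
    // Corpus entry was a directory: report the path relative to it.
    assert_eq!(
        relative_path(metadata, Path::new("/corpora/suite"), false),
        Path::new("erc20/test.json")
    );
    // Corpus entry was the file itself: return it as-is.
    assert_eq!(
        relative_path(metadata, Path::new("/corpora/suite/erc20/test.json"), true),
        Path::new("/corpora/suite/erc20/test.json")
    );
}
```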
||||||
@@ -43,34 +57,83 @@ impl Deref for MetadataFile {
     }
 }

-#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
+/// A MatterLabs metadata file.
+///
+/// This defines the structure that the MatterLabs metadata files follow for defining the tests or
+/// the workloads.
+///
+/// Each metadata file is composed of multiple test cases where each test case is isolated from the
+/// others and runs in a completely different address space. Each test case is composed of a number
+/// of steps and assertions that should be performed as part of the test case.
+#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
 pub struct Metadata {
-    pub targets: Option<Vec<String>>,
-    pub cases: Vec<Case>,
-    pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
-    // TODO: Convert into wrapper types for clarity.
-    pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,
+    /// This is an optional comment on the metadata file which has no impact on the execution in
+    /// any way.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comment: Option<String>,
+
+    /// An optional boolean which defines if the metadata file as a whole should be ignored. If
+    /// null then the metadata file will not be ignored.
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ignore: Option<bool>,
-    pub modes: Option<Vec<Mode>>,
+
+    /// An optional vector of targets that this Metadata file's cases can be executed on. As an
+    /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
+    /// specify a target of "PolkaVM" in here.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub targets: Option<Vec<String>>,
+
+    /// A vector of the test cases and workloads contained within the metadata file. This is their
+    /// primary description.
+    pub cases: Vec<Case>,
+
+    /// A map of all of the contracts that the test requires to run.
+    ///
+    /// This is a map where the key is the name of the contract instance and the value is the
+    /// contract's path and ident in the file.
+    ///
+    /// If any contract is to be used by the test then it must be included in here first so that
+    /// the framework is aware of its path, compiles it, and prepares it.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
+
+    /// The set of libraries that this metadata file requires.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,
+
+    /// This represents a mode that has been parsed from test metadata.
+    ///
+    /// Mode strings can take the following form (in pseudo-regex):
+    ///
+    /// ```text
+    /// [YEILV][+-]? (M[0123sz])? <semver>?
+    /// ```
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub modes: Option<Vec<ParsedMode>>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[schemars(skip)]
     pub file_path: Option<PathBuf>,
+
+    /// This field specifies an EVM version requirement that the test case has; the test might
+    /// only be run if the EVM version of the node matches the EVM version specified here.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub required_evm_version: Option<EvmVersionRequirement>,
+
+    /// A set of compilation directives that will be passed to the compiler whenever the contracts
+    /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
+    /// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
+    /// compiler.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub compiler_directives: Option<CompilationDirectives>,
 }

 impl Metadata {
-    /// Returns the solc modes of this metadata, inserting a default mode if not present.
-    pub fn solc_modes(&self) -> Vec<SolcMode> {
-        self.modes
-            .to_owned()
-            .unwrap_or_else(|| vec![Mode::Solidity(Default::default())])
-            .iter()
-            .filter_map(|mode| match mode {
-                Mode::Solidity(solc_mode) => Some(solc_mode),
-                Mode::Unknown(mode) => {
-                    tracing::debug!("compiler: ignoring unknown mode '{mode}'");
-                    None
-                }
-            })
-            .cloned()
-            .collect()
+    /// Returns the modes that we should test from this metadata.
+    pub fn solc_modes(&self) -> Vec<Mode> {
+        match &self.modes {
+            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
+            None => Mode::all().cloned().collect(),
+        }
     }

     /// Returns the base directory of this metadata.
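Since `Metadata` derives `Default` and keeps most of its fields optional, a minimal value can be built with struct-update syntax. A hedged sketch, not part of the diff (it assumes `Case` also implements `Default`, which this diff does not show):

```rust
// Minimal sketch: optional fields left as `None` are skipped during
// serialization via the `skip_serializing_if` attributes shown above.
let metadata = Metadata {
    comment: Some("Example metadata file".to_string()),
    targets: Some(vec!["PolkaVM".to_string()]),
    cases: vec![Case::default()], // assumes `Case: Default`
    ..Default::default()
};
```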
@@ -102,7 +165,15 @@ impl Metadata {
         ) in contracts
         {
             let alias = alias.clone();
-            let absolute_path = directory.join(contract_source_path).canonicalize()?;
+            let absolute_path = directory
+                .join(contract_source_path)
+                .canonicalize()
+                .map_err(|error| {
+                    anyhow::anyhow!(
+                        "Failed to canonicalize contract source path '{}': {error}",
+                        directory.join(contract_source_path).display()
+                    )
+                })?;
             let contract_ident = contract_ident.clone();

             sources.insert(
@@ -126,10 +197,7 @@ impl Metadata {
     pub fn try_from_file(path: &Path) -> Option<Self> {
         assert!(path.is_file(), "not a file: {}", path.display());

-        let Some(file_extension) = path.extension() else {
-            tracing::debug!("skipping corpus file: {}", path.display());
-            return None;
-        };
+        let file_extension = path.extension()?;

         if file_extension == METADATA_FILE_EXTENSION {
             return Self::try_from_json(path);
@@ -139,18 +207,12 @@ impl Metadata {
             return Self::try_from_solidity(path);
         }

-        tracing::debug!("ignoring invalid corpus file: {}", path.display());
         None
     }

     fn try_from_json(path: &Path) -> Option<Self> {
         let file = File::open(path)
-            .inspect_err(|error| {
-                tracing::error!(
-                    "opening JSON test metadata file '{}' error: {error}",
-                    path.display()
-                );
-            })
+            .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file"))
             .ok()?;

         match serde_json::from_reader::<_, Metadata>(file) {
@@ -158,11 +220,8 @@ impl Metadata {
                 metadata.file_path = Some(path.to_path_buf());
                 Some(metadata)
             }
-            Err(error) => {
-                tracing::error!(
-                    "parsing JSON test metadata file '{}' error: {error}",
-                    path.display()
-                );
+            Err(err) => {
+                error!(path = %path.display(), %err, "Deserialization of metadata failed");
                 None
             }
         }
@@ -170,12 +229,7 @@ impl Metadata {

     fn try_from_solidity(path: &Path) -> Option<Self> {
         let spec = read_to_string(path)
-            .inspect_err(|error| {
-                tracing::error!(
-                    "opening JSON test metadata file '{}' error: {error}",
-                    path.display()
-                );
-            })
+            .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content"))
             .ok()?
             .lines()
             .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER))
@@ -203,11 +257,8 @@ impl Metadata {
                 );
                 Some(metadata)
             }
-            Err(error) => {
-                tracing::error!(
-                    "parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}",
-                    path.display()
-                );
+            Err(err) => {
+                error!(path = %path.display(), %err, "Failed to deserialize metadata");
                 None
             }
         }
@@ -231,7 +282,9 @@ impl Metadata {
             Ok(Box::new(std::iter::once(metadata_file_path.clone())))
         } else {
             Ok(Box::new(
-                FilesWithExtensionIterator::new(self.directory()?).with_allowed_extension("sol"),
+                FilesWithExtensionIterator::new(self.directory()?)
+                    .with_allowed_extension("sol")
+                    .with_use_cached_fs(true),
             ))
         }
     }
@@ -242,10 +295,10 @@ define_wrapper_type!(
     ///
     /// Typically, this is used as the key to the "contracts" field of metadata files.
     #[derive(
-        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
+        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
     )]
     #[serde(transparent)]
-    pub struct ContractInstance(String);
+    pub struct ContractInstance(String) impl Display;
 );

 define_wrapper_type!(
@@ -253,10 +306,10 @@ define_wrapper_type!(
     ///
     /// A contract identifier is the name of the contract in the source code.
     #[derive(
-        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
+        Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
     )]
     #[serde(transparent)]
-    pub struct ContractIdent(String);
+    pub struct ContractIdent(String) impl Display;
 );

 /// Represents an identifier used for contracts.
@@ -266,7 +319,9 @@ define_wrapper_type!(
 /// ```text
 /// ${path}:${contract_ident}
 /// ```
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[derive(
+    Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
+)]
 #[serde(try_from = "String", into = "String")]
 pub struct ContractPathAndIdent {
     /// The path of the contract source code relative to the directory containing the metadata file.
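Given the `#[serde(try_from = "String", into = "String")]` attribute above, a `ContractPathAndIdent` round-trips through its `${path}:${contract_ident}` string form. A hedged sketch, with a made-up path and contract name used purely as placeholders:

```rust
// Sketch only: deserializes the string form via the TryFrom<String> impl
// wired up by the serde attribute shown above.
let parsed: ContractPathAndIdent =
    serde_json::from_str(r#""contracts/Flipper.sol:Flipper""#)
        .expect("valid `${path}:${contract_ident}` string");
```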
@@ -343,6 +398,188 @@ impl From<ContractPathAndIdent> for String {
     }
 }

+/// An EVM version requirement that the test case has. This gets serialized and deserialized from
+/// and into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the
+/// EVM version.
+///
+/// When specified, the framework will only run the test if the node's EVM version matches that
+/// required by the metadata file.
+#[derive(
+    Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
+)]
+#[serde(try_from = "String", into = "String")]
+pub struct EvmVersionRequirement {
+    ordering: Ordering,
+    or_equal: bool,
+    evm_version: EVMVersion,
+}
+
+impl EvmVersionRequirement {
+    pub fn new_greater_than_or_equals(version: EVMVersion) -> Self {
+        Self {
+            ordering: Ordering::Greater,
+            or_equal: true,
+            evm_version: version,
+        }
+    }
+
+    pub fn new_greater_than(version: EVMVersion) -> Self {
+        Self {
+            ordering: Ordering::Greater,
+            or_equal: false,
+            evm_version: version,
+        }
+    }
+
+    pub fn new_equals(version: EVMVersion) -> Self {
+        Self {
+            ordering: Ordering::Equal,
+            or_equal: false,
+            evm_version: version,
+        }
+    }
+
+    pub fn new_less_than(version: EVMVersion) -> Self {
+        Self {
+            ordering: Ordering::Less,
+            or_equal: false,
+            evm_version: version,
+        }
+    }
+
+    pub fn new_less_than_or_equals(version: EVMVersion) -> Self {
+        Self {
+            ordering: Ordering::Less,
+            or_equal: true,
+            evm_version: version,
+        }
+    }
+
+    pub fn matches(&self, other: &EVMVersion) -> bool {
+        let ordering = other.cmp(&self.evm_version);
+        ordering == self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal))
+    }
+}
+
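The `matches` method above reduces every requirement to a single comparison: a `>=` requirement is stored as `Ordering::Greater` plus `or_equal = true`, so an equal version is accepted through the `or_equal` branch and a newer one through the ordering check. A hedged sketch (the `EVMVersion::Cancun` variant name is an assumption here; the real variant names come from `revive-common`):

```rust
// Sketch: ">=cancun" accepts the same version via the or_equal branch,
// since other.cmp(&self.evm_version) is Ordering::Equal in that case.
let requirement = EvmVersionRequirement::new_greater_than_or_equals(EVMVersion::Cancun);
assert!(requirement.matches(&EVMVersion::Cancun));
```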
+impl Display for EvmVersionRequirement {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let Self {
+            ordering,
+            or_equal,
+            evm_version,
+        } = self;
+        match ordering {
+            Ordering::Less => write!(f, "<")?,
+            Ordering::Equal => write!(f, "=")?,
+            Ordering::Greater => write!(f, ">")?,
+        }
+        if *or_equal && !matches!(ordering, Ordering::Equal) {
+            write!(f, "=")?;
+        }
+        write!(f, "{evm_version}")
+    }
+}
+
+impl FromStr for EvmVersionRequirement {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.as_bytes() {
+            [b'>', b'=', remaining @ ..] => Ok(Self {
+                ordering: Ordering::Greater,
+                or_equal: true,
+                evm_version: str::from_utf8(remaining)?.try_into()?,
+            }),
+            [b'>', remaining @ ..] => Ok(Self {
+                ordering: Ordering::Greater,
+                or_equal: false,
+                evm_version: str::from_utf8(remaining)?.try_into()?,
+            }),
+            [b'<', b'=', remaining @ ..] => Ok(Self {
+                ordering: Ordering::Less,
+                or_equal: true,
+                evm_version: str::from_utf8(remaining)?.try_into()?,
+            }),
+            [b'<', remaining @ ..] => Ok(Self {
+                ordering: Ordering::Less,
+                or_equal: false,
+                evm_version: str::from_utf8(remaining)?.try_into()?,
+            }),
+            [b'=', remaining @ ..] => Ok(Self {
+                ordering: Ordering::Equal,
+                or_equal: false,
+                evm_version: str::from_utf8(remaining)?.try_into()?,
+            }),
+            _ => anyhow::bail!("Invalid EVM version requirement {s}"),
+        }
+    }
+}
+
+impl TryFrom<String> for EvmVersionRequirement {
+    type Error = anyhow::Error;
+
+    fn try_from(value: String) -> Result<Self, Self::Error> {
+        value.parse()
+    }
+}
+
+impl From<EvmVersionRequirement> for String {
+    fn from(value: EvmVersionRequirement) -> Self {
+        value.to_string()
+    }
+}
+
+/// A set of compilation directives that will be passed to the compiler whenever the contracts for
+/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is
+/// just a filter for when a test can run whereas this is an instruction to the compiler.
+#[derive(
+    Clone,
+    Debug,
+    Copy,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Default,
+    Serialize,
+    Deserialize,
+    JsonSchema,
+)]
+pub struct CompilationDirectives {
+    /// Defines how the revert strings should be handled.
+    pub revert_string_handling: Option<RevertString>,
+}
+
+/// Defines how the compiler should handle revert strings.
+#[derive(
+    Clone,
+    Debug,
+    Copy,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Default,
+    Serialize,
+    Deserialize,
+    JsonSchema,
+)]
+#[serde(rename_all = "camelCase")]
+pub enum RevertString {
+    /// The default handling of the revert strings.
+    #[default]
+    Default,
+    /// The debug handling of the revert strings.
+    Debug,
+    /// Strip the revert strings.
+    Strip,
+    /// Provide verbose debug strings for the revert string.
+    VerboseDebug,
+}
+
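With `#[serde(rename_all = "camelCase")]` on `RevertString`, the variants serialize as "default", "debug", "strip", and "verboseDebug". A hedged sketch of building and serializing a directives value:

```rust
// Sketch only: produces {"revert_string_handling":"strip"} -- the enum is
// camelCased by its serde attribute, while the struct field keeps its name.
let directives = CompilationDirectives {
    revert_string_handling: Some(RevertString::Strip),
};
let json = serde_json::to_string(&directives).expect("serializable");
```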
 #[cfg(test)]
 mod test {
     use super::*;

+240 -89

@@ -1,106 +1,257 @@
-use revive_dt_common::types::VersionOrRequirement;
-use semver::Version;
-use serde::de::Deserializer;
+use anyhow::Context as _;
+use regex::Regex;
+use revive_dt_common::iterators::EitherIter;
+use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
+use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+use std::fmt::Display;
+use std::str::FromStr;
+use std::sync::LazyLock;

-/// Specifies the compilation mode of the test artifact.
-#[derive(Hash, Debug, Clone, Eq, PartialEq)]
-pub enum Mode {
-    Solidity(SolcMode),
-    Unknown(String),
-}
+/// This represents a mode that has been parsed from test metadata.
+///
+/// Mode strings can take the following form (in pseudo-regex):
+///
+/// ```text
+/// [YEILV][+-]? (M[0123sz])? <semver>?
+/// ```
+///
+/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
+#[serde(try_from = "String", into = "String")]
+pub struct ParsedMode {
+    pub pipeline: Option<ModePipeline>,
+    pub optimize_flag: Option<bool>,
+    pub optimize_setting: Option<ModeOptimizerSetting>,
+    pub version: Option<semver::VersionReq>,
+}

-/// Specify Solidity specific compiler options.
-#[derive(Hash, Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
-pub struct SolcMode {
-    pub solc_version: Option<semver::VersionReq>,
-    solc_optimize: Option<bool>,
-    pub llvm_optimizer_settings: Vec<String>,
-}
-
-impl SolcMode {
-    /// Try to parse a mode string into a solc mode.
-    /// Returns `None` if the string wasn't a solc YUL mode string.
-    ///
-    /// The mode string is expected to start with the `Y` ID (YUL ID),
-    /// optionally followed by `+` or `-` for the solc optimizer settings.
-    ///
-    /// Options can be separated by a whitespace contain the following
-    /// - A solc `SemVer version requirement` string
-    /// - One or more `-OX` where X is a supposed to be an LLVM opt mode
-    pub fn parse_from_mode_string(mode_string: &str) -> Option<Self> {
-        let mut result = Self::default();
-
-        let mut parts = mode_string.trim().split(" ");
-
-        match parts.next()? {
-            "Y" => {}
-            "Y+" => result.solc_optimize = Some(true),
-            "Y-" => result.solc_optimize = Some(false),
-            _ => return None,
-        }
-
-        for part in parts {
-            if let Ok(solc_version) = semver::VersionReq::parse(part) {
-                result.solc_version = Some(solc_version);
-                continue;
-            }
-            if let Some(level) = part.strip_prefix("-O") {
-                result.llvm_optimizer_settings.push(level.to_string());
-                continue;
-            }
-            panic!("the YUL mode string {mode_string} failed to parse, invalid part: {part}")
-        }
-
-        Some(result)
-    }
-
-    /// Returns whether to enable the solc optimizer.
-    pub fn solc_optimize(&self) -> bool {
-        self.solc_optimize.unwrap_or(true)
-    }
-
-    /// Calculate the latest matching solc patch version. Returns:
-    /// - `latest_supported` if no version request was specified.
-    /// - A matching version with the same minor version as `latest_supported`, if any.
-    /// - `None` if no minor version of the `latest_supported` version matches.
-    pub fn last_patch_version(&self, latest_supported: &Version) -> Option<Version> {
-        let Some(version_req) = self.solc_version.as_ref() else {
-            return Some(latest_supported.to_owned());
-        };
-
-        // lgtm
-        for patch in (0..latest_supported.patch + 1).rev() {
-            let version = Version::new(0, latest_supported.minor, patch);
-            if version_req.matches(&version) {
-                return Some(version);
-            }
-        }
-
-        None
-    }
-
-    /// Resolves the [`SolcMode`]'s solidity version requirement into a [`VersionOrRequirement`] if
-    /// the requirement is present on the object. Otherwise, the passed default version is used.
-    pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
-        match self.solc_version {
-            Some(ref requirement) => requirement.clone().into(),
-            None => default.into(),
-        }
-    }
-}
+impl FromStr for ParsedMode {
+    type Err = anyhow::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
+            Regex::new(r"(?x)
+                ^
+                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
+                \s*
+                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
+                \s*
+                (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
+                $
+            ").unwrap()
+        });
+
+        let Some(caps) = REGEX.captures(s) else {
+            anyhow::bail!("Cannot parse mode '{s}' from string");
+        };
+
+        let pipeline = match caps.name("pipeline") {
+            Some(m) => Some(
+                ModePipeline::from_str(m.as_str())
+                    .context("Failed to parse mode pipeline from string")?,
+            ),
+            None => None,
+        };
+
+        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
+
+        let optimize_setting = match caps.name("optimize_setting") {
+            Some(m) => Some(
+                ModeOptimizerSetting::from_str(m.as_str())
+                    .context("Failed to parse optimizer setting from string")?,
+            ),
+            None => None,
+        };
+
+        let version = match caps.name("version") {
+            Some(m) => Some(
+                semver::VersionReq::parse(m.as_str())
+                    .map_err(|e| {
+                        anyhow::anyhow!(
+                            "Cannot parse the version requirement '{}': {e}",
+                            m.as_str()
+                        )
+                    })
+                    .context("Failed to parse semver requirement from mode string")?,
+            ),
+            None => None,
+        };
+
+        Ok(ParsedMode {
+            pipeline,
+            optimize_flag,
+            optimize_setting,
+            version,
+        })
+    }
+}

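Tying the regex back to the struct: each optional capture group fills exactly one field of `ParsedMode`, and absent groups stay `None`. A hedged sketch of a full mode string (the `ModePipeline` variant names are not shown in this diff, so the pipeline is left unasserted; the asserts also assume the usual derived `Debug`/`PartialEq`):

```rust
// Sketch: "E+ M3 >=0.8.0" fills all four fields; a bare "Mz" would only
// fill `optimize_setting`.
let mode = ParsedMode::from_str("E+ M3 >=0.8.0").expect("valid mode string");
assert_eq!(mode.optimize_flag, Some(true));
assert_eq!(mode.optimize_setting, Some(ModeOptimizerSetting::M3));
assert!(mode.pipeline.is_some() && mode.version.is_some());
```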
-impl<'de> Deserialize<'de> for Mode {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let mode_string = String::deserialize(deserializer)?;
-
-        if let Some(solc_mode) = SolcMode::parse_from_mode_string(&mode_string) {
-            return Ok(Self::Solidity(solc_mode));
-        }
-
-        Ok(Self::Unknown(mode_string))
+impl Display for ParsedMode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut has_written = false;
+
+        if let Some(pipeline) = self.pipeline {
+            pipeline.fmt(f)?;
+            if let Some(optimize_flag) = self.optimize_flag {
+                f.write_str(if optimize_flag { "+" } else { "-" })?;
+            }
+            has_written = true;
+        }
+
+        if let Some(optimize_setting) = self.optimize_setting {
+            if has_written {
+                f.write_str(" ")?;
+            }
+            optimize_setting.fmt(f)?;
+            has_written = true;
+        }
+
+        if let Some(version) = &self.version {
+            if has_written {
+                f.write_str(" ")?;
+            }
+            version.fmt(f)?;
+        }
+
+        Ok(())
+    }
+}
+
+impl From<ParsedMode> for String {
+    fn from(parsed_mode: ParsedMode) -> Self {
+        parsed_mode.to_string()
+    }
+}
+
+impl TryFrom<String> for ParsedMode {
+    type Error = anyhow::Error;
+    fn try_from(value: String) -> Result<Self, Self::Error> {
+        ParsedMode::from_str(&value)
+    }
+}
+
+impl ParsedMode {
+    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
+    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
+        let pipeline_iter = self.pipeline.as_ref().map_or_else(
+            || EitherIter::A(ModePipeline::test_cases()),
+            |p| EitherIter::B(std::iter::once(*p)),
+        );
+
+        let optimize_flag_setting = self.optimize_flag.map(|flag| {
+            if flag {
+                ModeOptimizerSetting::M3
+            } else {
+                ModeOptimizerSetting::M0
+            }
+        });
+
+        let optimize_flag_iter = match optimize_flag_setting {
+            Some(setting) => EitherIter::A(std::iter::once(setting)),
+            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
+        };
+
+        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
+            || EitherIter::A(optimize_flag_iter),
+            |s| EitherIter::B(std::iter::once(*s)),
+        );
+
+        pipeline_iter.flat_map(move |pipeline| {
+            optimize_settings_iter
+                .clone()
+                .map(move |optimize_setting| Mode {
+                    pipeline,
+                    optimize_setting,
+                    version: self.version.clone(),
+                })
+        })
+    }
+
+    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
+    /// This avoids any duplicate entries.
+    pub fn many_to_modes<'a>(
+        parsed: impl Iterator<Item = &'a ParsedMode>,
+    ) -> impl Iterator<Item = Mode> {
+        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
+        modes.into_iter()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parsed_mode_from_str() {
+        let strings = vec![
+            ("Mz", "Mz"),
+            ("Y", "Y"),
+            ("Y+", "Y+"),
+            ("Y-", "Y-"),
+            ("E", "E"),
+            ("E+", "E+"),
+            ("E-", "E-"),
+            ("Y M0", "Y M0"),
+            ("Y M1", "Y M1"),
+            ("Y M2", "Y M2"),
+            ("Y M3", "Y M3"),
+            ("Y Ms", "Y Ms"),
+            ("Y Mz", "Y Mz"),
+            ("E M0", "E M0"),
+            ("E M1", "E M1"),
+            ("E M2", "E M2"),
+            ("E M3", "E M3"),
+            ("E Ms", "E Ms"),
+            ("E Mz", "E Mz"),
+            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
+            ("Y 0.8.0", "Y ^0.8.0"),
+            ("E+ 0.8.0", "E+ ^0.8.0"),
+            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
+            ("E Mz <0.7.0", "E Mz <0.7.0"),
+            // We can parse +- _and_ M1/M2 but the latter takes priority.
+            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
+            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
+            // We don't see this in the wild but it is parsed.
+            ("<=0.8", "<=0.8"),
+        ];
+
+        for (actual, expected) in strings {
+            let parsed = ParsedMode::from_str(actual)
+                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
+            assert_eq!(
+                expected,
+                parsed.to_string(),
+                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
+            );
+        }
+    }
+
+    #[test]
+    fn test_parsed_mode_to_test_modes() {
+        let strings = vec![
+            ("Mz", vec!["Y Mz", "E Mz"]),
+            ("Y", vec!["Y M0", "Y M3"]),
+            ("E", vec!["E M0", "E M3"]),
+            ("Y+", vec!["Y M3"]),
+            ("Y-", vec!["Y M0"]),
+            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
+            (
+                "<=0.8",
+                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
+            ),
+        ];
+
+        for (actual, expected) in strings {
+            let parsed = ParsedMode::from_str(actual)
+                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
+            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
+            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
+
+            assert_eq!(
+                expected_set, actual_set,
+                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
+            );
+        }
     }
 }

@@ -1,13 +1,22 @@
+use std::collections::HashMap;
+
 use alloy::eips::BlockNumberOrTag;
+use alloy::json_abi::JsonAbi;
 use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
+use alloy_primitives::TxHash;
 use anyhow::Result;
+
+use crate::metadata::{ContractIdent, ContractInstance};

 /// A trait describing the interface that implementors are required to provide to be used by the
 /// resolution logic that this crate implements to go from string calldata into bytes calldata.
 pub trait ResolverApi {
     /// Returns the ID of the chain that the node is on.
     fn chain_id(&self) -> impl Future<Output = Result<ChainId>>;

+    /// Returns the gas price for the specified transaction.
+    fn transaction_gas_price(&self, tx_hash: &TxHash) -> impl Future<Output = Result<u128>>;
+
     // TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit
     // when we implement the changes to the gas we need to adjust this to be a u64.
     /// Returns the gas limit of the specified block.
@@ -19,6 +28,9 @@ pub trait ResolverApi {
     /// Returns the difficulty of the specified block.
     fn block_difficulty(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<U256>>;

+    /// Returns the base fee of the specified block.
+    fn block_base_fee(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<u64>>;
+
     /// Returns the hash of the specified block.
     fn block_hash(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<BlockHash>>;

@@ -31,3 +43,115 @@ pub trait ResolverApi {
     /// Returns the number of the last block.
     fn last_block_number(&self) -> impl Future<Output = Result<BlockNumber>>;
 }
+
+#[derive(Clone, Copy, Debug, Default)]
+/// Contextual information required by the code that's performing the resolution.
+pub struct ResolutionContext<'a> {
+    /// When provided the contracts provided here will be used for resolutions.
+    deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
+
+    /// When provided the variables in here will be used for performing resolutions.
+    variables: Option<&'a HashMap<String, U256>>,
+
+    /// When provided this block number will be treated as the tip of the chain.
+    block_number: Option<&'a BlockNumber>,
+
+    /// When provided the resolver will use this transaction hash for all of its resolutions.
+    transaction_hash: Option<&'a TxHash>,
+}
+
+impl<'a> ResolutionContext<'a> {
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    pub fn new_from_parts(
+        deployed_contracts: impl Into<
+            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
+        >,
+        variables: impl Into<Option<&'a HashMap<String, U256>>>,
+        block_number: impl Into<Option<&'a BlockNumber>>,
+        transaction_hash: impl Into<Option<&'a TxHash>>,
+    ) -> Self {
+        Self {
+            deployed_contracts: deployed_contracts.into(),
+            variables: variables.into(),
+            block_number: block_number.into(),
+            transaction_hash: transaction_hash.into(),
+        }
+    }
+
+    pub fn with_deployed_contracts(
+        mut self,
+        deployed_contracts: impl Into<
+            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
+        >,
+    ) -> Self {
+        self.deployed_contracts = deployed_contracts.into();
+        self
+    }
+
+    pub fn with_variables(
+        mut self,
+        variables: impl Into<Option<&'a HashMap<String, U256>>>,
+    ) -> Self {
+        self.variables = variables.into();
+        self
+    }
+
+    pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
+        self.block_number = block_number.into();
+        self
+    }
+
+    pub fn with_transaction_hash(
+        mut self,
+        transaction_hash: impl Into<Option<&'a TxHash>>,
+    ) -> Self {
+        self.transaction_hash = transaction_hash.into();
+        self
+    }
+
+    pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
+        match self.block_number {
+            Some(block_number) => match number {
+                BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
+                n @ (BlockNumberOrTag::Finalized
+                | BlockNumberOrTag::Safe
+                | BlockNumberOrTag::Earliest
+                | BlockNumberOrTag::Pending
+                | BlockNumberOrTag::Number(_)) => n,
+            },
+            None => number,
+        }
+    }
+
+    pub fn deployed_contract(
+        &self,
+        instance: &ContractInstance,
+    ) -> Option<&(ContractIdent, Address, JsonAbi)> {
+        self.deployed_contracts
+            .and_then(|deployed_contracts| deployed_contracts.get(instance))
+    }
+
+    pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
+        self.deployed_contract(instance).map(|(_, a, _)| a)
+    }
+
+    pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
+        self.deployed_contract(instance).map(|(_, _, a)| a)
+    }
+
+    pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
+        self.variables
+            .and_then(|variables| variables.get(name.as_ref()))
+    }
+
+    pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
+        self.block_number
+    }
+
+    pub fn transaction_hash(&self) -> Option<&'a TxHash> {
+        self.transaction_hash
+    }
+}
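The builder methods above let callers supply only the pieces a given resolution needs; everything else defaults to `None`. A short usage sketch built purely from the API added in this diff:

```rust
// Sketch only: a context that can resolve variables and pins the chain tip,
// but knows nothing about deployed contracts or a transaction hash.
let variables: HashMap<String, U256> = HashMap::new();
let block_number: BlockNumber = 42;
let context = ResolutionContext::new()
    .with_variables(&variables)
    .with_block_number(&block_number);
assert!(context.variable("missing").is_none());
```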
@@ -11,3 +11,6 @@ rust-version.workspace = true
 [dependencies]
 alloy = { workspace = true }
 anyhow = { workspace = true }
+
+[lints]
+workspace = true

@@ -1,7 +1,8 @@
 //! This crate implements all node interactions.

+use alloy::primitives::{Address, StorageKey, U256};
 use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
-use alloy::rpc::types::{TransactionReceipt, TransactionRequest};
+use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
 use anyhow::Result;

 /// An interface for all interactions with Ethereum compatible nodes.
@@ -21,4 +22,14 @@ pub trait EthereumNode {

     /// Returns the state diff of the transaction hash in the [TransactionReceipt].
     fn state_diff(&self, receipt: &TransactionReceipt) -> impl Future<Output = Result<DiffMode>>;
+
+    /// Returns the balance of the provided [`Address`] back.
+    fn balance_of(&self, address: Address) -> impl Future<Output = Result<U256>>;
+
+    /// Returns the latest storage proof of the provided [`Address`]
+    fn latest_state_proof(
+        &self,
+        address: Address,
+        keys: Vec<StorageKey>,
+    ) -> impl Future<Output = Result<EIP1186AccountProofResponse>>;
 }

@@ -14,6 +14,7 @@ alloy = { workspace = true }
 tracing = { workspace = true }
 tokio = { workspace = true }

+revive-common = { workspace = true }
 revive-dt-common = { workspace = true }
 revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
@@ -28,3 +29,6 @@ sp-runtime = { workspace = true }
 [dev-dependencies]
 temp-dir = { workspace = true }
 tokio = { workspace = true }
+
+[lints]
+workspace = true

+334 -211
@@ -3,9 +3,13 @@
 use std::{
     fs::{File, OpenOptions, create_dir_all, remove_dir_all},
     io::{BufRead, BufReader, Read, Write},
+    ops::ControlFlow,
     path::PathBuf,
     process::{Child, Command, Stdio},
-    sync::atomic::{AtomicU32, Ordering},
+    sync::{
+        Arc,
+        atomic::{AtomicU32, Ordering},
+    },
     time::{Duration, Instant},
 };

@@ -13,23 +17,28 @@ use alloy::{
     eips::BlockNumberOrTag,
     genesis::{Genesis, GenesisAccount},
     network::{Ethereum, EthereumWallet, NetworkWallet},
-    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, U256},
+    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
     providers::{
         Provider, ProviderBuilder,
         ext::DebugApi,
         fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
     },
     rpc::types::{
-        TransactionReceipt, TransactionRequest,
+        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
         trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
     },
-    signers::local::PrivateKeySigner,
 };
-use revive_dt_common::fs::clear_directory;
-use revive_dt_config::Arguments;
+use anyhow::Context as _;
+use revive_common::EVMVersion;
+use tracing::{Instrument, instrument};
+
+use revive_dt_common::{
+    fs::clear_directory,
+    futures::{PollingWaitBehavior, poll},
+};
+use revive_dt_config::*;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;
-use tracing::Level;

 use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};

@@ -43,6 +52,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
 ///
 /// Prunes the child process and the base directory on drop.
 #[derive(Debug)]
+#[allow(clippy::type_complexity)]
 pub struct GethNode {
     connection_string: String,
     base_directory: PathBuf,
@@ -51,10 +61,10 @@ pub struct GethNode {
     geth: PathBuf,
     id: u32,
     handle: Option<Child>,
-    network_id: u64,
-    start_timeout: u64,
-    wallet: EthereumWallet,
+    start_timeout: Duration,
+    wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
+    chain_id_filler: ChainIdFiller,
     /// This vector stores [`File`] objects that we use for logging which we want to flush when the
     /// node object is dropped. We do not store them in a structured fashion at the moment (in
     /// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -77,17 +87,22 @@ impl GethNode {
     const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";

     const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
+    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
+
+    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
+    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

     /// Create the node directory and call `geth init` to configure the genesis.
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
         let _ = clear_directory(&self.base_directory);
         let _ = clear_directory(&self.logs_directory);

-        create_dir_all(&self.base_directory)?;
-        create_dir_all(&self.logs_directory)?;
+        create_dir_all(&self.base_directory)
+            .context("Failed to create base directory for geth node")?;
+        create_dir_all(&self.logs_directory)
+            .context("Failed to create logs directory for geth node")?;

-        let mut genesis = serde_json::from_str::<Genesis>(&genesis)?;
         for signer_address in
             <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
         {
@@ -99,25 +114,37 @@ impl GethNode {
             .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
         }
         let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
-        serde_json::to_writer(File::create(&genesis_path)?, &genesis)?;
+        serde_json::to_writer(
+            File::create(&genesis_path).context("Failed to create geth genesis file")?,
+            &genesis,
+        )
+        .context("Failed to serialize geth genesis JSON to file")?;

         let mut child = Command::new(&self.geth)
+            .arg("--state.scheme")
+            .arg("hash")
             .arg("init")
             .arg("--datadir")
             .arg(&self.data_directory)
             .arg(genesis_path)
             .stderr(Stdio::piped())
             .stdout(Stdio::null())
-            .spawn()?;
+            .spawn()
+            .context("Failed to spawn geth --init process")?;

         let mut stderr = String::new();
         child
             .stderr
             .take()
             .expect("should be piped")
-            .read_to_string(&mut stderr)?;
+            .read_to_string(&mut stderr)
+            .context("Failed to read geth --init stderr")?;

-        if !child.wait()?.success() {
+        if !child
+            .wait()
+            .context("Failed waiting for geth --init process to finish")?
+            .success()
+        {
             anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
         }

@@ -127,7 +154,7 @@ impl GethNode {
     /// Spawn the go-ethereum node child process.
     ///
     /// [Instance::init] must be called first.
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
         // This is the `OpenOptions` that we wish to use for all of the log files that we will be
         // opening in this method. We need to construct it in this way to:
@@ -142,16 +169,17 @@ impl GethNode {

         let stdout_logs_file = open_options
             .clone()
-            .open(self.geth_stdout_log_file_path())?;
-        let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
+            .open(self.geth_stdout_log_file_path())
+            .context("Failed to open geth stdout logs file")?;
+        let stderr_logs_file = open_options
+            .open(self.geth_stderr_log_file_path())
+            .context("Failed to open geth stderr logs file")?;
         self.handle = Command::new(&self.geth)
             .arg("--dev")
             .arg("--datadir")
             .arg(&self.data_directory)
             .arg("--ipcpath")
             .arg(&self.connection_string)
-            .arg("--networkid")
-            .arg(self.network_id.to_string())
             .arg("--nodiscover")
             .arg("--maxpeers")
             .arg("0")
@@ -159,14 +187,30 @@ impl GethNode {
             .arg("0")
             .arg("--cache.blocklogs")
             .arg("512")
-            .stderr(stderr_logs_file.try_clone()?)
-            .stdout(stdout_logs_file.try_clone()?)
-            .spawn()?
+            .arg("--state.scheme")
+            .arg("hash")
+            .arg("--syncmode")
+            .arg("full")
+            .arg("--gcmode")
+            .arg("archive")
+            .stderr(
+                stderr_logs_file
+                    .try_clone()
+                    .context("Failed to clone geth stderr log file handle")?,
+            )
+            .stdout(
+                stdout_logs_file
+                    .try_clone()
+                    .context("Failed to clone geth stdout log file handle")?,
+            )
+            .spawn()
+            .context("Failed to spawn geth node process")?
             .into();

         if let Err(error) = self.wait_ready() {
             tracing::error!(?error, "Failed to start geth, shutting down gracefully");
-            self.shutdown()?;
+            self.shutdown()
+                .context("Failed to gracefully shutdown after geth start error")?;
             return Err(error);
         }

@@ -179,7 +223,7 @@ impl GethNode {
     /// Wait for the go-ethereum node child process to become ready.
     ///
     /// [Instance::spawn_process] must be called beforehand.
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
         let start_time = Instant::now();

@@ -188,10 +232,12 @@ impl GethNode {
             .write(false)
             .append(false)
             .truncate(false)
-            .open(self.geth_stderr_log_file_path())?;
+            .open(self.geth_stderr_log_file_path())
+            .context("Failed to open geth stderr logs file for readiness check")?;

-        let maximum_wait_time = Duration::from_millis(self.start_timeout);
+        let maximum_wait_time = self.start_timeout;
         let mut stderr = BufReader::new(logs_file).lines();
+        let mut lines = vec![];
         loop {
             if let Some(Ok(line)) = stderr.next() {
                 if line.contains(Self::ERROR_MARKER) {
@@ -200,142 +246,156 @@ impl GethNode {
                 if line.contains(Self::READY_MARKER) {
                     return Ok(self);
                 }
+                lines.push(line);
             }
             if Instant::now().duration_since(start_time) > maximum_wait_time {
-                anyhow::bail!("Timeout in starting geth");
+                anyhow::bail!(
+                    "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
+                    self.start_timeout.as_millis(),
+                    lines.join("\n")
+                );
             }
         }
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn geth_stdout_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn geth_stderr_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
     }

-    fn provider(
+    async fn provider(
         &self,
-    ) -> impl Future<
-        Output = anyhow::Result<
-            FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>,
-        >,
-    > + 'static {
-        let connection_string = self.connection_string();
-        let wallet = self.wallet.clone();
-
-        // Note: We would like all providers to make use of the same nonce manager so that we have
-        // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in
-        // its implementation and therefore it means that when we clone it then it still references
-        // the same state.
-        let nonce_manager = self.nonce_manager.clone();
-
-        Box::pin(async move {
-            ProviderBuilder::new()
-                .disable_recommended_fillers()
-                .filler(FallbackGasFiller::new(500_000_000, 500_000_000, 1))
-                .filler(ChainIdFiller::default())
-                .filler(NonceFiller::new(nonce_manager))
-                .wallet(wallet)
-                .connect(&connection_string)
-                .await
-                .map_err(Into::into)
-        })
+    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
+    {
+        ProviderBuilder::new()
+            .disable_recommended_fillers()
+            .filler(FallbackGasFiller::new(
+                25_000_000,
+                1_000_000_000,
+                1_000_000_000,
+            ))
+            .filler(self.chain_id_filler.clone())
+            .filler(NonceFiller::new(self.nonce_manager.clone()))
+            .wallet(self.wallet.clone())
+            .connect(&self.connection_string)
+            .await
+            .map_err(Into::into)
     }
 }

 impl EthereumNode for GethNode {
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(
+        level = "info",
+        skip_all,
+        fields(geth_node_id = self.id, connection_string = self.connection_string),
+        err,
+    )]
     async fn execute_transaction(
         &self,
         transaction: TransactionRequest,
     ) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
-        let outer_span = tracing::debug_span!("Submitting transaction", ?transaction);
-        let _outer_guard = outer_span.enter();
-
-        let provider = self.provider().await?;
-
-        let pending_transaction = provider.send_transaction(transaction).await?;
-        let transaction_hash = pending_transaction.tx_hash();
-
-        let span = tracing::info_span!("Awaiting transaction receipt", ?transaction_hash);
-        let _guard = span.enter();
+        let provider = self
+            .provider()
+            .await
+            .context("Failed to create provider for transaction submission")?;

+        let pending_transaction = provider
+            .send_transaction(transaction)
+            .await
+            .inspect_err(
+                |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
+            )
+            .context("Failed to submit transaction to geth node")?;
+        let transaction_hash = *pending_transaction.tx_hash();

-        // The following is a fix for the "transaction indexing is in progress" error that we
-        // used to get. You can find more information on this in the following GH issue in geth
+        // The following is a fix for the "transaction indexing is in progress" error that we used
+        // to get. You can find more information on this in the following GH issue in geth
         // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
         // before we can get the receipt of the transaction it needs to have been indexed by the
-        // node's indexer. Just because the transaction has been confirmed it doesn't mean that
-        // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
-        // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
-        // which _might_ return the above error if the tx has not yet been indexed yet. So, we
-        // need to implement a retry mechanism for the receipt to keep retrying to get it until
-        // it eventually works, but we only do that if the error we get back is the "transaction
+        // node's indexer. Just because the transaction has been confirmed it doesn't mean that it
+        // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
+        // confirmed. If it has been, then it will call the `eth_getTransactionReceipt` method which
+        // _might_ return the above error if the tx has not been indexed yet. So, we need to
+        // implement a retry mechanism for the receipt to keep retrying to get it until it
+        // eventually works, but we only do that if the error we get back is the "transaction
         // indexing is in progress" error or if the receipt is None.
         //
-        // Getting the transaction indexed and taking a receipt can take a long time especially
-        // when a lot of transactions are being submitted to the node. Thus, while initially we
-        // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to
-        // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting
-        // with exponential backoff each time we attempt to get the receipt and find that it's
-        // not available.
-        let mut retries = 0;
-        let mut total_wait_duration = Duration::from_secs(0);
-        let max_allowed_wait_duration = Duration::from_secs(5 * 60);
-        loop {
-            if total_wait_duration >= max_allowed_wait_duration {
-                tracing::error!(
-                    ?total_wait_duration,
-                    ?max_allowed_wait_duration,
-                    retry_count = retries,
-                    "Failed to get receipt after polling for it"
-                );
-                anyhow::bail!(
-                    "Polled for receipt for {total_wait_duration:?} but failed to get it"
-                );
-            }
-
-            match provider.get_transaction_receipt(*transaction_hash).await {
-                Ok(Some(receipt)) => {
-                    tracing::info!(?total_wait_duration, "Found receipt");
-                    break Ok(receipt);
-                }
-                Ok(None) => {}
+        // Getting the transaction indexed and taking a receipt can take a long time especially when
+        // a lot of transactions are being submitted to the node. Thus, while initially we only
+        // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
+        // a larger wait time. Therefore, in here we allow for 5 minutes of waiting, polling for the
+        // receipt at a constant interval until it becomes available.
+        let provider = Arc::new(provider);
+        poll(
+            Self::RECEIPT_POLLING_DURATION,
+            PollingWaitBehavior::Constant(Duration::from_millis(200)),
+            move || {
+                let provider = provider.clone();
+                async move {
+                    match provider.get_transaction_receipt(transaction_hash).await {
+                        Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
+                        Ok(None) => Ok(ControlFlow::Continue(())),
                 Err(error) => {
                     let error_string = error.to_string();
|
||||||
if !error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
|
match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
|
||||||
break Err(error.into());
|
true => Ok(ControlFlow::Continue(())),
|
||||||
|
false => Err(error.into()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
|
||||||
|
|
||||||
let next_wait_duration = Duration::from_secs(2u64.pow(retries))
|
|
||||||
.min(max_allowed_wait_duration - total_wait_duration);
|
|
||||||
total_wait_duration += next_wait_duration;
|
|
||||||
retries += 1;
|
|
||||||
|
|
||||||
tokio::time::sleep(next_wait_duration).await;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.instrument(tracing::info_span!(
|
||||||
|
"Awaiting transaction receipt",
|
||||||
|
?transaction_hash
|
||||||
|
))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
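One detail of this rewrite is easy to miss: the old code held `enter()` guards from `tracing::debug_span!` and `tracing::info_span!` across `.await` points. That is a documented tracing pitfall in async Rust, because the guard is thread-bound while the task can be suspended and resumed elsewhere, so the span can leak onto unrelated work. The new code attaches the span to the future with `.instrument(...)` instead. A minimal sketch of the two patterns, assuming the tokio (with default features) and tracing crates:

use tracing::{Instrument, info_span};

async fn fetch() -> u32 {
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    42
}

#[tokio::main]
async fn main() {
    // Anti-pattern: this guard would stay active across the await below and
    // could mislabel whatever else runs on this thread while `fetch` pends.
    // let _guard = info_span!("fetch").enter();

    // Correct: the span is entered exactly while the future is being polled.
    let value = fetch().instrument(info_span!("fetch")).await;
    println!("{value}");
}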
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn trace_transaction(
         &self,
         transaction: &TransactionReceipt,
         trace_options: GethDebugTracingOptions,
     ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
-        let tx_hash = transaction.transaction_hash;
-        Ok(self
-            .provider()
-            .await?
-            .debug_trace_transaction(tx_hash, trace_options)
-            .await?)
+        let provider = Arc::new(
+            self.provider()
+                .await
+                .context("Failed to create provider for tracing")?,
+        );
+        poll(
+            Self::TRACE_POLLING_DURATION,
+            PollingWaitBehavior::Constant(Duration::from_millis(200)),
+            move || {
+                let provider = provider.clone();
+                let trace_options = trace_options.clone();
+                async move {
+                    match provider
+                        .debug_trace_transaction(transaction.transaction_hash, trace_options)
+                        .await
+                    {
+                        Ok(trace) => Ok(ControlFlow::Break(trace)),
+                        Err(error) => {
+                            let error_string = error.to_string();
+                            match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
+                                true => Ok(ControlFlow::Continue(())),
+                                false => Err(error.into()),
+                            }
+                        }
+                    }
+                }
+            },
+        )
+        .await
     }

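Both retry sites above delegate to a `poll` helper whose definition sits outside this diff. Judging only from the call sites, it takes an overall polling duration, a wait behaviour, and a closure returning `ControlFlow` to signal stop-with-value versus retry. A rough sketch under those assumptions (the name, argument order, and the simplified constant-wait parameter are inferred from usage, not the crate's actual API):

use std::future::Future;
use std::ops::ControlFlow;
use std::time::Duration;

/// Rough sketch of a polling helper: run `operation` until it breaks with a
/// value, fails with a hard error, or the overall `timeout` elapses.
async fn poll<T, F, Fut>(timeout: Duration, wait: Duration, mut operation: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = anyhow::Result<ControlFlow<T>>>,
{
    let started = std::time::Instant::now();
    loop {
        match operation().await? {
            ControlFlow::Break(value) => return Ok(value),
            ControlFlow::Continue(()) => {
                if started.elapsed() >= timeout {
                    anyhow::bail!("polled for {timeout:?} without a result");
                }
                tokio::time::sleep(wait).await;
            }
        }
    }
}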
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
         let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
             diff_mode: Some(true),
@@ -344,79 +404,146 @@ impl EthereumNode for GethNode {
         });
         match self
             .trace_transaction(transaction, trace_options)
-            .await?
-            .try_into_pre_state_frame()?
+            .await
+            .context("Failed to trace transaction for prestate diff")?
+            .try_into_pre_state_frame()
+            .context("Failed to convert trace into pre-state frame")?
         {
             PreStateFrame::Diff(diff) => Ok(diff),
             _ => anyhow::bail!("expected a diff mode trace"),
         }
     }
+
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
+        self.provider()
+            .await
+            .context("Failed to get the Geth provider")?
+            .get_balance(address)
+            .await
+            .map_err(Into::into)
+    }
+
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    async fn latest_state_proof(
+        &self,
+        address: Address,
+        keys: Vec<StorageKey>,
+    ) -> anyhow::Result<EIP1186AccountProofResponse> {
+        self.provider()
+            .await
+            .context("Failed to get the Geth provider")?
+            .get_proof(address, keys)
+            .latest()
+            .await
+            .map_err(Into::into)
+    }
 }

 impl ResolverApi for GethNode {
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_chain_id()
             .await
            .map_err(Into::into)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
+        self.provider()
+            .await
+            .context("Failed to get the Geth provider")?
+            .get_transaction_receipt(*tx_hash)
+            .await?
+            .context("Failed to get the transaction receipt")
+            .map(|receipt| receipt.effective_gas_price)
+    }
+
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
             .map(|block| block.header.gas_limit as _)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
             .map(|block| block.header.beneficiary)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
-            .map(|block| block.header.difficulty)
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
+            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
     }

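The `block_difficulty` change above is more than error-message polish: since the merge, execution clients report `difficulty` as zero and the former `mixHash` header field carries the PREVRANDAO value (EIP-4399), so reading `mix_hash` as a big-endian `U256` is what yields a meaningful per-block value. The conversion in isolation, using `alloy_primitives` and a toy hash rather than a live header:

use alloy_primitives::{B256, U256};

fn main() {
    // A toy "mix hash"; post-merge this header field carries PREVRANDAO.
    let mix_hash = B256::repeat_byte(0x11);
    // Same conversion as the diff: treat the 32 bytes as a big-endian U256.
    let randao = U256::from_be_bytes(mix_hash.0);
    println!("{randao}");
}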
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
+        self.provider()
+            .await
+            .context("Failed to get the Geth provider")?
+            .get_block_by_number(number)
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
+            .and_then(|block| {
+                block
+                    .header
+                    .base_fee_per_gas
+                    .context("Failed to get the base fee per gas")
+            })
+    }
+
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
             .map(|block| block.header.hash)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the geth block")?
+            .context("Failed to get the Geth block, perhaps there are no blocks?")
             .map(|block| block.header.timestamp)
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Geth provider")?
             .get_block_number()
             .await
             .map_err(Into::into)
@@ -424,44 +551,59 @@ impl ResolverApi for GethNode {
     }
 }

 impl Node for GethNode {
-    fn new(config: &Arguments) -> Self {
-        let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
+    fn new(
+        context: impl AsRef<WorkingDirectoryConfiguration>
+        + AsRef<ConcurrencyConfiguration>
+        + AsRef<GenesisConfiguration>
+        + AsRef<WalletConfiguration>
+        + AsRef<GethConfiguration>
+        + AsRef<KitchensinkConfiguration>
+        + AsRef<ReviveDevNodeConfiguration>
+        + AsRef<EthRpcConfiguration>
+        + Clone,
+    ) -> Self {
+        let working_directory_configuration =
+            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
+        let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
+        let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
+
+        let geth_directory = working_directory_configuration
+            .as_path()
+            .join(Self::BASE_DIRECTORY);
         let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
         let base_directory = geth_directory.join(id.to_string());

-        let mut wallet = config.wallet();
-        for signer in (1..=config.private_keys_to_add)
-            .map(|id| U256::from(id))
-            .map(|id| id.to_be_bytes::<32>())
-            .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
-        {
-            wallet.register_signer(signer);
-        }
+        let wallet = wallet_configuration.wallet();

         Self {
             connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
             data_directory: base_directory.join(Self::DATA_DIRECTORY),
             logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
             base_directory,
-            geth: config.geth.clone(),
+            geth: geth_configuration.path.clone(),
             id,
             handle: None,
-            network_id: config.network_id,
-            start_timeout: config.geth_start_timeout,
-            wallet,
+            start_timeout: geth_configuration.start_timeout_ms,
+            wallet: wallet.clone(),
+            chain_id_filler: Default::default(),
+            nonce_manager: Default::default(),
             // We know that we only need to be storing 2 files so we can specify that when creating
             // the vector. It's the stdout and stderr of the geth node.
             logs_file_to_flush: Vec::with_capacity(2),
-            nonce_manager: Default::default(),
         }
     }

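The new constructor signature replaces the old monolithic `Arguments` parameter with any context that can lend out each configuration slice via `AsRef`, which is what lets the tests later in this diff pass a small `ExecutionContext` while the CLI passes its full argument set. A self-contained sketch of the idiom with hypothetical slice types (`WorkDir` and `BinaryPath` are illustrative, not the crate's types):

use std::path::{Path, PathBuf};

// Hypothetical configuration slices, standing in for types like
// WorkingDirectoryConfiguration and GethConfiguration.
struct WorkDir(PathBuf);
struct BinaryPath(PathBuf);

// A context bundle lends out each slice through a dedicated AsRef impl.
struct Context {
    work_dir: WorkDir,
    binary: BinaryPath,
}

impl AsRef<WorkDir> for Context {
    fn as_ref(&self) -> &WorkDir {
        &self.work_dir
    }
}

impl AsRef<BinaryPath> for Context {
    fn as_ref(&self) -> &BinaryPath {
        &self.binary
    }
}

// Consumers name only the slices they need; the turbofish on
// `AsRef::<T>::as_ref` picks the right impl when several are in scope.
fn describe(context: impl AsRef<WorkDir> + AsRef<BinaryPath>) -> String {
    let dir: &WorkDir = context.as_ref();
    let bin = AsRef::<BinaryPath>::as_ref(&context);
    format!("{} / {}", dir.0.display(), bin.0.display())
}

fn main() {
    let context = Context {
        work_dir: WorkDir(Path::new("/tmp/workdir").to_path_buf()),
        binary: BinaryPath(Path::new("/usr/bin/geth").to_path_buf()),
    };
    println!("{}", describe(context));
}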
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    fn id(&self) -> usize {
+        self.id as _
+    }
+
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn connection_string(&self) -> String {
         self.connection_string.clone()
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn shutdown(&mut self) -> anyhow::Result<()> {
         // Terminate the processes in a graceful manner to allow for the output to be flushed.
         if let Some(mut child) = self.handle.take() {
@@ -483,36 +625,41 @@ impl Node for GethNode {
         Ok(())
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
+    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
         self.init(genesis)?.spawn_process()?;
         Ok(())
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn version(&self) -> anyhow::Result<String> {
         let output = Command::new(&self.geth)
             .arg("--version")
             .stdin(Stdio::null())
             .stdout(Stdio::piped())
             .stderr(Stdio::null())
-            .spawn()?
-            .wait_with_output()?
+            .spawn()
+            .context("Failed to spawn geth --version process")?
+            .wait_with_output()
+            .context("Failed to wait for geth --version output")?
             .stdout;
         Ok(String::from_utf8_lossy(&output).into())
     }

-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn matches_target(&self, targets: Option<&[String]>) -> bool {
+    fn matches_target(targets: Option<&[String]>) -> bool {
         match targets {
             None => true,
             Some(targets) => targets.iter().any(|str| str.as_str() == "evm"),
         }
     }
+
+    fn evm_version() -> EVMVersion {
+        EVMVersion::Cancun
+    }
 }

 impl Drop for GethNode {
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn drop(&mut self) {
         self.shutdown().expect("Failed to shutdown")
     }
@@ -520,49 +667,25 @@ impl Drop for GethNode {

 #[cfg(test)]
 mod tests {
-    use revive_dt_config::Arguments;
-
-    use temp_dir::TempDir;
-
-    use crate::{GENESIS_JSON, Node};
-
     use super::*;

-    fn test_config() -> (Arguments, TempDir) {
-        let mut config = Arguments::default();
-        let temp_dir = TempDir::new().unwrap();
-        config.working_directory = temp_dir.path().to_path_buf().into();
-
-        (config, temp_dir)
+    fn test_config() -> ExecutionContext {
+        ExecutionContext::default()
     }

-    fn new_node() -> (GethNode, TempDir) {
-        let (args, temp_dir) = test_config();
-        let mut node = GethNode::new(&args);
-        node.init(GENESIS_JSON.to_owned())
+    fn new_node() -> (ExecutionContext, GethNode) {
+        let context = test_config();
+        let mut node = GethNode::new(&context);
+        node.init(context.genesis_configuration.genesis().unwrap().clone())
             .expect("Failed to initialize the node")
             .spawn_process()
             .expect("Failed to spawn the node process");
-        (node, temp_dir)
-    }
-
-    #[test]
-    fn init_works() {
-        GethNode::new(&test_config().0)
-            .init(GENESIS_JSON.to_string())
-            .unwrap();
-    }
-
-    #[test]
-    fn spawn_works() {
-        GethNode::new(&test_config().0)
-            .spawn(GENESIS_JSON.to_string())
-            .unwrap();
+        (context, node)
     }

     #[test]
     fn version_works() {
-        let version = GethNode::new(&test_config().0).version().unwrap();
+        let version = GethNode::new(&test_config()).version().unwrap();
         assert!(
             version.starts_with("geth version"),
             "expected version string, got: '{version}'"
@@ -572,7 +695,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_chain_id_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let chain_id = node.chain_id().await;
@@ -585,7 +708,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_gas_limit_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -598,7 +721,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_coinbase_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -611,7 +734,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_block_difficulty_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -624,7 +747,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_block_hash_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -636,7 +759,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_block_timestamp_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -648,7 +771,7 @@ mod tests {
     #[tokio::test]
     async fn can_get_block_number_from_node() {
         // Arrange
-        let (node, _temp_dir) = new_node();
+        let (_context, node) = new_node();

         // Act
         let block_number = node.last_block_number().await;

+258
-164
@@ -3,7 +3,10 @@ use std::{
     io::{BufRead, Write},
     path::{Path, PathBuf},
     process::{Child, Command, Stdio},
-    sync::atomic::{AtomicU32, Ordering},
+    sync::{
+        Arc,
+        atomic::{AtomicU32, Ordering},
+    },
     time::Duration,
 };

@@ -16,7 +19,8 @@ use alloy::{
         TransactionBuilderError, UnbuiltTransactionError,
     },
     primitives::{
-        Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes, U256,
+        Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
+        TxHash, U256,
     },
     providers::{
         Provider, ProviderBuilder,
@@ -24,21 +28,21 @@ use alloy::{
         fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
     },
     rpc::types::{
-        TransactionReceipt,
+        EIP1186AccountProofResponse, TransactionReceipt,
         eth::{Block, Header, Transaction},
         trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
     },
-    signers::local::PrivateKeySigner,
 };
+use anyhow::Context as _;
+use revive_common::EVMVersion;
 use revive_dt_common::fs::clear_directory;
 use revive_dt_format::traits::ResolverApi;
 use serde::{Deserialize, Serialize};
 use serde_json::{Value as JsonValue, json};
 use sp_core::crypto::Ss58Codec;
 use sp_runtime::AccountId32;
-use tracing::Level;

-use revive_dt_config::Arguments;
+use revive_dt_config::*;
 use revive_dt_node_interaction::EthereumNode;

 use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -49,14 +53,17 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
 pub struct KitchensinkNode {
     id: u32,
     substrate_binary: PathBuf,
+    dev_node_binary: PathBuf,
     eth_proxy_binary: PathBuf,
     rpc_url: String,
-    wallet: EthereumWallet,
     base_directory: PathBuf,
     logs_directory: PathBuf,
     process_substrate: Option<Child>,
     process_proxy: Option<Child>,
+    wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
+    chain_id_filler: ChainIdFiller,
+    use_kitchensink_not_dev_node: bool,
     /// This vector stores [`File`] objects that we use for logging which we want to flush when the
     /// node object is dropped. We do not store them in a structured fashion at the moment (in
     /// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -84,23 +91,34 @@ impl KitchensinkNode {
     const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
     const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
-    fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
+    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
         let _ = clear_directory(&self.base_directory);
         let _ = clear_directory(&self.logs_directory);

-        create_dir_all(&self.base_directory)?;
-        create_dir_all(&self.logs_directory)?;
+        create_dir_all(&self.base_directory)
+            .context("Failed to create base directory for kitchensink node")?;
+        create_dir_all(&self.logs_directory)
+            .context("Failed to create logs directory for kitchensink node")?;

         let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);

         // Note: we do not pipe the logs of this process to a separate file since this is just a
         // once-off export of the default chain spec and not part of the long-running node process.
-        let output = Command::new(&self.substrate_binary)
-            .arg("export-chain-spec")
-            .arg("--chain")
-            .arg("dev")
-            .output()?;
+        let output = if self.use_kitchensink_not_dev_node {
+            Command::new(&self.substrate_binary)
+                .arg("export-chain-spec")
+                .arg("--chain")
+                .arg("dev")
+                .output()
+                .context("Failed to export the chain-spec")?
+        } else {
+            Command::new(&self.dev_node_binary)
+                .arg("build-spec")
+                .arg("--chain")
+                .arg("dev")
+                .output()
+                .context("Failed to export the chain-spec")?
+        };

         if !output.status.success() {
             anyhow::bail!(
@@ -109,8 +127,10 @@ impl KitchensinkNode {
             );
         }

-        let content = String::from_utf8(output.stdout)?;
-        let mut chainspec_json: JsonValue = serde_json::from_str(&content)?;
+        let content = String::from_utf8(output.stdout)
+            .context("Failed to decode substrate export-chain-spec output as UTF-8")?;
+        let mut chainspec_json: JsonValue =
+            serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?;

         let existing_chainspec_balances =
             chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
@@ -132,7 +152,6 @@ impl KitchensinkNode {
             })
             .collect();
         let mut eth_balances = {
-            let mut genesis = serde_json::from_str::<Genesis>(genesis)?;
             for signer_address in
                 <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
             {
@@ -143,7 +162,8 @@ impl KitchensinkNode {
                     .entry(signer_address)
                     .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
             }
-            self.extract_balance_from_genesis_file(&genesis)?
+            self.extract_balance_from_genesis_file(&genesis)
+                .context("Failed to extract balances from EVM genesis JSON")?
         };
         merged_balances.append(&mut eth_balances);

@@ -151,13 +171,14 @@ impl KitchensinkNode {
             json!(merged_balances);

         serde_json::to_writer_pretty(
-            std::fs::File::create(&template_chainspec_path)?,
+            std::fs::File::create(&template_chainspec_path)
+                .context("Failed to create kitchensink template chainspec file")?,
             &chainspec_json,
-        )?;
+        )
+        .context("Failed to write kitchensink template chainspec JSON")?;
         Ok(self)
     }

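The chain-spec editing above leans on `serde_json`'s index-assignment to patch one nested array in an otherwise opaque document. A self-contained sketch of the same patch-in-place idiom, with a toy spec and toy balances (the real code targets genesis.runtimeGenesis.patch.balances.balances in the exported spec):

use serde_json::{Value, json};

fn main() {
    // Toy chain spec with the same nesting the diff patches.
    let mut spec: Value = json!({
        "genesis": { "runtimeGenesis": { "patch": { "balances": { "balances": [] } } } }
    });

    // Merged (account, balance) pairs, standing in for the balances gathered
    // from the template spec and the EVM genesis file.
    let merged_balances = vec![("account-a", 1_000_000u64), ("account-b", 500_000u64)];

    // Index-assignment on `Value` overwrites the nested entry in place;
    // tuples serialize as two-element arrays, matching the balances format.
    spec["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
        json!(merged_balances);

    println!("{}", serde_json::to_string_pretty(&spec).unwrap());
}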
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn spawn_process(&mut self) -> anyhow::Result<()> {
         let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
         let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
@@ -180,11 +201,18 @@ impl KitchensinkNode {
         // Start Substrate node
         let kitchensink_stdout_logs_file = open_options
             .clone()
-            .open(self.kitchensink_stdout_log_file_path())?;
+            .open(self.kitchensink_stdout_log_file_path())
+            .context("Failed to open kitchensink stdout logs file")?;
         let kitchensink_stderr_logs_file = open_options
             .clone()
-            .open(self.kitchensink_stderr_log_file_path())?;
-        self.process_substrate = Command::new(&self.substrate_binary)
+            .open(self.kitchensink_stderr_log_file_path())
+            .context("Failed to open kitchensink stderr logs file")?;
+        let node_binary_path = if self.use_kitchensink_not_dev_node {
+            self.substrate_binary.as_path()
+        } else {
+            self.dev_node_binary.as_path()
+        };
+        self.process_substrate = Command::new(node_binary_path)
             .arg("--dev")
             .arg("--chain")
             .arg(chainspec_path)
@@ -199,49 +227,71 @@ impl KitchensinkNode {
             .arg("Unsafe")
             .arg("--rpc-cors")
             .arg("all")
+            .arg("--rpc-max-connections")
+            .arg(u32::MAX.to_string())
             .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
-            .stdout(kitchensink_stdout_logs_file.try_clone()?)
-            .stderr(kitchensink_stderr_logs_file.try_clone()?)
-            .spawn()?
+            .stdout(
+                kitchensink_stdout_logs_file
+                    .try_clone()
+                    .context("Failed to clone kitchensink stdout log file handle")?,
+            )
+            .stderr(
+                kitchensink_stderr_logs_file
+                    .try_clone()
+                    .context("Failed to clone kitchensink stderr log file handle")?,
+            )
+            .spawn()
+            .context("Failed to spawn substrate node process")?
             .into();

         // Give the node a moment to boot
         if let Err(error) = Self::wait_ready(
             self.kitchensink_stderr_log_file_path().as_path(),
             Self::SUBSTRATE_READY_MARKER,
-            Duration::from_secs(30),
+            Duration::from_secs(60),
         ) {
-            tracing::error!(
-                ?error,
-                "Failed to start substrate, shutting down gracefully"
-            );
-            self.shutdown()?;
+            self.shutdown()
+                .context("Failed to gracefully shutdown after substrate start error")?;
             return Err(error);
         };

         let eth_proxy_stdout_logs_file = open_options
             .clone()
-            .open(self.proxy_stdout_log_file_path())?;
-        let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?;
+            .open(self.proxy_stdout_log_file_path())
+            .context("Failed to open eth-proxy stdout logs file")?;
+        let eth_proxy_stderr_logs_file = open_options
+            .open(self.proxy_stderr_log_file_path())
+            .context("Failed to open eth-proxy stderr logs file")?;
         self.process_proxy = Command::new(&self.eth_proxy_binary)
             .arg("--dev")
             .arg("--rpc-port")
             .arg(proxy_rpc_port.to_string())
             .arg("--node-rpc-url")
             .arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
+            .arg("--rpc-max-connections")
+            .arg(u32::MAX.to_string())
             .env("RUST_LOG", Self::PROXY_LOG_ENV)
-            .stdout(eth_proxy_stdout_logs_file.try_clone()?)
-            .stderr(eth_proxy_stderr_logs_file.try_clone()?)
-            .spawn()?
+            .stdout(
+                eth_proxy_stdout_logs_file
+                    .try_clone()
+                    .context("Failed to clone eth-proxy stdout log file handle")?,
+            )
+            .stderr(
+                eth_proxy_stderr_logs_file
+                    .try_clone()
+                    .context("Failed to clone eth-proxy stderr log file handle")?,
+            )
+            .spawn()
+            .context("Failed to spawn eth-proxy process")?
             .into();

         if let Err(error) = Self::wait_ready(
             self.proxy_stderr_log_file_path().as_path(),
             Self::ETH_PROXY_READY_MARKER,
-            Duration::from_secs(30),
+            Duration::from_secs(60),
         ) {
-            tracing::error!(?error, "Failed to start proxy, shutting down gracefully");
-            self.shutdown()?;
+            self.shutdown()
+                .context("Failed to gracefully shutdown after eth-proxy start error")?;
             return Err(error);
         };

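`Self::wait_ready` is defined outside this hunk; from the call sites it evidently tails a log file until a marker line appears or the timeout (now raised from 30 to 60 seconds) expires. A rough sketch of such a readiness check under that assumption (not the crate's actual implementation):

use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::{Duration, Instant};

/// Polls `log_path` until some line contains `marker` or `timeout` elapses.
fn wait_ready(log_path: &Path, marker: &str, timeout: Duration) -> anyhow::Result<()> {
    let started = Instant::now();
    while started.elapsed() < timeout {
        // The file may not exist yet while the child process is booting.
        if let Ok(file) = std::fs::File::open(log_path) {
            let found = BufReader::new(file)
                .lines()
                .map_while(Result::ok)
                .any(|line| line.contains(marker));
            if found {
                return Ok(());
            }
        }
        std::thread::sleep(Duration::from_millis(100));
    }
    anyhow::bail!("'{marker}' did not appear in {log_path:?} within {timeout:?}")
}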
@@ -255,7 +305,6 @@ impl KitchensinkNode {
         Ok(())
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn extract_balance_from_genesis_file(
         &self,
         genesis: &Genesis,
@@ -304,7 +353,6 @@ impl KitchensinkNode {
         }
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
         let output = Command::new(&self.eth_proxy_binary)
             .arg("--version")
@@ -317,100 +365,82 @@ impl KitchensinkNode {
         Ok(String::from_utf8_lossy(&output).trim().to_string())
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn kitchensink_stdout_log_file_path(&self) -> PathBuf {
         self.logs_directory
             .join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn kitchensink_stderr_log_file_path(&self) -> PathBuf {
         self.logs_directory
             .join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn proxy_stdout_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn proxy_stderr_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME)
     }

-    fn provider(
+    async fn provider(
         &self,
-    ) -> impl Future<
-        Output = anyhow::Result<
-            FillProvider<
-                impl TxFiller<KitchenSinkNetwork>,
-                impl Provider<KitchenSinkNetwork>,
-                KitchenSinkNetwork,
-            >,
-        >,
-    > + 'static {
-        let connection_string = self.connection_string();
-        let wallet = self.wallet.clone();
-
-        // Note: We would like all providers to make use of the same nonce manager so that we have
-        // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in
-        // its implementation and therefore it means that when we clone it then it still references
-        // the same state.
-        let nonce_manager = self.nonce_manager.clone();
-
-        Box::pin(async move {
-            ProviderBuilder::new()
-                .disable_recommended_fillers()
-                .network::<KitchenSinkNetwork>()
-                .filler(FallbackGasFiller::new(
-                    30_000_000,
-                    200_000_000_000,
-                    3_000_000_000,
-                ))
-                .filler(ChainIdFiller::default())
-                .filler(NonceFiller::new(nonce_manager))
-                .wallet(wallet)
-                .connect(&connection_string)
-                .await
-                .map_err(Into::into)
-        })
+    ) -> anyhow::Result<
+        FillProvider<
+            impl TxFiller<KitchenSinkNetwork>,
+            impl Provider<KitchenSinkNetwork>,
+            KitchenSinkNetwork,
+        >,
+    > {
+        ProviderBuilder::new()
+            .disable_recommended_fillers()
+            .network::<KitchenSinkNetwork>()
+            .filler(FallbackGasFiller::new(
+                25_000_000,
+                1_000_000_000,
+                1_000_000_000,
+            ))
+            .filler(self.chain_id_filler.clone())
+            .filler(NonceFiller::new(self.nonce_manager.clone()))
+            .wallet(self.wallet.clone())
+            .connect(&self.rpc_url)
+            .await
+            .map_err(Into::into)
     }
 }

 impl EthereumNode for KitchensinkNode {
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn execute_transaction(
         &self,
         transaction: alloy::rpc::types::TransactionRequest,
     ) -> anyhow::Result<TransactionReceipt> {
-        tracing::debug!(?transaction, "Submitting transaction");
         let receipt = self
             .provider()
-            .await?
+            .await
+            .context("Failed to create provider for transaction submission")?
             .send_transaction(transaction)
-            .await?
+            .await
+            .context("Failed to submit transaction to kitchensink proxy")?
             .get_receipt()
-            .await?;
-        tracing::info!(?receipt, "Submitted tx to kitchensink");
+            .await
+            .context("Failed to fetch transaction receipt from kitchensink proxy")?;
         Ok(receipt)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn trace_transaction(
         &self,
         transaction: &TransactionReceipt,
         trace_options: GethDebugTracingOptions,
     ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
         let tx_hash = transaction.transaction_hash;
-        Ok(self
-            .provider()
-            .await?
-            .debug_trace_transaction(tx_hash, trace_options)
-            .await?)
+        self.provider()
+            .await
+            .context("Failed to create provider for debug tracing")?
+            .debug_trace_transaction(tx_hash, trace_options)
+            .await
+            .context("Failed to obtain debug trace from kitchensink proxy")
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
         let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
             diff_mode: Some(true),
@@ -426,72 +456,126 @@ impl EthereumNode for KitchensinkNode {
             _ => anyhow::bail!("expected a diff mode trace"),
         }
     }
+
+    async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
+        self.provider()
+            .await
+            .context("Failed to get the Kitchensink provider")?
+            .get_balance(address)
+            .await
+            .map_err(Into::into)
+    }
+
+    async fn latest_state_proof(
+        &self,
+        address: Address,
+        keys: Vec<StorageKey>,
+    ) -> anyhow::Result<EIP1186AccountProofResponse> {
+        self.provider()
+            .await
+            .context("Failed to get the Kitchensink provider")?
+            .get_proof(address, keys)
+            .latest()
+            .await
+            .map_err(Into::into)
+    }
 }

 impl ResolverApi for KitchensinkNode {
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_chain_id()
             .await
             .map_err(Into::into)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
+    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
+        self.provider()
+            .await
+            .context("Failed to get the Kitchensink provider")?
+            .get_transaction_receipt(*tx_hash)
+            .await?
+            .context("Failed to get the transaction receipt")
+            .map(|receipt| receipt.effective_gas_price)
+    }
+
     async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
             .map(|block| block.header.gas_limit as _)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
             .map(|block| block.header.beneficiary)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
-            .map(|block| block.header.difficulty)
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
+            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
+    }
+
+    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
+        self.provider()
+            .await
+            .context("Failed to get the Kitchensink provider")?
+            .get_block_by_number(number)
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
+            .and_then(|block| {
+                block
+                    .header
+                    .base_fee_per_gas
+                    .context("Failed to get the base fee per gas")
+            })
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
             .map(|block| block.header.hash)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_by_number(number)
-            .await?
-            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .await
+            .context("Failed to get the kitchensink block")?
+            .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
             .map(|block| block.header.timestamp)
     }

-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
         self.provider()
-            .await?
+            .await
+            .context("Failed to get the Kitchensink provider")?
             .get_block_number()
             .await
             .map_err(Into::into)
@@ -499,44 +583,61 @@ impl ResolverApi for KitchensinkNode {
     }
 }
|
|
||||||
impl Node for KitchensinkNode {
|
impl Node for KitchensinkNode {
|
||||||
fn new(config: &Arguments) -> Self {
|
fn new(
|
||||||
let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY);
|
context: impl AsRef<WorkingDirectoryConfiguration>
|
||||||
|
+ AsRef<ConcurrencyConfiguration>
|
||||||
|
+ AsRef<GenesisConfiguration>
|
||||||
|
+ AsRef<WalletConfiguration>
|
||||||
|
+ AsRef<GethConfiguration>
|
||||||
|
+ AsRef<KitchensinkConfiguration>
|
||||||
|
+ AsRef<ReviveDevNodeConfiguration>
|
||||||
|
+ AsRef<EthRpcConfiguration>
|
||||||
|
+ Clone,
|
||||||
|
) -> Self {
|
||||||
|
let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
|
||||||
|
let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
||||||
|
let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
|
||||||
|
let working_directory_configuration =
|
||||||
|
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
||||||
|
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
let kitchensink_directory = working_directory_configuration
|
||||||
|
.as_path()
|
||||||
|
.join(Self::BASE_DIRECTORY);
|
||||||
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
|
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||||
let base_directory = kitchensink_directory.join(id.to_string());
|
let base_directory = kitchensink_directory.join(id.to_string());
|
||||||
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
|
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
|
||||||
|
|
||||||
let mut wallet = config.wallet();
|
let wallet = wallet_configuration.wallet();
|
||||||
for signer in (1..=config.private_keys_to_add)
|
|
||||||
.map(|id| U256::from(id))
|
|
||||||
.map(|id| id.to_be_bytes::<32>())
|
|
||||||
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
|
|
||||||
{
|
|
||||||
wallet.register_signer(signer);
|
|
||||||
}
|
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
id,
|
id,
|
||||||
substrate_binary: config.kitchensink.clone(),
|
substrate_binary: kitchensink_configuration.path.clone(),
|
||||||
eth_proxy_binary: config.eth_proxy.clone(),
|
dev_node_binary: dev_node_configuration.path.clone(),
|
||||||
|
eth_proxy_binary: eth_rpc_configuration.path.clone(),
|
||||||
rpc_url: String::new(),
|
rpc_url: String::new(),
|
||||||
wallet,
|
|
||||||
base_directory,
|
base_directory,
|
||||||
logs_directory,
|
logs_directory,
|
||||||
process_substrate: None,
|
process_substrate: None,
|
||||||
process_proxy: None,
|
process_proxy: None,
|
||||||
|
wallet: wallet.clone(),
|
||||||
|
chain_id_filler: Default::default(),
|
||||||
nonce_manager: Default::default(),
|
nonce_manager: Default::default(),
|
||||||
|
use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink,
|
||||||
// We know that we only need to be storing 4 files so we can specify that when creating
|
// We know that we only need to be storing 4 files so we can specify that when creating
|
||||||
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
|
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
|
||||||
logs_file_to_flush: Vec::with_capacity(4),
|
logs_file_to_flush: Vec::with_capacity(4),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
fn id(&self) -> usize {
|
||||||
|
self.id as _
|
||||||
|
}
|
||||||
|
|
||||||
fn connection_string(&self) -> String {
|
fn connection_string(&self) -> String {
|
||||||
self.rpc_url.clone()
|
self.rpc_url.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
|
||||||
fn shutdown(&mut self) -> anyhow::Result<()> {
|
fn shutdown(&mut self) -> anyhow::Result<()> {
|
||||||
// Terminate the processes in a graceful manner to allow for the output to be flushed.
|
// Terminate the processes in a graceful manner to allow for the output to be flushed.
|
||||||
if let Some(mut child) = self.process_proxy.take() {
|
if let Some(mut child) = self.process_proxy.take() {
|
||||||
@@ -563,35 +664,37 @@ impl Node for KitchensinkNode {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
|
||||||
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
|
self.init(genesis)?.spawn_process()
|
||||||
self.init(&genesis)?.spawn_process()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
|
||||||
fn version(&self) -> anyhow::Result<String> {
|
fn version(&self) -> anyhow::Result<String> {
|
||||||
let output = Command::new(&self.substrate_binary)
|
let output = Command::new(&self.substrate_binary)
|
||||||
.arg("--version")
|
.arg("--version")
|
||||||
.stdin(Stdio::null())
|
.stdin(Stdio::null())
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
.stderr(Stdio::null())
|
.stderr(Stdio::null())
|
||||||
.spawn()?
|
.spawn()
|
||||||
.wait_with_output()?
|
.context("Failed to spawn kitchensink --version")?
|
||||||
|
.wait_with_output()
|
||||||
|
.context("Failed to wait for kitchensink --version")?
|
||||||
.stdout;
|
.stdout;
|
||||||
Ok(String::from_utf8_lossy(&output).into())
|
Ok(String::from_utf8_lossy(&output).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
fn matches_target(targets: Option<&[String]>) -> bool {
|
||||||
fn matches_target(&self, targets: Option<&[String]>) -> bool {
|
|
||||||
match targets {
|
match targets {
|
||||||
None => true,
|
None => true,
|
||||||
Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"),
|
Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn evm_version() -> EVMVersion {
|
||||||
|
EVMVersion::Cancun
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for KitchensinkNode {
|
impl Drop for KitchensinkNode {
|
||||||
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
|
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.shutdown().expect("Failed to shutdown")
|
self.shutdown().expect("Failed to shutdown")
|
||||||
}
|
}
|
||||||
@@ -1027,24 +1130,20 @@ impl BlockHeader for KitchenSinkHeader {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use alloy::rpc::types::TransactionRequest;
|
use alloy::rpc::types::TransactionRequest;
|
||||||
use revive_dt_config::Arguments;
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::sync::{LazyLock, Mutex};
|
use std::sync::{LazyLock, Mutex};
|
||||||
|
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::{GENESIS_JSON, Node};
|
use crate::Node;
|
||||||
|
|
||||||
fn test_config() -> Arguments {
|
fn test_config() -> ExecutionContext {
|
||||||
Arguments {
|
let mut context = ExecutionContext::default();
|
||||||
kitchensink: PathBuf::from("substrate-node"),
|
context.kitchensink_configuration.use_kitchensink = true;
|
||||||
eth_proxy: PathBuf::from("eth-rpc"),
|
context
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_node() -> (KitchensinkNode, Arguments) {
|
fn new_node() -> (ExecutionContext, KitchensinkNode) {
|
||||||
// Note: When we run the tests in the CI we found that if they're all
|
// Note: When we run the tests in the CI we found that if they're all
|
||||||
// run in parallel then the CI is unable to start all of the nodes in
|
// run in parallel then the CI is unable to start all of the nodes in
|
||||||
// time and their start up times-out. Therefore, we want all of the
|
// time and their start up times-out. Therefore, we want all of the
|
||||||
@@ -1063,32 +1162,36 @@ mod tests {
|
|||||||
static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
|
static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
|
||||||
let _guard = NODE_START_MUTEX.lock().unwrap();
|
let _guard = NODE_START_MUTEX.lock().unwrap();
|
||||||
|
|
||||||
let args = test_config();
|
let context = test_config();
|
||||||
let mut node = KitchensinkNode::new(&args);
|
let mut node = KitchensinkNode::new(&context);
|
||||||
node.init(GENESIS_JSON)
|
node.init(context.genesis_configuration.genesis().unwrap().clone())
|
||||||
.expect("Failed to initialize the node")
|
.expect("Failed to initialize the node")
|
||||||
.spawn_process()
|
.spawn_process()
|
||||||
.expect("Failed to spawn the node process");
|
.expect("Failed to spawn the node process");
|
||||||
(node, args)
|
(context, node)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A shared node that multiple tests can use. It starts up once.
|
/// A shared node that multiple tests can use. It starts up once.
|
||||||
fn shared_node() -> &'static KitchensinkNode {
|
fn shared_node() -> &'static KitchensinkNode {
|
||||||
static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| {
|
static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| {
|
||||||
let (node, args) = new_node();
|
let (context, node) = new_node();
|
||||||
(node, args)
|
(context, node)
|
||||||
});
|
});
|
||||||
&NODE.0
|
&NODE.1
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let (node, args) = new_node();
|
let (context, node) = new_node();
|
||||||
|
|
||||||
let provider = node.provider().await.expect("Failed to create provider");
|
let provider = node.provider().await.expect("Failed to create provider");
|
||||||
|
|
||||||
let account_address = args.wallet().default_signer().address();
|
let account_address = context
|
||||||
|
.wallet_configuration
|
||||||
|
.wallet()
|
||||||
|
.default_signer()
|
||||||
|
.address();
|
||||||
let transaction = TransactionRequest::default()
|
let transaction = TransactionRequest::default()
|
||||||
.to(account_address)
|
.to(account_address)
|
||||||
.value(U256::from(100_000_000_000_000u128));
|
.value(U256::from(100_000_000_000_000u128));
|
||||||
@@ -1122,7 +1225,9 @@ mod tests {
|
|||||||
let mut dummy_node = KitchensinkNode::new(&test_config());
|
let mut dummy_node = KitchensinkNode::new(&test_config());
|
||||||
|
|
||||||
// Call `init()`
|
// Call `init()`
|
||||||
dummy_node.init(genesis_content).expect("init failed");
|
dummy_node
|
||||||
|
.init(serde_json::from_str(genesis_content).unwrap())
|
||||||
|
.expect("init failed");
|
||||||
|
|
||||||
// Check that the patched chainspec file was generated
|
// Check that the patched chainspec file was generated
|
||||||
let final_chainspec_path = dummy_node
|
let final_chainspec_path = dummy_node
|
||||||
@@ -1232,20 +1337,10 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn spawn_works() {
|
|
||||||
let config = test_config();
|
|
||||||
|
|
||||||
let mut node = KitchensinkNode::new(&config);
|
|
||||||
|
|
||||||
node.spawn(GENESIS_JSON.to_string()).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn version_works() {
|
fn version_works() {
|
||||||
let config = test_config();
|
let node = shared_node();
|
||||||
|
|
||||||
let node = KitchensinkNode::new(&config);
|
|
||||||
let version = node.version().unwrap();
|
let version = node.version().unwrap();
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
@@ -1256,9 +1351,8 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn eth_rpc_version_works() {
|
fn eth_rpc_version_works() {
|
||||||
let config = test_config();
|
let node = shared_node();
|
||||||
|
|
||||||
let node = KitchensinkNode::new(&config);
|
|
||||||
let version = node.eth_rpc_version().unwrap();
|
let version = node.eth_rpc_version().unwrap();
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
|
|||||||
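The recurring change in the hunks above swaps bare `?` propagation and `ok_or(anyhow::Error::msg(..))` for anyhow's `Context` extension, which both attaches a human-readable message at each fallible step and lifts `Option` into `Result`. A minimal self-contained sketch of the pattern (illustrative names, not this crate's API):

use anyhow::Context as _;

fn latest_block_number(blocks: &[u64]) -> anyhow::Result<u64> {
    blocks
        .last()                              // Option<&u64>
        .copied()                            // Option<u64>
        .context("Blockchain has no blocks") // None becomes an anyhow::Error
}

fn main() -> anyhow::Result<()> {
    assert_eq!(latest_block_number(&[1, 2, 3])?, 3);
    assert!(latest_block_number(&[]).is_err());
    Ok(())
}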
+22 -7
@@ -1,6 +1,8 @@
 //! This crate implements the testing nodes.
 
-use revive_dt_config::Arguments;
+use alloy::genesis::Genesis;
+use revive_common::EVMVersion;
+use revive_dt_config::*;
 use revive_dt_node_interaction::EthereumNode;
 
 pub mod common;
@@ -9,18 +11,28 @@ pub mod geth;
 pub mod kitchensink;
 pub mod pool;
 
-/// The default genesis configuration.
-pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
-
 /// An abstract interface for testing nodes.
 pub trait Node: EthereumNode {
     /// Create a new uninitialized instance.
-    fn new(config: &Arguments) -> Self;
+    fn new(
+        context: impl AsRef<WorkingDirectoryConfiguration>
+        + AsRef<ConcurrencyConfiguration>
+        + AsRef<GenesisConfiguration>
+        + AsRef<WalletConfiguration>
+        + AsRef<GethConfiguration>
+        + AsRef<KitchensinkConfiguration>
+        + AsRef<ReviveDevNodeConfiguration>
+        + AsRef<EthRpcConfiguration>
+        + Clone,
+    ) -> Self;
+
+    /// Returns the identifier of the node.
+    fn id(&self) -> usize;
 
     /// Spawns a node configured according to the genesis json.
     ///
     /// Blocking until it's ready to accept transactions.
-    fn spawn(&mut self, genesis: String) -> anyhow::Result<()>;
+    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
 
     /// Prune the node instance and related data.
     ///
@@ -35,5 +47,8 @@ pub trait Node: EthereumNode {
 
     /// Given a list of targets from the metadata file, this function determines if the metadata
     /// file can be ran on this node or not.
-    fn matches_target(&self, targets: Option<&[String]>) -> bool;
+    fn matches_target(targets: Option<&[String]>) -> bool;
+
+    /// Returns the EVM version of the node.
+    fn evm_version() -> EVMVersion;
 }
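In the reworked trait, `matches_target` drops `&self` and `evm_version` arrives as another associated function, so both questions can be answered from the type alone, before any node process is constructed or spawned. An illustrative stand-alone sketch of that shape (not the crate's real trait, which also requires `EthereumNode`):

trait Node {
    /// Whether this node type can run the given metadata targets.
    fn matches_target(targets: Option<&[String]>) -> bool;
}

struct PvmNode;

impl Node for PvmNode {
    fn matches_target(targets: Option<&[String]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"),
        }
    }
}

fn should_run<T: Node>(targets: Option<&[String]>) -> bool {
    // No `T` value is required; the answer comes from the type alone.
    T::matches_target(targets)
}

fn main() {
    let pvm = vec!["pvm".to_owned()];
    let evm = vec!["evm".to_owned()];
    assert!(should_run::<PvmNode>(None));
    assert!(should_run::<PvmNode>(Some(pvm.as_slice())));
    assert!(!should_run::<PvmNode>(Some(evm.as_slice())));
}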
+59 -17
@@ -1,13 +1,18 @@
 //! This crate implements concurrent handling of testing node.
 
 use std::{
-    fs::read_to_string,
     sync::atomic::{AtomicUsize, Ordering},
     thread,
 };
 
-use anyhow::Context;
-use revive_dt_config::Arguments;
+use alloy::genesis::Genesis;
+use anyhow::Context as _;
+use revive_dt_config::{
+    ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
+    KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
+    WorkingDirectoryConfiguration,
+};
+use tracing::info;
 
 use crate::Node;
 
@@ -23,18 +28,31 @@ where
     T: Node + Send + 'static,
 {
     /// Create a new Pool. This will start as many nodes as there are workers in `config`.
-    pub fn new(config: &Arguments) -> anyhow::Result<Self> {
-        let nodes = config.number_of_nodes;
-        let genesis = read_to_string(&config.genesis_file).context(format!(
-            "can not read genesis file: {}",
-            config.genesis_file.display()
-        ))?;
+    pub fn new(
+        context: impl AsRef<WorkingDirectoryConfiguration>
+        + AsRef<ConcurrencyConfiguration>
+        + AsRef<GenesisConfiguration>
+        + AsRef<WalletConfiguration>
+        + AsRef<GethConfiguration>
+        + AsRef<KitchensinkConfiguration>
+        + AsRef<ReviveDevNodeConfiguration>
+        + AsRef<EthRpcConfiguration>
+        + Send
+        + Sync
+        + Clone
+        + 'static,
+    ) -> anyhow::Result<Self> {
+        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
+        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
+
+        let nodes = concurrency_configuration.number_of_nodes;
+        let genesis = genesis_configuration.genesis()?;
 
         let mut handles = Vec::with_capacity(nodes);
         for _ in 0..nodes {
-            let config = config.clone();
+            let context = context.clone();
             let genesis = genesis.clone();
-            handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis)));
+            handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
         }
 
         let mut nodes = Vec::with_capacity(nodes);
@@ -42,8 +60,10 @@ where
             nodes.push(
                 handle
                     .join()
-                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))?
-                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?,
+                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
+                    .context("Failed to join node spawn thread")?
+                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
+                    .context("Node failed to spawn")?,
             );
         }
 
@@ -60,9 +80,31 @@ where
     }
 }
 
-fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
-    let mut node = T::new(args);
-    tracing::info!("starting node: {}", node.connection_string());
-    node.spawn(genesis)?;
+fn spawn_node<T: Node + Send>(
+    context: impl AsRef<WorkingDirectoryConfiguration>
+    + AsRef<ConcurrencyConfiguration>
+    + AsRef<GenesisConfiguration>
+    + AsRef<WalletConfiguration>
+    + AsRef<GethConfiguration>
+    + AsRef<KitchensinkConfiguration>
+    + AsRef<ReviveDevNodeConfiguration>
+    + AsRef<EthRpcConfiguration>
+    + Clone
+    + 'static,
+    genesis: Genesis,
+) -> anyhow::Result<T> {
+    let mut node = T::new(context);
+    info!(
+        id = node.id(),
+        connection_string = node.connection_string(),
+        "Spawning node"
+    );
+    node.spawn(genesis)
+        .context("Failed to spawn node process")?;
+    info!(
+        id = node.id(),
+        connection_string = node.connection_string(),
+        "Spawned node"
+    );
     Ok(node)
 }
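Joining the spawn threads involves two error layers: `thread::spawn` hands back a `JoinHandle` whose `join` fails if the thread panicked, and the thread's own return value is itself an `anyhow::Result`. A minimal sketch of unpacking both layers the way the hunk above does (names are illustrative):

use anyhow::Context as _;
use std::thread;

fn spawn_worker(fail: bool) -> anyhow::Result<u32> {
    if fail {
        anyhow::bail!("worker could not start");
    }
    Ok(42)
}

fn main() -> anyhow::Result<()> {
    let handle = thread::spawn(|| spawn_worker(false));
    let value = handle
        .join()
        // Outer layer: the thread panicked before returning anything.
        .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
        .context("Failed to join node spawn thread")?
        // Inner layer: the thread ran to completion but reported an error.
        .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
        .context("Node failed to spawn")?;
    assert_eq!(value, 42);
    Ok(())
}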
@@ -8,11 +8,21 @@ repository.workspace = true
 rust-version.workspace = true
 
 [dependencies]
+revive-dt-common = { workspace = true }
 revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
 revive-dt-compiler = { workspace = true }
 
+alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
-tracing = { workspace = true }
+paste = { workspace = true }
+indexmap = { workspace = true, features = ["serde"] }
+semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
+serde_with = { workspace = true }
+tokio = { workspace = true }
+tracing = { workspace = true }
+
+[lints]
+workspace = true
@@ -0,0 +1,560 @@
+//! Implementation of the report aggregator task which consumes the events sent by the various
+//! reporters and combines them into a single unified report.
+
+use std::{
+    collections::{BTreeMap, BTreeSet, HashMap, HashSet},
+    fs::OpenOptions,
+    path::PathBuf,
+    time::{SystemTime, UNIX_EPOCH},
+};
+
+use alloy_primitives::Address;
+use anyhow::{Context as _, Result};
+use indexmap::IndexMap;
+use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
+use revive_dt_config::{Context, TestingPlatform};
+use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
+use semver::Version;
+use serde::Serialize;
+use serde_with::{DisplayFromStr, serde_as};
+use tokio::sync::{
+    broadcast::{Sender, channel},
+    mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
+};
+use tracing::debug;
+
+use crate::*;
+
+pub struct ReportAggregator {
+    /* Internal Report State */
+    report: Report,
+    remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
+    /* Channels */
+    runner_tx: Option<UnboundedSender<RunnerEvent>>,
+    runner_rx: UnboundedReceiver<RunnerEvent>,
+    listener_tx: Sender<ReporterEvent>,
+}
+
+impl ReportAggregator {
+    pub fn new(context: Context) -> Self {
+        let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
+        let (listener_tx, _) = channel::<ReporterEvent>(1024);
+        Self {
+            report: Report::new(context),
+            remaining_cases: Default::default(),
+            runner_tx: Some(runner_tx),
+            runner_rx,
+            listener_tx,
+        }
+    }
+
+    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
+        let reporter = self
+            .runner_tx
+            .take()
+            .map(Into::into)
+            .expect("Can't fail since this can only be called once");
+        (reporter, async move { self.aggregate().await })
+    }
+
+    async fn aggregate(mut self) -> Result<()> {
+        debug!("Starting to aggregate report");
+
+        while let Some(event) = self.runner_rx.recv().await {
+            debug!(?event, "Received Event");
+            match event {
+                RunnerEvent::SubscribeToEvents(event) => {
+                    self.handle_subscribe_to_events_event(*event);
+                }
+                RunnerEvent::CorpusFileDiscovery(event) => {
+                    self.handle_corpus_file_discovered_event(*event)
+                }
+                RunnerEvent::MetadataFileDiscovery(event) => {
+                    self.handle_metadata_file_discovery_event(*event);
+                }
+                RunnerEvent::TestCaseDiscovery(event) => {
+                    self.handle_test_case_discovery(*event);
+                }
+                RunnerEvent::TestSucceeded(event) => {
+                    self.handle_test_succeeded_event(*event);
+                }
+                RunnerEvent::TestFailed(event) => {
+                    self.handle_test_failed_event(*event);
+                }
+                RunnerEvent::TestIgnored(event) => {
+                    self.handle_test_ignored_event(*event);
+                }
+                RunnerEvent::LeaderNodeAssigned(event) => {
+                    self.handle_leader_node_assigned_event(*event);
+                }
+                RunnerEvent::FollowerNodeAssigned(event) => {
+                    self.handle_follower_node_assigned_event(*event);
+                }
+                RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
+                    self.handle_pre_link_contracts_compilation_succeeded_event(*event)
+                }
+                RunnerEvent::PostLinkContractsCompilationSucceeded(event) => {
+                    self.handle_post_link_contracts_compilation_succeeded_event(*event)
+                }
+                RunnerEvent::PreLinkContractsCompilationFailed(event) => {
+                    self.handle_pre_link_contracts_compilation_failed_event(*event)
+                }
+                RunnerEvent::PostLinkContractsCompilationFailed(event) => {
+                    self.handle_post_link_contracts_compilation_failed_event(*event)
+                }
+                RunnerEvent::LibrariesDeployed(event) => {
+                    self.handle_libraries_deployed_event(*event);
+                }
+                RunnerEvent::ContractDeployed(event) => {
+                    self.handle_contract_deployed_event(*event);
+                }
+            }
+        }
+        debug!("Report aggregation completed");
+
+        let file_name = {
+            let current_timestamp = SystemTime::now()
+                .duration_since(UNIX_EPOCH)
+                .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
+                .as_secs();
+            let mut file_name = current_timestamp.to_string();
+            file_name.push_str(".json");
+            file_name
+        };
+        let file_path = self
+            .report
+            .context
+            .working_directory_configuration()
+            .as_path()
+            .join(file_name);
+        let file = OpenOptions::new()
+            .create(true)
+            .write(true)
+            .truncate(true)
+            .read(false)
+            .open(&file_path)
+            .with_context(|| {
+                format!(
+                    "Failed to open report file for writing: {}",
+                    file_path.display()
+                )
+            })?;
+        serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
+            format!("Failed to serialize report JSON to {}", file_path.display())
+        })?;
+
+        Ok(())
+    }
+
+    fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
+        let _ = event.tx.send(self.listener_tx.subscribe());
+    }
+
+    fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
+        self.report.corpora.push(event.corpus);
+    }
+
+    fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
+        self.report.metadata_files.insert(event.path.clone());
+    }
+
+    fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
+        self.remaining_cases
+            .entry(event.test_specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(event.test_specifier.solc_mode.clone())
+            .or_default()
+            .insert(event.test_specifier.case_idx);
+    }
+
+    fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
+        // Remove this from the set of cases we're tracking since it has completed.
+        self.remaining_cases
+            .entry(event.test_specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(event.test_specifier.solc_mode.clone())
+            .or_default()
+            .remove(&event.test_specifier.case_idx);
+
+        // Add information on the fact that the case was ignored to the report.
+        let test_case_report = self.test_case_report(&event.test_specifier);
+        test_case_report.status = Some(TestCaseStatus::Succeeded {
+            steps_executed: event.steps_executed,
+        });
+        self.handle_post_test_case_status_update(&event.test_specifier);
+    }
+
+    fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
+        // Remove this from the set of cases we're tracking since it has completed.
+        self.remaining_cases
+            .entry(event.test_specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(event.test_specifier.solc_mode.clone())
+            .or_default()
+            .remove(&event.test_specifier.case_idx);
+
+        // Add information on the fact that the case was ignored to the report.
+        let test_case_report = self.test_case_report(&event.test_specifier);
+        test_case_report.status = Some(TestCaseStatus::Failed {
+            reason: event.reason,
+        });
+        self.handle_post_test_case_status_update(&event.test_specifier);
+    }
+
+    fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
+        // Remove this from the set of cases we're tracking since it has completed.
+        self.remaining_cases
+            .entry(event.test_specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(event.test_specifier.solc_mode.clone())
+            .or_default()
+            .remove(&event.test_specifier.case_idx);
+
+        // Add information on the fact that the case was ignored to the report.
+        let test_case_report = self.test_case_report(&event.test_specifier);
+        test_case_report.status = Some(TestCaseStatus::Ignored {
+            reason: event.reason,
+            additional_fields: event.additional_fields,
+        });
+        self.handle_post_test_case_status_update(&event.test_specifier);
+    }
+
+    fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
+        let remaining_cases = self
+            .remaining_cases
+            .entry(specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(specifier.solc_mode.clone())
+            .or_default();
+        if !remaining_cases.is_empty() {
+            return;
+        }
+
+        let case_status = self
+            .report
+            .test_case_information
+            .entry(specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(specifier.solc_mode.clone())
+            .or_default()
+            .iter()
+            .map(|(case_idx, case_report)| {
+                (
+                    *case_idx,
+                    case_report.status.clone().expect("Can't be uninitialized"),
+                )
+            })
+            .collect::<BTreeMap<_, _>>();
+        let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
+            metadata_file_path: specifier.metadata_file_path.clone().into(),
+            mode: specifier.solc_mode.clone(),
+            case_status,
+        };
+
+        // According to the documentation on send, the sending fails if there are no more receiver
+        // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail
+        // to send then we ignore the error.
+        let _ = self.listener_tx.send(event);
+    }
+
+    fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) {
+        let execution_information = self.execution_information(&ExecutionSpecifier {
+            test_specifier: event.test_specifier,
+            node_id: event.id,
+            node_designation: NodeDesignation::Leader,
+        });
+        execution_information.node = Some(TestCaseNodeInformation {
+            id: event.id,
+            platform: event.platform,
+            connection_string: event.connection_string,
+        });
+    }
+
+    fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) {
+        let execution_information = self.execution_information(&ExecutionSpecifier {
+            test_specifier: event.test_specifier,
+            node_id: event.id,
+            node_designation: NodeDesignation::Follower,
+        });
+        execution_information.node = Some(TestCaseNodeInformation {
+            id: event.id,
+            platform: event.platform,
+            connection_string: event.connection_string,
+        });
+    }
+
+    fn handle_pre_link_contracts_compilation_succeeded_event(
+        &mut self,
+        event: PreLinkContractsCompilationSucceededEvent,
+    ) {
+        let include_input = self
+            .report
+            .context
+            .report_configuration()
+            .include_compiler_input;
+        let include_output = self
+            .report
+            .context
+            .report_configuration()
+            .include_compiler_output;
+
+        let execution_information = self.execution_information(&event.execution_specifier);
+
+        let compiler_input = if include_input {
+            event.compiler_input
+        } else {
+            None
+        };
+        let compiler_output = if include_output {
+            Some(event.compiler_output)
+        } else {
+            None
+        };
+
+        execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
+            is_cached: event.is_cached,
+            compiler_version: event.compiler_version,
+            compiler_path: event.compiler_path,
+            compiler_input,
+            compiler_output,
+        });
+    }
+
+    fn handle_post_link_contracts_compilation_succeeded_event(
+        &mut self,
+        event: PostLinkContractsCompilationSucceededEvent,
+    ) {
+        let include_input = self
+            .report
+            .context
+            .report_configuration()
+            .include_compiler_input;
+        let include_output = self
+            .report
+            .context
+            .report_configuration()
+            .include_compiler_output;
+
+        let execution_information = self.execution_information(&event.execution_specifier);
+
+        let compiler_input = if include_input {
+            event.compiler_input
+        } else {
+            None
+        };
+        let compiler_output = if include_output {
+            Some(event.compiler_output)
+        } else {
+            None
+        };
+
+        execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
+            is_cached: event.is_cached,
+            compiler_version: event.compiler_version,
+            compiler_path: event.compiler_path,
+            compiler_input,
+            compiler_output,
+        });
+    }
+
+    fn handle_pre_link_contracts_compilation_failed_event(
+        &mut self,
+        event: PreLinkContractsCompilationFailedEvent,
+    ) {
+        let execution_information = self.execution_information(&event.execution_specifier);
+
+        execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
+            reason: event.reason,
+            compiler_version: event.compiler_version,
+            compiler_path: event.compiler_path,
+            compiler_input: event.compiler_input,
+        });
+    }
+
+    fn handle_post_link_contracts_compilation_failed_event(
+        &mut self,
+        event: PostLinkContractsCompilationFailedEvent,
+    ) {
+        let execution_information = self.execution_information(&event.execution_specifier);
+
+        execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
+            reason: event.reason,
+            compiler_version: event.compiler_version,
+            compiler_path: event.compiler_path,
+            compiler_input: event.compiler_input,
+        });
+    }
+
+    fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
+        self.execution_information(&event.execution_specifier)
+            .deployed_libraries = Some(event.libraries);
+    }
+
+    fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
+        self.execution_information(&event.execution_specifier)
+            .deployed_contracts
+            .get_or_insert_default()
+            .insert(event.contract_instance, event.address);
+    }
+
+    fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
+        self.report
+            .test_case_information
+            .entry(specifier.metadata_file_path.clone().into())
+            .or_default()
+            .entry(specifier.solc_mode.clone())
+            .or_default()
+            .entry(specifier.case_idx)
+            .or_default()
+    }
+
+    fn execution_information(
+        &mut self,
+        specifier: &ExecutionSpecifier,
+    ) -> &mut ExecutionInformation {
+        let test_case_report = self.test_case_report(&specifier.test_specifier);
+        match specifier.node_designation {
+            NodeDesignation::Leader => test_case_report
+                .leader_execution_information
+                .get_or_insert_default(),
+            NodeDesignation::Follower => test_case_report
+                .follower_execution_information
+                .get_or_insert_default(),
+        }
+    }
+}
+
+#[serde_as]
+#[derive(Clone, Debug, Serialize)]
+pub struct Report {
+    /// The context that the tool was started up with.
+    pub context: Context,
+    /// The list of corpus files that the tool found.
+    pub corpora: Vec<Corpus>,
+    /// The list of metadata files that were found by the tool.
+    pub metadata_files: BTreeSet<MetadataFilePath>,
+    /// Information relating to each test case.
+    #[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
+    pub test_case_information:
+        BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
+}
+
+impl Report {
+    pub fn new(context: Context) -> Self {
+        Self {
+            context,
+            corpora: Default::default(),
+            metadata_files: Default::default(),
+            test_case_information: Default::default(),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Default)]
+pub struct TestCaseReport {
+    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub status: Option<TestCaseStatus>,
+    /// Information related to the execution on the leader.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub leader_execution_information: Option<ExecutionInformation>,
+    /// Information related to the execution on the follower.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub follower_execution_information: Option<ExecutionInformation>,
+}
+
+/// Information related to the status of the test. Could be that the test succeeded, failed, or that
+/// it was ignored.
+#[derive(Clone, Debug, Serialize)]
+#[serde(tag = "status")]
+pub enum TestCaseStatus {
+    /// The test case succeeded.
+    Succeeded {
+        /// The number of steps of the case that were executed.
+        steps_executed: usize,
+    },
+    /// The test case failed.
+    Failed {
+        /// The reason for the failure of the test case.
+        reason: String,
+    },
+    /// The test case was ignored. This variant carries information related to why it was ignored.
+    Ignored {
+        /// The reason behind the test case being ignored.
+        reason: String,
+        /// Additional fields that describe more information on why the test case is ignored.
+        #[serde(flatten)]
+        additional_fields: IndexMap<String, serde_json::Value>,
+    },
+}
+
+/// Information related to the leader or follower node that's being used to execute the step.
+#[derive(Clone, Debug, Serialize)]
+pub struct TestCaseNodeInformation {
+    /// The ID of the node that this case is being executed on.
+    pub id: usize,
+    /// The platform of the node.
+    pub platform: TestingPlatform,
+    /// The connection string of the node.
+    pub connection_string: String,
+}
+
+/// Execution information tied to the leader or the follower.
+#[derive(Clone, Debug, Default, Serialize)]
+pub struct ExecutionInformation {
+    /// Information related to the node assigned to this test case.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub node: Option<TestCaseNodeInformation>,
+    /// Information on the pre-link compiled contracts.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pre_link_compilation_status: Option<CompilationStatus>,
+    /// Information on the post-link compiled contracts.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub post_link_compilation_status: Option<CompilationStatus>,
+    /// Information on the deployed libraries.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
+    /// Information on the deployed contracts.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
+}
+
+/// Information related to compilation
+#[derive(Clone, Debug, Serialize)]
+#[serde(tag = "status")]
+pub enum CompilationStatus {
+    /// The compilation was successful.
+    Success {
+        /// A flag with information on whether the compilation artifacts were cached or not.
+        is_cached: bool,
+        /// The version of the compiler used to compile the contracts.
+        compiler_version: Version,
+        /// The path of the compiler used to compile the contracts.
+        compiler_path: PathBuf,
+        /// The input provided to the compiler to compile the contracts. This is only included if
+        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
+        /// the compiler was invoked.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        compiler_input: Option<CompilerInput>,
+        /// The output of the compiler. This is only included if the appropriate flag is set in the
+        /// CLI contexts.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        compiler_output: Option<CompilerOutput>,
+    },
+    /// The compilation failed.
+    Failure {
+        /// The failure reason.
+        reason: String,
+        /// The version of the compiler used to compile the contracts.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        compiler_version: Option<Version>,
+        /// The path of the compiler used to compile the contracts.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        compiler_path: Option<PathBuf>,
+        /// The input provided to the compiler to compile the contracts. This is only included if
+        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
+        /// the compiler was invoked.
+        #[serde(skip_serializing_if = "Option::is_none")]
+        compiler_input: Option<CompilerInput>,
+    },
+}
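`TestCaseStatus` and `CompilationStatus` above use `#[serde(tag = "status")]`, which folds the variant name into the JSON object as a `status` field rather than nesting it. A minimal sketch of the resulting shape, using a cut-down stand-in for the real enum:

use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
#[serde(tag = "status")]
enum TestCaseStatus {
    Succeeded { steps_executed: usize },
    Failed { reason: String },
}

fn main() {
    // The variant name becomes the "status" field, flattened next to the data.
    let ok = serde_json::to_value(TestCaseStatus::Succeeded { steps_executed: 7 }).unwrap();
    assert_eq!(ok, json!({ "status": "Succeeded", "steps_executed": 7 }));

    let bad = serde_json::to_value(TestCaseStatus::Failed { reason: "revert".into() }).unwrap();
    assert_eq!(bad, json!({ "status": "Failed", "reason": "revert" }));
}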
@@ -1,81 +0,0 @@
-//! The report analyzer enriches the raw report data.
-
-use revive_dt_compiler::CompilerOutput;
-use serde::{Deserialize, Serialize};
-
-use crate::reporter::CompilationTask;
-
-/// Provides insights into how well the compilers perform.
-#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, PartialOrd)]
-pub struct CompilerStatistics {
-    /// The sum of contracts observed.
-    pub n_contracts: usize,
-    /// The mean size of compiled contracts.
-    pub mean_code_size: usize,
-    /// The mean size of the optimized YUL IR.
-    pub mean_yul_size: usize,
-    /// Is a proxy because the YUL also contains a lot of comments.
-    pub yul_to_bytecode_size_ratio: f32,
-}
-
-impl CompilerStatistics {
-    /// Cumulatively update the statistics with the next compiler task.
-    pub fn sample(&mut self, compilation_task: &CompilationTask) {
-        let Some(CompilerOutput { contracts }) = &compilation_task.json_output else {
-            return;
-        };
-
-        for (_solidity, contracts) in contracts.iter() {
-            for (_name, (bytecode, _)) in contracts.iter() {
-                // The EVM bytecode can be unlinked and thus is not necessarily a decodable hex
-                // string; for our statistics this is a good enough approximation.
-                let bytecode_size = bytecode.len() / 2;
-
-                // TODO: for the time being we set the yul_size to be zero. We need to change this
-                // when we overhaul the reporting.
-
-                self.update_sizes(bytecode_size, 0);
-            }
-        }
-    }
-
-    /// Updates the size statistics cumulatively.
-    fn update_sizes(&mut self, bytecode_size: usize, yul_size: usize) {
-        let n_previous = self.n_contracts;
-        let n_current = self.n_contracts + 1;
-
-        self.n_contracts = n_current;
-
-        self.mean_code_size = (n_previous * self.mean_code_size + bytecode_size) / n_current;
-        self.mean_yul_size = (n_previous * self.mean_yul_size + yul_size) / n_current;
-
-        if self.mean_code_size > 0 {
-            self.yul_to_bytecode_size_ratio =
-                self.mean_yul_size as f32 / self.mean_code_size as f32;
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::CompilerStatistics;
-
-    #[test]
-    fn compiler_statistics() {
-        let mut received = CompilerStatistics::default();
-        received.update_sizes(0, 0);
-        received.update_sizes(3, 37);
-        received.update_sizes(123, 456);
-
-        let mean_code_size = 41; // rounding error from integer truncation
-        let mean_yul_size = 164;
-        let expected = CompilerStatistics {
-            n_contracts: 3,
-            mean_code_size,
-            mean_yul_size,
-            yul_to_bytecode_size_ratio: mean_yul_size as f32 / mean_code_size as f32,
-        };
-
-        assert_eq!(received, expected);
-    }
-}
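For anyone porting the deleted statistics: the `mean_code_size = 41` in the removed test is not the true mean. The code-size samples 0, 3, 123 average to 126 / 3 = 42, but the cumulative integer update truncates at each step: after the second sample the running mean is (1 * 0 + 3) / 2 = 1, and after the third it is (2 * 1 + 123) / 3 = 125 / 3 = 41. That off-by-one is exactly the "rounding error from integer truncation" the test comment refers to.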
@@ -0,0 +1,43 @@
+//! Common types and functions used throughout the crate.
+
+use std::{path::PathBuf, sync::Arc};
+
+use revive_dt_common::define_wrapper_type;
+use revive_dt_compiler::Mode;
+use revive_dt_format::{case::CaseIdx, input::StepIdx};
+use serde::{Deserialize, Serialize};
+
+define_wrapper_type!(
+    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+    #[serde(transparent)]
+    pub struct MetadataFilePath(PathBuf);
+);
+
+/// An absolute specifier for a test.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct TestSpecifier {
+    pub solc_mode: Mode,
+    pub metadata_file_path: PathBuf,
+    pub case_idx: CaseIdx,
+}
+
+/// An absolute path for a test that also includes information about the node that it's assigned to
+/// and whether it's the leader or follower.
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct ExecutionSpecifier {
+    pub test_specifier: Arc<TestSpecifier>,
+    pub node_id: usize,
+    pub node_designation: NodeDesignation,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum NodeDesignation {
+    Leader,
+    Follower,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct StepExecutionSpecifier {
+    pub execution_specifier: Arc<ExecutionSpecifier>,
+    pub step_idx: StepIdx,
+}
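`ExecutionSpecifier` wraps its `TestSpecifier` in an `Arc` so the leader and follower executions of one test case can share a single allocation. A self-contained sketch of that design choice, using simplified stand-ins for the real types:

use std::sync::Arc;

#[derive(Debug)]
struct TestSpec {
    name: String,
}

#[derive(Debug)]
enum Designation {
    Leader,
    Follower,
}

#[derive(Debug)]
struct ExecSpec {
    test: Arc<TestSpec>,
    node_id: usize,
    designation: Designation,
}

fn main() {
    let test = Arc::new(TestSpec {
        name: "storage.json::case-0".to_owned(),
    });
    // Both executions share one allocation; cloning the Arc only bumps a refcount.
    let leader = ExecSpec {
        test: test.clone(),
        node_id: 0,
        designation: Designation::Leader,
    };
    let follower = ExecSpec {
        test: test.clone(),
        node_id: 1,
        designation: Designation::Follower,
    };
    assert_eq!(Arc::strong_count(&test), 3);
    println!("{leader:?} vs {follower:?}");
}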
@@ -1,4 +1,11 @@
-//! The revive differential tests reporting facility.
+//! This crate implements the reporting infrastructure for the differential testing tool.
 
-pub mod analyzer;
-pub mod reporter;
+mod aggregator;
+mod common;
+mod reporter_event;
+mod runner_event;
+
+pub use aggregator::*;
+pub use common::*;
+pub use reporter_event::*;
+pub use runner_event::*;
@@ -1,235 +0,0 @@
|
|||||||
//! The reporter is the central place observing test execution by collecting data.
|
|
||||||
//!
|
|
||||||
//! The data collected gives useful insights into the outcome of the test run
|
|
||||||
//! and helps identifying and reproducing failing cases.
|
|
||||||
|
|
||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
fs::{self, File, create_dir_all},
|
|
||||||
path::PathBuf,
|
|
||||||
sync::{Mutex, OnceLock},
|
|
||||||
time::{SystemTime, UNIX_EPOCH},
|
|
||||||
};
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use revive_dt_config::{Arguments, TestingPlatform};
|
|
||||||
use revive_dt_format::{corpus::Corpus, mode::SolcMode};
|
|
||||||
|
|
||||||
use crate::analyzer::CompilerStatistics;
|
|
||||||
|
|
||||||
pub(crate) static REPORTER: OnceLock<Mutex<Report>> = OnceLock::new();
|
|
||||||
|
|
||||||
/// The `Report` datastructure stores all relevant inforamtion required for generating reports.
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
|
||||||
pub struct Report {
|
|
||||||
/// The configuration used during the test.
|
|
||||||
pub config: Arguments,
|
|
||||||
/// The observed test corpora.
|
|
||||||
pub corpora: Vec<Corpus>,
|
|
||||||
/// The observed test definitions.
|
|
||||||
pub metadata_files: Vec<PathBuf>,
|
|
||||||
/// The observed compilation results.
|
|
||||||
pub compiler_results: HashMap<TestingPlatform, Vec<CompilationResult>>,
|
|
||||||
/// The observed compilation statistics.
|
|
||||||
pub compiler_statistics: HashMap<TestingPlatform, CompilerStatistics>,
|
|
||||||
/// The file name this is serialized to.
|
|
||||||
#[serde(skip)]
|
|
||||||
directory: PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Contains a compiled contract.
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CompilationTask {
|
|
||||||
/// The observed compiler input.
|
|
||||||
pub json_input: CompilerInput,
|
|
||||||
/// The observed compiler output.
|
|
||||||
pub json_output: Option<CompilerOutput>,
|
|
||||||
/// The observed compiler mode.
|
|
||||||
pub mode: SolcMode,
|
|
||||||
/// The observed compiler version.
|
|
||||||
pub compiler_version: String,
|
|
||||||
/// The observed error, if any.
|
|
||||||
pub error: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a report about a compilation task.
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CompilationResult {
|
|
||||||
/// The observed compilation task.
|
|
||||||
pub compilation_task: CompilationTask,
|
|
||||||
/// The linked span.
|
|
||||||
pub span: Span,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The [Span] struct indicates the context of what is being reported.
|
|
||||||
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Span {
|
|
||||||
/// The corpus index this belongs to.
|
|
||||||
corpus: usize,
|
|
||||||
/// The metadata file this belongs to.
|
|
||||||
metadata_file: usize,
|
|
||||||
/// The index of the case definition this belongs to.
|
|
||||||
case: usize,
|
|
||||||
/// The index of the case input this belongs to.
|
|
||||||
input: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Report {
|
|
||||||
/// The file name where this report will be written to.
|
|
||||||
pub const FILE_NAME: &str = "report.json";
|
|
||||||
|
|
||||||
/// The [Span] is expected to initialize the reporter by providing the config.
|
|
||||||
const INITIALIZED_VIA_SPAN: &str = "requires a Span which initializes the reporter";
|
|
||||||
|
|
||||||
/// Create a new [Report].
|
|
||||||
fn new(config: Arguments) -> anyhow::Result<Self> {
|
|
||||||
let now = SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap()
|
|
||||||
.as_millis();
|
|
||||||
|
|
||||||
let directory = config.directory().join("report").join(format!("{now}"));
|
|
||||||
if !directory.exists() {
|
|
||||||
create_dir_all(&directory)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
config,
|
|
||||||
directory,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a compilation task to the report.
|
|
||||||
pub fn compilation(span: Span, platform: TestingPlatform, compilation_task: CompilationTask) {
|
|
||||||
let mut report = REPORTER
|
|
||||||
.get()
|
|
||||||
.expect(Report::INITIALIZED_VIA_SPAN)
|
|
||||||
.lock()
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
report
|
|
||||||
.compiler_statistics
|
|
||||||
.entry(platform)
|
|
||||||
            .or_default()
            .sample(&compilation_task);

        report
            .compiler_results
            .entry(platform)
            .or_default()
            .push(CompilationResult {
                compilation_task,
                span,
            });
    }

    /// Write the report to disk.
    pub fn save() -> anyhow::Result<()> {
        let Some(reporter) = REPORTER.get() else {
            return Ok(());
        };
        let report = reporter.lock().unwrap();

        if let Err(error) = report.write_to_file() {
            anyhow::bail!("cannot write report: {error}");
        }

        if report.config.extract_problems {
            if let Err(error) = report.save_compiler_problems() {
                anyhow::bail!("cannot write compiler problems: {error}");
            }
        }

        Ok(())
    }

    /// Write compiler problems to disk for later debugging.
    pub fn save_compiler_problems(&self) -> anyhow::Result<()> {
        for (platform, results) in self.compiler_results.iter() {
            for result in results {
                // ignore if there were no errors
                if result.compilation_task.error.is_none() {
                    continue;
                }

                let path = &self.metadata_files[result.span.metadata_file]
                    .parent()
                    .unwrap()
                    .join(format!("{platform}_errors"));
                if !path.exists() {
                    create_dir_all(path)?;
                }

                if let Some(error) = result.compilation_task.error.as_ref() {
                    fs::write(path.join("compiler_error.txt"), error)?;
                }

                if let Some(errors) = result.compilation_task.json_output.as_ref() {
                    let file = File::create(path.join("compiler_output.txt"))?;
                    serde_json::to_writer_pretty(file, &errors)?;
                }
            }
        }

        Ok(())
    }

    fn write_to_file(&self) -> anyhow::Result<()> {
        let path = self.directory.join(Self::FILE_NAME);

        let file = File::create(&path).context(path.display().to_string())?;
        serde_json::to_writer_pretty(file, &self)?;

        tracing::info!("report written to: {}", path.display());

        Ok(())
    }
}

impl Span {
    /// Create a new [Span] with case and input index at 0.
    ///
    /// Initializes the reporting facility on the first call.
    pub fn new(corpus: Corpus, config: Arguments) -> anyhow::Result<Self> {
        let report = Mutex::new(Report::new(config)?);
        let mut reporter = REPORTER.get_or_init(|| report).lock().unwrap();
        reporter.corpora.push(corpus);

        Ok(Self {
            corpus: reporter.corpora.len() - 1,
            metadata_file: 0,
            case: 0,
            input: 0,
        })
    }

    /// Advance to the next metadata file: Resets the case and input indices to 0.
    pub fn next_metadata(&mut self, metadata_file: PathBuf) {
        let mut reporter = REPORTER
            .get()
            .expect(Report::INITIALIZED_VIA_SPAN)
            .lock()
            .unwrap();

        reporter.metadata_files.push(metadata_file);

        self.metadata_file = reporter.metadata_files.len() - 1;
        self.case = 0;
        self.input = 0;
    }

    /// Advance to the next case: Increases the case index by one and resets the input index to 0.
    pub fn next_case(&mut self) {
        self.case += 1;
        self.input = 0;
    }

    /// Advance to the next input.
    pub fn next_input(&mut self) {
        self.input += 1;
    }
}
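For orientation, here is a minimal sketch of the intended [Span] lifecycle as a driver walks a corpus. The surrounding loop and the `case_count`/`input_count` helpers are hypothetical, not part of this change:

// Hypothetical driver loop (illustrative only): demonstrates the
// new() -> next_metadata() -> next_input()/next_case() progression,
// with Report::save() flushing the report to disk at the end.
fn walk(corpus: Corpus, config: Arguments, files: Vec<PathBuf>) -> anyhow::Result<()> {
    let mut span = Span::new(corpus, config)?; // initializes REPORTER on first call
    for file in files {
        span.next_metadata(file); // resets the case and input indices to 0
        // case_count/input_count are hypothetical helpers standing in for
        // however the driver enumerates cases and inputs of a metadata file.
        for _case in 0..case_count(&span) {
            for _input in 0..input_count(&span) {
                // ... execute the input and sample results into the report ...
                span.next_input();
            }
            span.next_case(); // resets the input index to 0
        }
    }
    Report::save() // writes the JSON report and, if configured, compiler problems
}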
@@ -0,0 +1,22 @@
//! A reporter event sent by the report aggregator to the various listeners.

use std::collections::BTreeMap;

use revive_dt_compiler::Mode;
use revive_dt_format::case::CaseIdx;

use crate::{MetadataFilePath, TestCaseStatus};

#[derive(Clone, Debug)]
pub enum ReporterEvent {
    /// An event sent by the reporter once an entire metadata file and solc mode combination has
    /// finished execution.
    MetadataFileSolcModeCombinationExecutionCompleted {
        /// The path of the metadata file.
        metadata_file_path: MetadataFilePath,
        /// The Solc mode that this metadata file was executed in.
        mode: Mode,
        /// The status of each one of the cases.
        case_status: BTreeMap<CaseIdx, TestCaseStatus>,
    },
}
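As a sketch of the consumer side, a listener could drain these events from the broadcast receiver handed out via the subscription mechanism shown later in this diff. This assumes `MetadataFilePath` and `Mode` implement `Debug`:

// Illustrative listener (not part of this change): drains ReporterEvents
// from the aggregator's broadcast channel and logs one summary line per
// (metadata file, mode) combination.
async fn listen(mut rx: tokio::sync::broadcast::Receiver<ReporterEvent>) {
    while let Ok(event) = rx.recv().await {
        match event {
            ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
                metadata_file_path,
                mode,
                case_status,
            } => tracing::info!(
                "{metadata_file_path:?} in mode {mode:?}: {} case(s) finished",
                case_status.len()
            ),
        }
    }
}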
@@ -0,0 +1,642 @@
//! The types associated with the events sent by the runner to the reporter.
#![allow(dead_code)]

use std::{collections::BTreeMap, path::PathBuf, sync::Arc};

use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};

use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};

macro_rules! __report_gen_emit_test_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.test_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_test_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_test_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        test_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_test_specific_by_parse!(
            $ident, $variant_ident, test_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

macro_rules! __report_gen_emit_execution_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.execution_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_execution_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_execution_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before_exec {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        execution_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_execution_specific_by_parse!(
            $ident, $variant_ident, execution_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before_exec!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant_exec {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before_exec!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

macro_rules! __report_gen_emit_step_execution_specific {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )*
        ;
        $( $aname:ident : $aty:ty, )*
    ) => {
        paste::paste! {
            pub fn [< report_ $variant_ident:snake _event >](
                &self
                $(, $bname: impl Into<$bty> )*
                $(, $aname: impl Into<$aty> )*
            ) -> anyhow::Result<()> {
                self.report([< $variant_ident Event >] {
                    $skip_field: self.step_specifier.clone()
                    $(, $bname: $bname.into() )*
                    $(, $aname: $aname.into() )*
                })
            }
        }
    };
}

macro_rules! __report_gen_emit_step_execution_specific_by_parse {
    (
        $ident:ident,
        $variant_ident:ident,
        $skip_field:ident;
        $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
    ) => {
        __report_gen_emit_step_execution_specific!(
            $ident, $variant_ident, $skip_field;
            $( $bname : $bty, )* ; $( $aname : $aty, )*
        );
    };
}

macro_rules! __report_gen_scan_before_step {
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        step_specifier : $skip_ty:ty,
        $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_emit_step_execution_specific_by_parse!(
            $ident, $variant_ident, step_specifier;
            $( $before : $bty, )* ; $( $after : $aty, )*
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
        ;
    ) => {
        __report_gen_scan_before_step!(
            $ident, $variant_ident;
            $( $before : $bty, )* $name : $ty,
            ;
            $( $after : $aty, )*
            ;
        );
    };
    (
        $ident:ident, $variant_ident:ident;
        $( $before:ident : $bty:ty, )*
        ;
        ;
    ) => {};
}

macro_rules! __report_gen_for_variant_step {
    (
        $ident:ident,
        $variant_ident:ident;
    ) => {};
    (
        $ident:ident,
        $variant_ident:ident;
        $( $field_ident:ident : $field_ty:ty ),+ $(,)?
    ) => {
        __report_gen_scan_before_step!(
            $ident, $variant_ident;
            ;
            $( $field_ident : $field_ty, )*
            ;
        );
    };
}

/// Defines the runner-event which is sent from the test runners to the report aggregator.
///
/// This macro defines a number of things related to the reporting infrastructure and the interface
/// used. First of all, it defines the enum of all of the possible events that the runners can send
/// to the aggregator. For each of the variants it defines a separate struct, which allows the
/// variant's payload in the enum to be put in a [`Box`].
///
/// In addition to the above, it defines [`From`] implementations for the various event types for
/// the [`RunnerEvent`] enum, essentially allowing for events such as [`CorpusFileDiscoveryEvent`]
/// to be converted into a [`RunnerEvent`].
///
/// It also defines the [`RunnerEventReporter`] which is a wrapper around an [`UnboundedSender`]
/// allowing for events to be sent to the report aggregator.
///
/// With the above description, we can see that this macro defines almost all of the interface of
/// the reporting infrastructure, from the enum itself, to its associated types, and also to the
/// reporter that's used to report events to the aggregator.
///
/// [`UnboundedSender`]: tokio::sync::mpsc::UnboundedSender
macro_rules! define_event {
    (
        $(#[$enum_meta: meta])*
        $vis: vis enum $ident: ident {
            $(
                $(#[$variant_meta: meta])*
                $variant_ident: ident {
                    $(
                        $(#[$field_meta: meta])*
                        $field_ident: ident: $field_ty: ty
                    ),* $(,)?
                }
            ),* $(,)?
        }
    ) => {
        paste::paste! {
            $(#[$enum_meta])*
            #[derive(Debug)]
            $vis enum $ident {
                $(
                    $(#[$variant_meta])*
                    $variant_ident(Box<[<$variant_ident Event>]>)
                ),*
            }

            $(
                #[derive(Debug)]
                $(#[$variant_meta])*
                $vis struct [<$variant_ident Event>] {
                    $(
                        $(#[$field_meta])*
                        $vis $field_ident: $field_ty
                    ),*
                }
            )*

            $(
                impl From<[<$variant_ident Event>]> for $ident {
                    fn from(value: [<$variant_ident Event>]) -> Self {
                        Self::$variant_ident(Box::new(value))
                    }
                }
            )*

            /// Provides a way to report events to the aggregator.
            ///
            /// Under the hood, this is a wrapper around an [`UnboundedSender`] which abstracts away
            /// the fact that channels are used and implements high-level methods for reporting
            /// various events to the aggregator.
            #[derive(Clone, Debug)]
            pub struct [< $ident Reporter >]($vis tokio::sync::mpsc::UnboundedSender<$ident>);

            impl From<tokio::sync::mpsc::UnboundedSender<$ident>> for [< $ident Reporter >] {
                fn from(value: tokio::sync::mpsc::UnboundedSender<$ident>) -> Self {
                    Self(value)
                }
            }

            impl [< $ident Reporter >] {
                pub fn test_specific_reporter(
                    &self,
                    test_specifier: impl Into<std::sync::Arc<crate::common::TestSpecifier>>
                ) -> [< $ident TestSpecificReporter >] {
                    [< $ident TestSpecificReporter >] {
                        reporter: self.clone(),
                        test_specifier: test_specifier.into(),
                    }
                }

                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.0.send(event.into()).map_err(Into::into)
                }

                $(
                    pub fn [< report_ $variant_ident:snake _event >](&self, $($field_ident: impl Into<$field_ty>),*) -> anyhow::Result<()> {
                        self.report([< $variant_ident Event >] {
                            $($field_ident: $field_ident.into()),*
                        })
                    }
                )*
            }

            /// A reporter that's tied to a specific test case.
            #[derive(Clone, Debug)]
            pub struct [< $ident TestSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis test_specifier: std::sync::Arc<crate::common::TestSpecifier>,
            }

            impl [< $ident TestSpecificReporter >] {
                pub fn execution_specific_reporter(
                    &self,
                    node_id: impl Into<usize>,
                    node_designation: impl Into<$crate::common::NodeDesignation>
                ) -> [< $ident ExecutionSpecificReporter >] {
                    [< $ident ExecutionSpecificReporter >] {
                        reporter: self.reporter.clone(),
                        execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
                            test_specifier: self.test_specifier.clone(),
                            node_id: node_id.into(),
                            node_designation: node_designation.into(),
                        })
                    }
                }

                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }

            /// A reporter that's tied to a specific execution of the test case such as execution on
            /// a specific node like the leader or follower.
            #[derive(Clone, Debug)]
            pub struct [< $ident ExecutionSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis execution_specifier: std::sync::Arc<$crate::common::ExecutionSpecifier>,
            }

            impl [< $ident ExecutionSpecificReporter >] {
                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant_exec! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }

            /// A reporter that's tied to a specific step execution.
            #[derive(Clone, Debug)]
            pub struct [< $ident StepExecutionSpecificReporter >] {
                $vis reporter: [< $ident Reporter >],
                $vis step_specifier: std::sync::Arc<$crate::common::StepExecutionSpecifier>,
            }

            impl [< $ident StepExecutionSpecificReporter >] {
                fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
                    self.reporter.report(event)
                }

                $(
                    __report_gen_for_variant_step! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
                )*
            }
        }
    };
}

define_event! {
    /// An event type that's sent by the test runners/drivers to the report aggregator.
    pub(crate) enum RunnerEvent {
        /// An event emitted by the reporter when it wishes to listen to events emitted by the
        /// aggregator.
        SubscribeToEvents {
            /// The channel on which the aggregator sends back the receive side of its broadcast
            /// channel.
            tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
        },
        /// An event emitted by runners when they've discovered a corpus file.
        CorpusFileDiscovery {
            /// The contents of the corpus file.
            corpus: Corpus
        },
        /// An event emitted by runners when they've discovered a metadata file.
        MetadataFileDiscovery {
            /// The path of the metadata file discovered.
            path: MetadataFilePath,
            /// The content of the metadata file.
            metadata: Metadata
        },
        /// An event emitted by the runners when they discover a test case.
        TestCaseDiscovery {
            /// A specifier for the test that was discovered.
            test_specifier: Arc<TestSpecifier>,
        },
        /// An event emitted by the runners when a test case is ignored.
        TestIgnored {
            /// A specifier for the test that's been ignored.
            test_specifier: Arc<TestSpecifier>,
            /// A reason for the test to be ignored.
            reason: String,
            /// Additional fields that describe more information on why the test was ignored.
            additional_fields: IndexMap<String, serde_json::Value>
        },
        /// An event emitted by the runners when a test case has succeeded.
        TestSucceeded {
            /// A specifier for the test that succeeded.
            test_specifier: Arc<TestSpecifier>,
            /// The number of steps of the case that were executed by the driver.
            steps_executed: usize,
        },
        /// An event emitted by the runners when a test case has failed.
        TestFailed {
            /// A specifier for the test that failed.
            test_specifier: Arc<TestSpecifier>,
            /// A reason for the failure of the test.
            reason: String,
        },
        /// An event emitted when the test case is assigned a leader node.
        LeaderNodeAssigned {
            /// A specifier for the test that the assignment is for.
            test_specifier: Arc<TestSpecifier>,
            /// The ID of the node that this case is being executed on.
            id: usize,
            /// The platform of the node.
            platform: TestingPlatform,
            /// The connection string of the node.
            connection_string: String,
        },
        /// An event emitted when the test case is assigned a follower node.
        FollowerNodeAssigned {
            /// A specifier for the test that the assignment is for.
            test_specifier: Arc<TestSpecifier>,
            /// The ID of the node that this case is being executed on.
            id: usize,
            /// The platform of the node.
            platform: TestingPlatform,
            /// The connection string of the node.
            connection_string: String,
        },
        /// An event emitted by the runners when the compilation of the pre-link contracts has
        /// succeeded.
        PreLinkContractsCompilationSucceeded {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Version,
            /// The path of the compiler used to compile the contracts.
            compiler_path: PathBuf,
            /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
            /// anew.
            is_cached: bool,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The output of the compiler.
            compiler_output: CompilerOutput
        },
        /// An event emitted by the runners when the compilation of the post-link contracts has
        /// succeeded.
        PostLinkContractsCompilationSucceeded {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Version,
            /// The path of the compiler used to compile the contracts.
            compiler_path: PathBuf,
            /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
            /// anew.
            is_cached: bool,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The output of the compiler.
            compiler_output: CompilerOutput
        },
        /// An event emitted by the runners when the compilation of the pre-link contracts has
        /// failed.
        PreLinkContractsCompilationFailed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Option<Version>,
            /// The path of the compiler used to compile the contracts.
            compiler_path: Option<PathBuf>,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The failure reason.
            reason: String,
        },
        /// An event emitted by the runners when the compilation of the post-link contracts has
        /// failed.
        PostLinkContractsCompilationFailed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The version of the compiler used to compile the contracts.
            compiler_version: Option<Version>,
            /// The path of the compiler used to compile the contracts.
            compiler_path: Option<PathBuf>,
            /// The input provided to the compiler - this is optional and not provided if the
            /// contracts were obtained from the cache.
            compiler_input: Option<CompilerInput>,
            /// The failure reason.
            reason: String,
        },
        /// An event emitted by the runners when libraries have been deployed.
        LibrariesDeployed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The addresses of the libraries that were deployed.
            libraries: BTreeMap<ContractInstance, Address>
        },
        /// An event emitted by the runners when they've deployed a new contract.
        ContractDeployed {
            /// A specifier for the execution that's taking place.
            execution_specifier: Arc<ExecutionSpecifier>,
            /// The instance name of the contract.
            contract_instance: ContractInstance,
            /// The address of the contract.
            address: Address
        },
    }
}

/// An extension to the [`Reporter`] implemented by the macro.
impl RunnerEventReporter {
    pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
        let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
        self.report_subscribe_to_events_event(tx)
            .context("Failed to send subscribe request to reporter task")?;
        rx.await.map_err(Into::into)
    }
}

pub type Reporter = RunnerEventReporter;
pub type TestSpecificReporter = RunnerEventTestSpecificReporter;
pub type ExecutionSpecificReporter = RunnerEventExecutionSpecificReporter;
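To make the generated interface concrete, here is a minimal wiring sketch. The aggregator loop that would drain the receiving side is assumed and not shown, and the argument values are placeholders:

// Illustrative wiring (not part of this change): the macro generates
// RunnerEventReporter plus the scoped reporters used below.
fn wire_up(corpus: Corpus, test_specifier: Arc<TestSpecifier>) -> anyhow::Result<()> {
    // The aggregator would hold on to `_rx` and drain RunnerEvents from it.
    let (tx, _rx) = tokio::sync::mpsc::unbounded_channel::<RunnerEvent>();
    let reporter: Reporter = tx.into(); // via the generated From impl

    // Top-level events go through the generated report_*_event helpers.
    reporter.report_corpus_file_discovery_event(corpus)?;

    // Scoped reporters fill in the specifier fields automatically, so only
    // the remaining fields of the event are passed.
    let test_reporter = reporter.test_specific_reporter(test_specifier);
    test_reporter.report_test_succeeded_event(12usize)?; // steps_executed
    Ok(())
}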
@@ -19,3 +19,6 @@ reqwest = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 sha2 = { workspace = true }
+
+[lints]
+workspace = true
@@ -9,17 +9,19 @@ use std::{
     sync::LazyLock,
 };
 
+use semver::Version;
 use tokio::sync::Mutex;
 
-use crate::download::GHDownloader;
+use crate::download::SolcDownloader;
+use anyhow::Context as _;
 
 pub const SOLC_CACHE_DIRECTORY: &str = "solc";
 pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
 
 pub(crate) async fn get_or_download(
     working_directory: &Path,
-    downloader: &GHDownloader,
-) -> anyhow::Result<PathBuf> {
+    downloader: &SolcDownloader,
+) -> anyhow::Result<(Version, PathBuf)> {
     let target_directory = working_directory
         .join(SOLC_CACHE_DIRECTORY)
         .join(downloader.version.to_string());
@@ -28,34 +30,55 @@ pub(crate) async fn get_or_download(
     let mut cache = SOLC_CACHER.lock().await;
     if cache.contains(&target_file) {
         tracing::debug!("using cached solc: {}", target_file.display());
-        return Ok(target_file);
+        return Ok((downloader.version.clone(), target_file));
     }
 
-    create_dir_all(target_directory)?;
-    download_to_file(&target_file, downloader).await?;
+    create_dir_all(&target_directory).with_context(|| {
+        format!(
+            "Failed to create solc cache directory: {}",
+            target_directory.display()
+        )
+    })?;
+    download_to_file(&target_file, downloader)
+        .await
+        .with_context(|| {
+            format!(
+                "Failed to write downloaded solc to {}",
+                target_file.display()
+            )
+        })?;
     cache.insert(target_file.clone());
 
-    Ok(target_file)
+    Ok((downloader.version.clone(), target_file))
 }
 
-async fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()> {
-    tracing::info!("caching file: {}", path.display());
+async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
     let Ok(file) = File::create_new(path) else {
-        tracing::debug!("cache file already exists: {}", path.display());
         return Ok(());
     };
 
     #[cfg(unix)]
     {
-        let mut permissions = file.metadata()?.permissions();
+        let mut permissions = file
+            .metadata()
+            .with_context(|| format!("Failed to read metadata for {}", path.display()))?
+            .permissions();
         permissions.set_mode(permissions.mode() | 0o111);
-        file.set_permissions(permissions)?;
+        file.set_permissions(permissions).with_context(|| {
+            format!("Failed to set executable permissions on {}", path.display())
+        })?;
    }
 
     let mut file = BufWriter::new(file);
-    file.write_all(&downloader.download().await?)?;
-    file.flush()?;
+    file.write_all(
+        &downloader
+            .download()
+            .await
+            .context("Failed to download solc binary bytes")?,
+    )
+    .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
+    file.flush()
+        .with_context(|| format!("Failed to flush file {}", path.display()))?;
     drop(file);
 
     #[cfg(target_os = "macos")]
@@ -66,8 +89,20 @@ async fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Res
         .stderr(std::process::Stdio::null())
         .stdout(std::process::Stdio::null())
-        .spawn()?
-        .wait()?;
+        .spawn()
+        .with_context(|| {
+            format!(
+                "Failed to spawn xattr to remove quarantine attribute on {}",
+                path.display()
+            )
+        })?
+        .wait()
+        .with_context(|| {
+            format!(
+                "Failed waiting for xattr operation to complete on {}",
+                path.display()
+            )
+        })?;
 
     Ok(())
 }
@@ -11,6 +11,7 @@ use semver::Version;
 use sha2::{Digest, Sha256};
 
 use crate::list::List;
+use anyhow::Context as _;
 
 pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
     LazyLock::new(Default::default);
@@ -30,7 +31,12 @@ impl List {
             return Ok(list.clone());
         }
 
-        let body: List = reqwest::get(url).await?.json().await?;
+        let body: List = reqwest::get(url)
+            .await
+            .with_context(|| format!("Failed to GET solc list from {url}"))?
+            .json()
+            .await
+            .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
 
         LIST_CACHE.lock().unwrap().insert(url, body.clone());
 
@@ -38,21 +44,21 @@ impl List {
     }
 }
 
-/// Download solc binaries from GitHub releases (IPFS links aren't reliable).
+/// Download solc binaries from the official SolidityLang site.
 #[derive(Clone, Debug)]
-pub struct GHDownloader {
+pub struct SolcDownloader {
     pub version: Version,
     pub target: &'static str,
     pub list: &'static str,
 }
 
-impl GHDownloader {
-    pub const BASE_URL: &str = "https://github.com/ethereum/solidity/releases/download";
+impl SolcDownloader {
+    pub const BASE_URL: &str = "https://binaries.soliditylang.org";
 
-    pub const LINUX_NAME: &str = "solc-static-linux";
-    pub const MACOSX_NAME: &str = "solc-macos";
-    pub const WINDOWS_NAME: &str = "solc-windows.exe";
-    pub const WASM_NAME: &str = "soljson.js";
+    pub const LINUX_NAME: &str = "linux-amd64";
+    pub const MACOSX_NAME: &str = "macosx-amd64";
+    pub const WINDOWS_NAME: &str = "windows-amd64";
+    pub const WASM_NAME: &str = "wasm";
 
     async fn new(
         version: impl Into<VersionOrRequirement>,
@@ -68,7 +74,8 @@ impl GHDownloader {
             }),
             VersionOrRequirement::Requirement(requirement) => {
                 let Some(version) = List::download(list)
-                    .await?
+                    .await
+                    .with_context(|| format!("Failed to download solc builds list from {list}"))?
                     .builds
                     .into_iter()
                     .map(|build| build.version)
@@ -102,26 +109,41 @@ impl GHDownloader {
         Self::new(version, Self::WASM_NAME, List::WASM_URL).await
     }
 
-    /// Returns the download link.
-    pub fn url(&self) -> String {
-        format!("{}/v{}/{}", Self::BASE_URL, &self.version, &self.target)
-    }
-
     /// Download the solc binary.
     ///
     /// Errors out if the download fails or the digest of the downloaded file
     /// mismatches the expected digest from the release [List].
     pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
-        tracing::info!("downloading solc: {self:?}");
-        let expected_digest = List::download(self.list)
-            .await?
-            .builds
+        let builds = List::download(self.list)
+            .await
+            .with_context(|| format!("Failed to download solc builds list from {}", self.list))?
+            .builds;
+        let build = builds
             .iter()
             .find(|build| build.version == self.version)
-            .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))
-            .map(|b| b.sha256.strip_prefix("0x").unwrap_or(&b.sha256).to_string())?;
+            .with_context(|| {
+                format!(
+                    "Requested solc version {} was not found in builds list fetched from {}",
+                    self.version, self.list
+                )
+            })?;
 
-        let file = reqwest::get(self.url()).await?.bytes().await?.to_vec();
+        let path = build.path.clone();
+        let expected_digest = build
+            .sha256
+            .strip_prefix("0x")
+            .unwrap_or(&build.sha256)
+            .to_string();
+        let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
+
+        let file = reqwest::get(&url)
+            .await
+            .with_context(|| format!("Failed to GET solc binary from {url}"))?
+            .bytes()
+            .await
+            .with_context(|| format!("Failed to read solc binary bytes from {url}"))?
+            .to_vec();
 
         if hex::encode(Sha256::digest(&file)) != expected_digest {
             anyhow::bail!("sha256 mismatch for solc version {}", self.version);
@@ -133,7 +155,7 @@ impl GHDownloader {
 
 #[cfg(test)]
 mod tests {
-    use crate::{download::GHDownloader, list::List};
+    use crate::{download::SolcDownloader, list::List};
 
     #[tokio::test]
     async fn try_get_windows() {
@@ -141,7 +163,7 @@ mod tests {
             .await
             .unwrap()
             .latest_release;
-        GHDownloader::windows(version)
+        SolcDownloader::windows(version)
             .await
             .unwrap()
            .download()
@@ -155,7 +177,7 @@ mod tests {
             .await
             .unwrap()
             .latest_release;
-        GHDownloader::macosx(version)
+        SolcDownloader::macosx(version)
             .await
             .unwrap()
             .download()
@@ -169,7 +191,7 @@ mod tests {
             .await
             .unwrap()
             .latest_release;
-        GHDownloader::linux(version)
+        SolcDownloader::linux(version)
             .await
             .unwrap()
             .download()
@@ -180,7 +202,7 @@ mod tests {
     #[tokio::test]
     async fn try_get_wasm() {
         let version = List::download(List::WASM_URL).await.unwrap().latest_release;
-        GHDownloader::wasm(version)
+        SolcDownloader::wasm(version)
             .await
             .unwrap()
             .download()
@@ -5,10 +5,12 @@
 
 use std::path::{Path, PathBuf};
 
+use anyhow::Context as _;
 use cache::get_or_download;
-use download::GHDownloader;
+use download::SolcDownloader;
 
 use revive_dt_common::types::VersionOrRequirement;
+use semver::Version;
 
 pub mod cache;
 pub mod download;
@@ -23,18 +25,19 @@ pub async fn download_solc(
     cache_directory: &Path,
     version: impl Into<VersionOrRequirement>,
     wasm: bool,
-) -> anyhow::Result<PathBuf> {
+) -> anyhow::Result<(Version, PathBuf)> {
     let downloader = if wasm {
-        GHDownloader::wasm(version).await
+        SolcDownloader::wasm(version).await
     } else if cfg!(target_os = "linux") {
-        GHDownloader::linux(version).await
+        SolcDownloader::linux(version).await
     } else if cfg!(target_os = "macos") {
-        GHDownloader::macosx(version).await
+        SolcDownloader::macosx(version).await
     } else if cfg!(target_os = "windows") {
-        GHDownloader::windows(version).await
+        SolcDownloader::windows(version).await
     } else {
         unimplemented!()
-    }?;
+    }
+    .context("Failed to initialize the Solc Downloader")?;
 
     get_or_download(cache_directory, &downloader).await
 }
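A sketch of the updated call shape follows; the cache path is a placeholder and the conversion from a `semver::VersionReq` into `VersionOrRequirement` is an assumption:

// Illustrative call (not part of this change): resolves a version
// requirement against the builds list, downloads or reuses the cached
// binary, and now returns the concrete version alongside the path.
async fn fetch() -> anyhow::Result<()> {
    let cache = std::path::Path::new("workdir"); // hypothetical cache directory
    let req = semver::VersionReq::parse("^0.8")?; // assumes Into<VersionOrRequirement>
    let (version, solc) = download_solc(cache, req, false).await?;
    println!("solc {version} available at {}", solc.display());
    Ok(())
}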
Executable
+102
@@ -0,0 +1,102 @@
#!/bin/bash

# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool

set -e # Exit on any error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"

# Optional positional argument: path to polkadot-sdk directory
POLKADOT_SDK_DIR="${1:-}"

# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"

echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""

# Check if test repo already exists
if [ -d "$TEST_REPO_DIR" ]; then
    echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}"
    cd "$TEST_REPO_DIR"
    git pull
    cd ..
else
    echo -e "${GREEN}Cloning test repository...${NC}"
    git clone "$TEST_REPO_URL"
fi

# If polkadot-sdk path is provided, verify and use binaries from there; build if needed
if [ -n "$POLKADOT_SDK_DIR" ]; then
    if [ ! -d "$POLKADOT_SDK_DIR" ]; then
        echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}"
        exit 1
    fi

    POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR")
    echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}"

    REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
    ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
    SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"

    if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
        echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
        (cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
    fi

    for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
        if [ ! -x "$bin" ]; then
            echo -e "${RED}Expected binary not found after build: $bin${NC}"
            exit 1
        fi
    done
else
    echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from \$PATH.${NC}"
fi

# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")

cat > "$CORPUS_FILE" << EOF
{
    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
    "path": "$ABSOLUTE_PATH"
}
EOF

echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"

# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"

echo -e "${GREEN}Starting differential tests...${NC}"
echo "This may take a while..."
echo ""

# Run the tool
RUST_LOG="error" cargo run --release -- execute-tests \
    --corpus "$CORPUS_FILE" \
    --working-directory "$WORKDIR" \
    --concurrency.number-of-nodes 5 \
    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
    2> output.log

echo -e "${GREEN}=== Test run completed! ===${NC}"
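Usage note: the script takes an optional first argument pointing at a polkadot-sdk checkout, building the three node binaries there if they are missing; with no argument it expects revive-dev-node, eth-rpc, and substrate-node to already be on $PATH. The test runner's stdout is captured in logs.log and its stderr in output.log.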
+497
@@ -0,0 +1,497 @@
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "title": "Metadata",
  "description": "A MatterLabs metadata file.\n\nThis defines the structure that the MatterLabs metadata files follow for defining the tests or\nthe workloads.\n\nEach metadata file is composed of multiple test cases where each test case is isolated from the\nothers and runs in a completely different address space. Each test case is composed of a number\nof steps and assertions that should be performed as part of the test case.",
  "type": "object",
  "properties": {
    "comment": {
      "description": "This is an optional comment on the metadata file which has no impact on the execution in any\nway.",
      "type": [
        "string",
        "null"
      ]
    },
    "ignore": {
      "description": "An optional boolean which defines if the metadata file as a whole should be ignored. If null\nthen the metadata file will not be ignored.",
      "type": [
        "boolean",
        "null"
      ]
    },
    "targets": {
      "description": "An optional vector of targets that this Metadata file's cases can be executed on. As an\nexample, if we wish for the metadata file's cases to only be run on PolkaVM then we'd\nspecify a target of \"PolkaVM\" in here.",
      "type": [
        "array",
        "null"
      ],
      "items": {
        "type": "string"
      }
    },
    "cases": {
      "description": "A vector of the test cases and workloads contained within the metadata file. This is their\nprimary description.",
      "type": "array",
      "items": {
        "$ref": "#/$defs/Case"
      }
    },
    "contracts": {
      "description": "A map of all of the contracts that the test requires to run.\n\nThis is a map where the key is the name of the contract instance and the value is the\ncontract's path and ident in the file.\n\nIf any contract is to be used by the test then it must be included in here first so that the\nframework is aware of its path, compiles it, and prepares it.",
      "type": [
        "object",
        "null"
      ],
      "additionalProperties": {
        "$ref": "#/$defs/ContractPathAndIdent"
      }
    },
    "libraries": {
      "description": "The set of libraries that this metadata file requires.",
      "type": [
        "object",
        "null"
      ],
      "additionalProperties": {
        "type": "object",
        "additionalProperties": {
          "$ref": "#/$defs/ContractInstance"
        }
      }
    },
    "modes": {
      "description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```",
      "type": [
        "array",
        "null"
      ],
      "items": {
        "$ref": "#/$defs/ParsedMode"
      }
    },
    "required_evm_version": {
      "description": "This field specifies an EVM version requirement that the test case has, where the test might\nonly be run if the EVM version of the nodes matches the EVM version specified here.",
      "anyOf": [
        {
          "$ref": "#/$defs/EvmVersionRequirement"
        },
        {
          "type": "null"
        }
      ]
    },
    "compiler_directives": {
      "description": "A set of compilation directives that will be passed to the compiler whenever the contracts\nfor the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]\nis just a filter for when a test can run whereas this is an instruction to the compiler.",
      "anyOf": [
        {
          "$ref": "#/$defs/CompilationDirectives"
        },
        {
          "type": "null"
        }
      ]
    }
  },
  "required": [
    "cases"
  ],
  "$defs": {
    "Case": {
      "type": "object",
      "properties": {
        "name": {
          "description": "An optional name of the test case.",
          "type": [
            "string",
            "null"
          ]
        },
        "comment": {
          "description": "An optional comment on the case which has no impact on the execution in any way.",
          "type": [
            "string",
            "null"
          ]
        },
        "modes": {
          "description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nIf this is provided then it takes higher priority than the modes specified in the metadata\nfile.",
          "type": [
            "array",
            "null"
          ],
          "items": {
            "$ref": "#/$defs/ParsedMode"
          }
        },
        "inputs": {
          "description": "The set of steps to run as part of this test case.",
          "type": "array",
          "items": {
            "$ref": "#/$defs/Step"
          }
        },
        "group": {
          "description": "An optional name of the group of tests that this test belongs to.",
          "type": [
            "string",
            "null"
          ]
        },
        "expected": {
          "description": "An optional set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be run is that the transaction\nwas successful.\n\nThis expectation that's on the case itself will be attached to the final step of the case.",
          "anyOf": [
            {
              "$ref": "#/$defs/Expected"
            },
            {
              "type": "null"
            }
          ]
        },
        "ignore": {
          "description": "An optional boolean which defines if the case as a whole should be ignored. If null then the\ncase will not be ignored.",
          "type": [
            "boolean",
            "null"
          ]
        }
      },
      "required": [
        "inputs"
      ]
    },
    "ParsedMode": {
      "description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nWe can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].",
      "type": "string"
    },
    "Step": {
      "description": "A test step.\n\nA test step can be anything. It could be an invocation to a function, an assertion, or any other\naction that needs to be run or executed on the nodes used in the tests.",
      "anyOf": [
        {
          "description": "A function call or an invocation to some function on some smart contract.",
          "$ref": "#/$defs/Input"
        },
        {
          "description": "A step for performing a balance assertion on some account or contract.",
          "$ref": "#/$defs/BalanceAssertion"
        },
        {
          "description": "A step for asserting that the storage of some contract or account is empty.",
          "$ref": "#/$defs/StorageEmptyAssertion"
        }
      ]
    },
    "Input": {
      "description": "This is an input step which is a transaction description that the framework translates into a\ntransaction and executes on the nodes.",
      "type": "object",
      "properties": {
        "caller": {
          "description": "The address of the account performing the call and paying the fees for it.",
          "type": "string",
          "default": "0x90f8bf6a479f320ead074411a4b0e7944ea8c9c1"
        },
        "comment": {
          "description": "An optional comment on the step which has no impact on the execution in any way.",
          "type": [
            "string",
            "null"
          ]
        },
        "instance": {
          "description": "The contract instance that's being called in this transaction step.",
          "$ref": "#/$defs/ContractInstance",
          "default": "Test"
        },
        "method": {
          "description": "The method that's being called in this step.",
          "$ref": "#/$defs/Method"
        },
        "calldata": {
          "description": "The calldata that the function should be invoked with.",
          "$ref": "#/$defs/Calldata",
          "default": []
        },
        "expected": {
          "description": "A set of assertions and expectations to have for the transaction.",
          "anyOf": [
            {
              "$ref": "#/$defs/Expected"
            },
            {
              "type": "null"
            }
          ]
        },
        "value": {
          "description": "An optional value to provide as part of the transaction.",
          "anyOf": [
            {
              "$ref": "#/$defs/EtherValue"
            },
            {
              "type": "null"
            }
          ]
        },
        "variable_assignments": {
          "description": "Variable assignments to perform in the framework allowing us to reference them again later on\nduring the execution.",
          "anyOf": [
            {
              "$ref": "#/$defs/VariableAssignments"
            },
            {
              "type": "null"
            }
          ]
        }
      },
      "required": [
        "method"
      ]
    },
    "ContractInstance": {
      "description": "Represents a contract instance found in a metadata file.\n\nTypically, this is used as the key to the \"contracts\" field of metadata files.",
      "type": "string"
    },
    "Method": {
      "description": "Specify how the contract is called.",
      "anyOf": [
        {
          "description": "Initiate a deploy transaction, calling the contract's constructor.\n\nIndicated by `#deployer`.",
          "type": "string",
          "const": "#deployer"
        },
        {
          "description": "Does not calculate and insert a function selector.\n\nIndicated by `#fallback`.",
          "type": "string",
          "const": "#fallback"
        },
        {
          "description": "Call the public function with the given name.",
          "type": "string"
        }
      ]
    },
    "Calldata": {
      "description": "A type definition for the calldata supported by the testing framework.\n\nWe choose to document all of the types used in [`Calldata`] in this one doc comment to elaborate\non why they exist and consolidate all of the documentation for calldata in a single place where\nit can be viewed and understood.\n\nThe [`Single`] variant of this enum is quite simple and straightforward: it's a hex-encoded byte\narray of the calldata.\n\nThe [`Compound`] type is more intricate and allows for capabilities such as resolution and some\nsimple arithmetic operations. It houses a vector of [`CalldataItem`]s which is just a wrapper\naround an owned string.\n\nA [`CalldataItem`] could be a simple hex string of a single calldata argument, but it could also\nbe something that requires resolution such as `MyContract.address` which is a variable that is\nunderstood by the resolution logic to mean \"Lookup the address of this particular contract\ninstance\".\n\nIn addition to the above, the format supports some simple arithmetic operations like add, sub,\ndivide, multiply, bitwise AND, bitwise OR, and bitwise XOR. Our parser understands the [reverse\npolish notation] simply because it's easy to write a calculator for that notation and since we\ndo not have plans to use arithmetic too often in tests. In reverse polish notation a typical\n`2 + 4` would be written as `2 4 +` which makes this notation very simple to implement through\na stack.\n\nCombining the above, a single [`CalldataItem`] could employ both resolution and arithmetic at\nthe same time. For example, a [`CalldataItem`] of `$BLOCK_NUMBER $BLOCK_NUMBER +` means that\nthe block number should be retrieved and then it should be added to itself.\n\nInternally, we split the [`CalldataItem`] by spaces. Therefore, `$BLOCK_NUMBER $BLOCK_NUMBER+`\nis invalid but `$BLOCK_NUMBER $BLOCK_NUMBER +` is valid and can be understood by the parser and\ncalculator. After the split is done, each token is parsed into a [`CalldataToken<&str>`] forming\nan [`Iterator`] over [`CalldataToken<&str>`]. A [`CalldataToken<&str>`] can then be resolved\ninto a [`CalldataToken<U256>`] through the resolution logic. Finally, after resolution is done,\nthis iterator of [`CalldataToken<U256>`] is collapsed into the final result by applying the\narithmetic operations requested.\n\nFor example, supplying a [`Compound`] calldata of `0xdeadbeef` produces an iterator of a single\n[`CalldataToken<&str>`] item of the value [`CalldataToken::Item`] with the string value\n`0xdeadbeef`, which we can then resolve into the appropriate [`U256`] value and convert into\ncalldata.\n\nIn summary, the various types used in [`Calldata`] represent the following:\n- [`CalldataItem`]: A calldata string from the metadata files.\n- [`CalldataToken<&str>`]: Typically used in an iterator of items from the space-split\n  [`CalldataItem`] and represents a token that has not yet been resolved into its value.\n- [`CalldataToken<U256>`]: Represents a token that's been resolved from being a string and into\n  the word-size calldata argument on which we can perform arithmetic.\n\n[`Single`]: Calldata::Single\n[`Compound`]: Calldata::Compound\n[reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation",
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "array",
          "items": {
            "$ref": "#/$defs/CalldataItem"
          }
        }
      ]
    },
    "CalldataItem": {
      "description": "This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved\naccording to the resolution rules of the tool.",
      "type": "string"
    },
    "Expected": {
      "description": "A set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be run is that the transaction\nwas successful.",
      "anyOf": [
        {
          "description": "An assertion that the transaction succeeded and returned the provided set of data.",
          "$ref": "#/$defs/Calldata"
        },
        {
          "description": "A more complex assertion.",
          "$ref": "#/$defs/ExpectedOutput"
        },
        {
          "description": "A set of assertions.",
          "type": "array",
          "items": {
            "$ref": "#/$defs/ExpectedOutput"
          }
        }
      ]
    },
    "ExpectedOutput": {
      "description": "A set of assertions to run on the transaction.",
      "type": "object",
      "properties": {
        "compiler_version": {
          "description": "An optional compiler version that's required in order for this assertion to run.",
          "type": [
            "string",
            "null"
          ]
        },
        "return_data": {
          "description": "An optional field of the expected returns from the invocation.",
          "anyOf": [
            {
              "$ref": "#/$defs/Calldata"
            },
            {
              "type": "null"
            }
          ]
        },
        "events": {
          "description": "An optional set of assertions to run on the emitted events from the transaction.",
          "type": [
            "array",
            "null"
          ],
          "items": {
            "$ref": "#/$defs/Event"
          }
        },
        "exception": {
          "description": "A boolean which defines whether we expect the transaction to succeed or fail.",
          "type": "boolean",
          "default": false
        }
      }
    },
    "Event": {
      "type": "object",
      "properties": {
        "address": {
          "description": "An optional field of the address of the emitter of the event.",
          "type": [
            "string",
            "null"
          ]
        },
        "topics": {
          "description": "The set of topics to expect the event to have.",
          "type": "array",
          "items": {
            "type": "string"
          }
        },
        "values": {
          "description": "The set of values to expect the event to have.",
          "$ref": "#/$defs/Calldata"
        }
      },
      "required": [
        "topics",
        "values"
      ]
    },
    "EtherValue": {
      "description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
      "type": "string"
    },
    "VariableAssignments": {
      "type": "object",
      "properties": {
        "return_data": {
|
||||||
|
"description": "A vector of the variable names to assign to the return data.\n\nExample: `UniswapV3PoolAddress`",
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"return_data"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"BalanceAssertion": {
|
||||||
|
"description": "This represents a balance assertion step where the framework needs to query the balance of some\naccount or contract and assert that it's some amount.",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"comment": {
|
||||||
|
"description": "An optional comment on the balance assertion.",
|
||||||
|
"type": [
|
||||||
|
"string",
|
||||||
|
"null"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"address": {
|
||||||
|
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"expected_balance": {
|
||||||
|
"description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"address",
|
||||||
|
"expected_balance"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"StorageEmptyAssertion": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"comment": {
|
||||||
|
"description": "An optional comment on the storage empty assertion.",
|
||||||
|
"type": [
|
||||||
|
"string",
|
||||||
|
"null"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"address": {
|
||||||
|
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"is_storage_empty": {
|
||||||
|
"description": "A boolean of whether the storage of the address is empty or not.",
|
||||||
|
"type": "boolean"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": [
|
||||||
|
"address",
|
||||||
|
"is_storage_empty"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"ContractPathAndIdent": {
|
||||||
|
"description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"EvmVersionRequirement": {
|
||||||
|
"description": "An EVM version requirement that the test case has. This gets serialized and deserialized from\nand into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the\nEVM version.\n\nWhen specified, the framework will only run the test if the node's EVM version matches that\nrequired by the metadata file.",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
"CompilationDirectives": {
|
||||||
|
"description": "A set of compilation directives that will be passed to the compiler whenever the contracts for\nthe test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is\njust a filter for when a test can run whereas this is an instruction to the compiler.\nDefines how the compiler should handle revert strings.",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"revert_string_handling": {
|
||||||
|
"description": "Defines how the revert strings should be handled.",
|
||||||
|
"anyOf": [
|
||||||
|
{
|
||||||
|
"$ref": "#/$defs/RevertString"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "null"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"RevertString": {
|
||||||
|
"description": "Defines how the compiler should handle revert strings.",
|
||||||
|
"oneOf": [
|
||||||
|
{
|
||||||
|
"description": "The default handling of the revert strings.",
|
||||||
|
"type": "string",
|
||||||
|
"const": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "The debug handling of the revert strings.",
|
||||||
|
"type": "string",
|
||||||
|
"const": "debug"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Strip the revert strings.",
|
||||||
|
"type": "string",
|
||||||
|
"const": "strip"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Provide verbose debug strings for the revert string.",
|
||||||
|
"type": "string",
|
||||||
|
"const": "verboseDebug"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
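
To make the definitions above concrete, here is a sketch of an `ExpectedOutput` instance that should validate against this schema. The contract reference `MyContract.address`, the compiler version requirement, and the topic hash are illustrative placeholders rather than values the schema prescribes, and the compound entry `$BLOCK_NUMBER 1 +` assumes the reverse polish arithmetic described in the `Calldata` documentation.

```json
{
  "compiler_version": ">=0.8.0",
  "return_data": ["$BLOCK_NUMBER 1 +"],
  "events": [
    {
      "address": "MyContract.address",
      "topics": [
        "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
      ],
      "values": "0x0000000000000000000000000000000000000000000000000000000000000001"
    }
  ],
  "exception": false
}
```

All four top-level keys are optional per the schema; each `events` entry must carry `topics` and `values`, and both `return_data` and `values` accept either a single hex string or an array of `CalldataItem`s.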
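Along the same lines, `BalanceAssertion` and `StorageEmptyAssertion` steps might look as follows. The `Test.address` variable is an assumed example that relies on the calldata resolution rules quoted above, and the balance uses the decimal-string encoding the schema calls for (here, 1 ether expressed in wei).

```json
{
  "comment": "The test contract should hold exactly 1 ether after the transfer.",
  "address": "Test.address",
  "expected_balance": "1000000000000000000"
}
```

```json
{
  "comment": "The destructed contract should have no storage left.",
  "address": "Test.address",
  "is_storage_empty": true
}
```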
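Finally, the string-typed definitions (`ContractPathAndIdent`, `EvmVersionRequirement`, `EtherValue`) encode their formats in prose rather than in JSON Schema patterns, so the values below are assumptions derived from those descriptions; the concrete contract path, the EVM version name, and the exact spacing of Ether values are not enumerated by the schema itself.

```text
ContractPathAndIdent:  contracts/Flipper.sol:Flipper
EvmVersionRequirement: >=shanghai
EtherValue:            1 ether
```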