Compare commits

...

13 Commits

Author SHA1 Message Date
Omar Abdulla 3ff5c20fba Support specifying the path of the polkadot sdk 2025-08-25 23:55:24 +03:00
Omar Abdulla fc7ce468ec Merge remote-tracking branch 'origin/main' into feature/quick-run-script 2025-08-25 23:49:25 +03:00
Omar eb264fcc7b feature/fix abi finding resolc (#154)
* Configure kitchensink to use devnode by default

* Update the kitchensink tests

* Fix the logic for finding the ABI in resolc

* Edit how CLI reporter prints
2025-08-25 20:47:29 +00:00
Omar c887b398b4 Merge branch 'main' into feature/quick-run-script 2025-08-25 20:20:49 +03:00
Omar 84b139d3b4 Configure kitchensink to use devnode by default (#153)
* Configure kitchensink to use devnode by default

* Update the kitchensink tests
2025-08-25 15:46:06 +00:00
Omar Abdulla 028cffaeff Edit the runner script 2025-08-25 18:31:36 +03:00
Omar Abdulla 27f8542342 Update the quick run script 2025-08-25 16:09:30 +03:00
Omar Abdulla 38272a546d Fix the issue with corpus directory canonicalization 2025-08-25 16:08:03 +03:00
Omar Abdulla f2c4eb1c80 Add more context to errors 2025-08-25 15:53:12 +03:00
Omar Abdulla 94d93e87c2 Add a quick run script 2025-08-25 14:32:57 +03:00
Omar d93824d973 Updated Reporting Infrastructure (#151)
* Remove the old reporting infra

* Use the Test struct more in the code

* Implement the initial set of reporter events

* Add more runner events to the reporter and refine the structure

* Add reporting infra for reporting ignored tests

* Update report to use better map data structures

* Add case status information to the report

* Integrate the reporting infrastructure with the
CLI reporter used by the program.

* Include contract compilation information in report

* Cleanup report model

* Add information on the deployed contracts
2025-08-25 11:16:09 +00:00
Omar bec5a7e390 Increase Kitchensink maximum http connections (#148)
* Throttle the Kitchensink requests

* Increase max connections limit for kitchensink
2025-08-20 22:25:17 +00:00
Omar 85033cfead Update the readme (#145) 2025-08-19 17:41:26 +00:00
34 changed files with 2823 additions and 743 deletions
+3 -1
View File
@@ -8,4 +8,6 @@ node_modules
# added to the .gitignore file.
*.log
profile.json.gz
profile.json.gz
resolc-compiler-tests
workdir
Generated
+37 -4
View File
@@ -4588,13 +4588,20 @@ dependencies = [
name = "revive-dt-report"
version = "0.1.0"
dependencies = [
"alloy-primitives",
"anyhow",
"indexmap 2.10.0",
"paste",
"revive-dt-common",
"revive-dt-compiler",
"revive-dt-config",
"revive-dt-format",
"semver 1.0.26",
"serde",
"serde_json",
"serde_with",
"tokio",
"tracing",
]
[[package]]
@@ -4845,6 +4852,30 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "schemars"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "schemars"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "schnellru"
version = "0.2.4"
@@ -5075,15 +5106,17 @@ dependencies = [
[[package]]
name = "serde_with"
version = "3.12.0"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa"
checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
dependencies = [
"base64 0.22.1",
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.10.0",
"schemars 0.9.0",
"schemars 1.0.4",
"serde",
"serde_derive",
"serde_json",
@@ -5093,9 +5126,9 @@ dependencies = [
[[package]]
name = "serde_with_macros"
version = "3.12.0"
version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e"
checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
dependencies = [
"darling",
"proc-macro2",
+2
View File
@@ -34,6 +34,7 @@ futures = { version = "0.3.31" }
hex = "0.4.3"
regex = "1"
moka = "0.12.10"
paste = "1.0.15"
reqwest = { version = "0.12.15", features = ["json"] }
once_cell = "1.21"
semver = { version = "1.0", features = ["serde"] }
@@ -43,6 +44,7 @@ serde_json = { version = "1.0", default-features = false, features = [
"std",
"unbounded_depth",
] }
serde_with = { version = "3.14.0" }
sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
+193 -17
View File
@@ -1,34 +1,210 @@
# revive-differential-tests
<div align="center">
<h1><code>Revive Differential Tests</code></h1>
The revive differential testing framework allows smart contract tests to be defined in a declarative manner in order to compile and execute them against different Ethereum-compatible blockchain implementations. This is useful to:
- Analyze observable differences in contract compilation and execution across different blockchain implementations, including contract storage, account balances, transaction output and emitted events on a per-transaction basis.
- Collect and compare benchmark metrics such as code size, gas usage or transaction throughput in transactions per second (TPS) of different blockchain implementations.
- Ensure reproducible contract builds across multiple compiler implementations or multiple host platforms.
- Implement end-to-end regression tests for Ethereum-compatible smart contract stacks.
<p>
<strong>Differential testing for Ethereum-compatible smart contract stacks</strong>
</p>
</div>
# Declarative test format
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
For now, the format used to write tests is the [matter-labs era compiler format](https://github.com/matter-labs/era-compiler-tests?tab=readme-ov-file#matter-labs-simplecomplex-format). This allows us to re-use many tests from their corpora.
- Geth (EVM reference implementation)
- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
# The `retester` utility
Use it to:
The `retester` helper utility is used to run the tests. To get an idea of what `retester` can do, please consult its command line help:
- Detect observable differences between platforms (execution success, logs, state changes)
- Ensure reproducible builds across compilers/hosts
- Run end-to-end regression suites
```
cargo run -p revive-dt-core -- --help
```
This framework uses the [MatterLabs tests format](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity) for declarative tests, which is composed of the following (a minimal sketch of a metadata file is shown below):
- Metadata files; a metadata file is akin to a module of tests in Rust.
- Each metadata file contains multiple cases; a case is akin to a Rust test, in the same way that a module can contain multiple tests.
- Each case contains multiple steps and assertions; this is akin to a Rust test that contains multiple statements.
Metadata files are JSON files, but Solidity files can also be metadata files if they include inline metadata provided as a comment at the top of the contract.
All of the steps contained within each test case are either:
- Transactions that need to be submitted and assertions to run on the submitted transactions.
- Assertions on the state of the chain (e.g., account balances, storage, etc.).
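For illustration, a minimal metadata file in this format might look like the following sketch. The field names follow the MatterLabs format linked above, but the contract, method, and values here are hypothetical:
```json
{
    "cases": [
        {
            "name": "addition",
            "inputs": [
                {
                    "instance": "Adder",
                    "method": "add",
                    "calldata": ["1", "2"]
                }
            ],
            "expected": ["3"]
        }
    ]
}
```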
All of the transactions submitted by this tool to the test nodes follow a logic similar to what wallets do: we first use alloy to estimate the transaction fees, then attach them to the transaction, submit it to the node, and await the transaction receipt.
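As a rough illustration of that flow, here is a minimal sketch using alloy. This is not the repository's actual runner code: the function name is hypothetical, exact alloy method signatures vary between versions, and the provider is assumed to have a signer configured.

```rust
use alloy::network::TransactionBuilder;
use alloy::providers::Provider;
use alloy::rpc::types::TransactionRequest;

// A minimal sketch (not this repository's code) of the wallet-like
// submission flow: estimate fees, attach them, submit, await the receipt.
// Exact method signatures vary across alloy versions.
async fn submit_like_a_wallet(
    provider: impl Provider,
    mut tx: TransactionRequest,
) -> anyhow::Result<()> {
    // Estimate the gas limit and price, then attach them to the transaction.
    let gas_limit = provider.estimate_gas(&tx).await?;
    let gas_price = provider.get_gas_price().await?;
    tx.set_gas_limit(gas_limit);
    tx.set_gas_price(gas_price);

    // Submit the transaction and await its receipt.
    let receipt = provider.send_transaction(tx).await?.get_receipt().await?;
    tracing::debug!(?receipt, "Transaction mined");
    Ok(())
}
```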
This repository contains none of the tests; it only contains the testing framework (the test runner). The tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository, which is a clone of [MatterLabs' test suite](https://github.com/matter-labs/era-compiler-tests) with some modifications and adjustments made to suit our use case.
## Requirements
This section describes the dependencies that this framework requires in order to run. Compiling the framework is pretty straightforward, and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
- Stable Rust
- Geth - When doing differential testing against the PVM, we submit transactions to a Geth node and to Kitchensink and compare the results.
- Kitchensink - The Substrate-based PolkaVM node implementation whose results are compared against Geth's.
- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
- Solc - This is actually a transitive dependency: while this tool doesn't require solc directly (it downloads the versions that it requires), resolc requires that solc is installed and available in the path.
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
All of the above need to be installed and available in the path in order for the tool to work.
## Running The Tool
This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool, but rather run it from the root of the repository using `cargo run --release`. The help output gives you all of the information you need about each of the options and flags that the tool offers.
```bash
$ cargo run --release -- --help
Usage: retester [OPTIONS]
Options:
-s, --solc <SOLC>
The `solc` version to use if the test didn't specify it explicitly
[default: 0.8.29]
--wasm
Use the Wasm compiler versions
-r, --resolc <RESOLC>
The path to the `resolc` executable to be tested.
By default it uses the `resolc` binary found in `$PATH`.
If `--wasm` is set, this should point to the resolc Wasm file.
[default: resolc]
-c, --corpus <CORPUS>
A list of test corpus JSON files to be tested
-w, --workdir <WORKING_DIRECTORY>
A place to store temporary artifacts during test execution.
Creates a temporary dir if not specified.
-g, --geth <GETH>
The path to the `geth` executable.
By default it uses `geth` binary found in `$PATH`.
[default: geth]
--geth-start-timeout <GETH_START_TIMEOUT>
The maximum time in milliseconds to wait for geth to start
[default: 5000]
--genesis <GENESIS_FILE>
Configure nodes according to this genesis.json file
[default: genesis.json]
-a, --account <ACCOUNT>
The signing account private key
[default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
--private-keys-count <PRIVATE_KEYS_TO_ADD>
This argument controls which private keys the nodes should have access to and add to their wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
[default: 100000]
-l, --leader <LEADER>
The differential testing leader node implementation
[default: geth]
Possible values:
- geth: The go-ethereum reference full node EVM implementation
- kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
-f, --follower <FOLLOWER>
The differential testing follower node implementation
[default: kitchensink]
Possible values:
- geth: The go-ethereum reference full node EVM implementation
- kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
--compile-only <COMPILE_ONLY>
Only compile against this testing platform (doesn't execute the tests)
Possible values:
- geth: The go-ethereum reference full node EVM implementation
- kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implementation
--number-of-nodes <NUMBER_OF_NODES>
Determines the number of nodes that will be spawned for each chain
[default: 1]
--number-of-threads <NUMBER_OF_THREADS>
Determines the number of tokio worker threads that will be used
[default: 16]
--number-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
Determines the number of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes
-e, --extract-problems
Extract problems back to the test corpus
-k, --kitchensink <KITCHENSINK>
The path to the `kitchensink` executable.
By default it uses `substrate-node` binary found in `$PATH`.
[default: substrate-node]
-p, --eth_proxy <ETH_PROXY>
The path to the `eth_proxy` executable.
By default it uses `eth-rpc` binary found in `$PATH`.
[default: eth-rpc]
-i, --invalidate-compilation-cache
Controls if the compilation cache should be invalidated or not
-h, --help
Print help (see a summary with '-h')
```
For example, to run the [complex Solidity tests](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity/complex), define a corpus structure as follows:
To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
```json
{
"name": "ML Solidity Complex",
"path": "/path/to/era-compiler-tests/solidity/complex"
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "resolc-compiler-tests/fixtures/solidity"
}
```
Assuming this is saved in a `ml-solidity-complex.json` file, the following command will try to compile and execute the tests found inside the corpus:
> [!NOTE]
> The tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
The simplest command to run this tool is the following:
```bash
RUST_LOG=debug cargo r --release -p revive-dt-core -- --corpus ml-solidity-complex.json
RUST_LOG="info" cargo run --release -- \
--corpus path_to_your_corpus_file.json \
--workdir path_to_a_temporary_directory_to_cache_things_in \
--number-of-nodes 5 \
> logs.log \
2> output.log
```
The above command will run the tool, executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need. However, if you're contributing to the tool, then the `logs.log` file will be very valuable.
If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
```json
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"paths": [
"path/to/a/single/metadata/file/I/want/to/run.json",
"path/to/a/directory/to/find/all/metadata/files/within"
]
}
```
+14 -5
View File
@@ -3,19 +3,28 @@ use std::{
path::Path,
};
use anyhow::Result;
use anyhow::{Context, Result};
/// This method clears the passed directory of all of the files and directories contained within
/// it, without deleting the directory itself.
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
for entry in read_dir(path.as_ref())? {
let entry = entry?;
for entry in read_dir(path.as_ref())
.with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
{
let entry = entry.with_context(|| {
format!(
"Failed to read an entry in directory: {}",
path.as_ref().display()
)
})?;
let entry_path = entry.path();
if entry_path.is_file() {
remove_file(entry_path)?
remove_file(&entry_path)
.with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
} else {
remove_dir_all(entry_path)?
remove_dir_all(&entry_path)
.with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
}
}
Ok(())
+5 -2
View File
@@ -1,7 +1,7 @@
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Result, anyhow};
use anyhow::{Context as _, Result, anyhow};
const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
@@ -38,7 +38,10 @@ where
));
}
match future().await? {
match future()
.await
.context("Polled future returned an error during polling loop")?
{
ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration,
@@ -1,12 +1,23 @@
#[macro_export]
macro_rules! impl_for_wrapper {
(Display, $ident: ident) => {
#[automatically_derived]
impl std::fmt::Display for $ident {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
};
(FromStr, $ident: ident) => {
#[automatically_derived]
impl std::str::FromStr for $ident {
type Err = anyhow::Error;
fn from_str(s: &str) -> anyhow::Result<Self> {
s.parse().map(Self).map_err(Into::into)
}
}
};
}
/// Defines wrappers around types.
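As a usage sketch of this macro (the wrapper type here is hypothetical, not from this repository), it delegates `Display` and `FromStr` to the inner type:

```rust
// Hypothetical newtype wrapper; any inner type implementing
// Display and FromStr (with an error convertible to anyhow::Error) works.
struct BlockNumber(u64);

impl_for_wrapper!(Display, BlockNumber);
impl_for_wrapper!(FromStr, BlockNumber);

fn demo() -> anyhow::Result<()> {
    // FromStr delegates to u64::from_str and wraps the result in BlockNumber.
    let n: BlockNumber = "42".parse()?;
    // Display delegates to the inner u64.
    assert_eq!(n.to_string(), "42");
    Ok(())
}
```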
+5 -3
View File
@@ -13,6 +13,7 @@ use std::{
use alloy::json_abi::JsonAbi;
use alloy_primitives::Address;
use anyhow::Context;
use semver::Version;
use serde::{Deserialize, Serialize};
@@ -136,9 +137,10 @@ where
}
pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
self.input
.sources
.insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?);
self.input.sources.insert(
path.as_ref().to_path_buf(),
read_to_string(path.as_ref()).context("Failed to read the contract source")?,
);
Ok(self)
}
+89 -35
View File
@@ -16,7 +16,6 @@ use revive_solc_json_interface::{
SolcStandardJsonOutput,
};
use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use alloy::json_abi::JsonAbi;
@@ -120,18 +119,28 @@ impl SolidityCompiler for Resolc {
.join(","),
);
}
let mut child = command.spawn()?;
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?;
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
let serialized_input = serde_json::to_vec(&input)?;
stdin_pipe.write_all(&serialized_input).await?;
let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for resolc")?;
stdin_pipe
.write_all(&serialized_input)
.await
.context("Failed to write Standard JSON to resolc stdin")?;
let output = child.wait_with_output().await?;
let output = child
.wait_with_output()
.await
.context("Failed while waiting for resolc process to finish")?;
let stdout = output.stdout;
let stderr = output.stderr;
if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input)?;
let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&stderr);
tracing::error!(
status = %output.status,
@@ -142,12 +151,14 @@ impl SolidityCompiler for Resolc {
anyhow::bail!("Compilation failed with an error: {message}");
}
let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr)
)
})?;
let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
.map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr)
)
})
.context("Failed to parse resolc standard JSON output")?;
tracing::debug!(
output = %serde_json::to_string(&parsed).unwrap(),
@@ -174,7 +185,10 @@ impl SolidityCompiler for Resolc {
let mut compiler_output = CompilerOutput::default();
for (source_path, contracts) in contracts.into_iter() {
let source_path = PathBuf::from(source_path).canonicalize()?;
let src_for_msg = source_path.clone();
let source_path = PathBuf::from(source_path)
.canonicalize()
.with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
let map = compiler_output.contracts.entry(source_path).or_default();
for (contract_name, contract_information) in contracts.into_iter() {
@@ -182,23 +196,41 @@ impl SolidityCompiler for Resolc {
.evm
.and_then(|evm| evm.bytecode.clone())
.context("Unexpected - Contract compiled with resolc has no bytecode")?;
let abi = contract_information
.metadata
.as_ref()
.and_then(|metadata| metadata.as_object())
.and_then(|metadata| metadata.get("solc_metadata"))
.and_then(|solc_metadata| solc_metadata.as_str())
.and_then(|metadata| serde_json::from_str::<serde_json::Value>(metadata).ok())
.and_then(|metadata| {
metadata.get("output").and_then(|output| {
output
.get("abi")
.and_then(|abi| serde_json::from_value::<JsonAbi>(abi.clone()).ok())
})
})
.context(
"Unexpected - Failed to get the ABI for a contract compiled with resolc",
)?;
let abi = {
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
};
let solc_metadata =
serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
};
map.insert(contract_name, (bytecode.object, abi));
}
}
@@ -234,8 +266,20 @@ impl SolidityCompiler for Resolc {
let output = Command::new(self.resolc_path.as_path())
.arg("--version")
.stdout(Stdio::piped())
.spawn()?
.wait_with_output()?
.spawn()
.with_context(|| {
format!(
"Failed to spawn resolc at {} to get version",
self.resolc_path.display()
)
})?
.wait_with_output()
.with_context(|| {
format!(
"Failed waiting for resolc at {} to finish --version",
self.resolc_path.display()
)
})?
.stdout;
let output = String::from_utf8_lossy(&output);
@@ -247,7 +291,9 @@ impl SolidityCompiler for Resolc {
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
let version = Version::parse(version_string).with_context(|| {
format!("Failed to parse resolc semver from '{version_string}'")
})?;
vacant_entry.insert(version.clone());
@@ -257,15 +303,23 @@ impl SolidityCompiler for Resolc {
}
fn supports_mode(
compiler_version: &Version,
_compiler_version: &Version,
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
// We only support the Y (i.e., compile via Yul IR) mode here, which also means that we can
// only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
// needs this to translate to LLVM IR and then RISC-V.
// Note: the original implementation of this function looked like the following:
// ```
// pipeline == ModePipeline::ViaYulIR && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
// ```
// However, that implementation is sadly incorrect since the version that's passed into this
// function is not the version of solc but the version of resolc. This is despite the fact
// that resolc depends on Solc for the initial Yul codegen. Therefore, we have skipped the
// version check until we have a better integration between resolc and solc.
pipeline == ModePipeline::ViaYulIR
&& compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
}
}
+53 -17
View File
@@ -49,7 +49,11 @@ impl SolidityCompiler for Solc {
}: CompilerInput,
_: Self::Options,
) -> anyhow::Result<CompilerOutput> {
let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
let compiler_supports_via_ir = self
.version()
.await
.context("Failed to query solc version to determine via-ir support")?
>= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
// Be careful to entirely omit the viaIR field if the compiler does not support it,
// as it will error if you provide fields it does not know about. Because
@@ -134,15 +138,25 @@ impl SolidityCompiler for Solc {
.join(","),
);
}
let mut child = command.spawn()?;
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn solc at {}", self.solc_path.display()))?;
let stdin = child.stdin.as_mut().expect("should be piped");
let serialized_input = serde_json::to_vec(&input)?;
stdin.write_all(&serialized_input).await?;
let output = child.wait_with_output().await?;
let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for solc")?;
stdin
.write_all(&serialized_input)
.await
.context("Failed to write Standard JSON to solc stdin")?;
let output = child
.wait_with_output()
.await
.context("Failed while waiting for solc process to finish")?;
if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input)?;
let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&output.stderr);
tracing::error!(
status = %output.status,
@@ -153,12 +167,14 @@ impl SolidityCompiler for Solc {
anyhow::bail!("Compilation failed with an error: {message}");
}
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&output.stdout)
)
})?;
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
    .map_err(|e| {
        anyhow::anyhow!(
            "failed to parse solc JSON output: {e}\nstdout: {}",
            String::from_utf8_lossy(&output.stdout)
        )
    })
    .context("Failed to parse solc standard JSON output")?;
// Detecting if the compiler output contained errors and reporting them through logs and
// errors instead of returning the compiler output that might contain errors.
@@ -178,7 +194,12 @@ impl SolidityCompiler for Solc {
for (contract_path, contracts) in parsed.contracts {
let map = compiler_output
.contracts
.entry(contract_path.canonicalize()?)
.entry(contract_path.canonicalize().with_context(|| {
format!(
"Failed to canonicalize contract path {}",
contract_path.display()
)
})?)
.or_default();
for (contract_name, contract_info) in contracts.into_iter() {
let source_code = contract_info
@@ -207,7 +228,9 @@ impl SolidityCompiler for Solc {
config: &Arguments,
version: impl Into<VersionOrRequirement>,
) -> anyhow::Result<PathBuf> {
let path = download_solc(config.directory(), version, config.wasm).await?;
let path = download_solc(config.directory(), version, config.wasm)
.await
.context("Failed to download/get path to solc binary")?;
Ok(path)
}
@@ -230,8 +253,19 @@ impl SolidityCompiler for Solc {
let child = Command::new(self.solc_path.as_path())
.arg("--version")
.stdout(Stdio::piped())
.spawn()?;
let output = child.wait_with_output()?;
.spawn()
.with_context(|| {
format!(
"Failed to spawn solc at {} to get version",
self.solc_path.display()
)
})?;
let output = child.wait_with_output().with_context(|| {
format!(
"Failed waiting for solc at {} to finish --version",
self.solc_path.display()
)
})?;
let output = String::from_utf8_lossy(&output.stdout);
let version_line = output
.split("Version: ")
@@ -242,7 +276,9 @@ impl SolidityCompiler for Solc {
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
let version = Version::parse(version_string).with_context(|| {
format!("Failed to parse solc semver from '{version_string}'")
})?;
vacant_entry.insert(version.clone());
+20
View File
@@ -115,6 +115,18 @@ pub struct Arguments {
#[arg(short, long = "kitchensink", default_value = "substrate-node")]
pub kitchensink: PathBuf,
/// The path to the `revive-dev-node` executable.
///
/// By default it uses `revive-dev-node` binary found in `$PATH`.
#[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
pub revive_dev_node: PathBuf,
/// By default the tool uses the revive-dev-node when it's running differential tests against
/// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to
/// configure the tool to use kitchensink rather than the dev-node.
#[arg(long)]
pub use_kitchensink_not_dev_node: bool,
/// The path to the `eth_proxy` executable.
///
/// By default it uses `eth-rpc` binary found in `$PATH`.
@@ -124,6 +136,14 @@ pub struct Arguments {
/// Controls if the compilation cache should be invalidated or not.
#[arg(short, long)]
pub invalidate_compilation_cache: bool,
/// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")]
pub report_include_compiler_input: bool,
/// Controls if the compiler output is included in the final report.
#[clap(long = "report.include-compiler-output")]
pub report_include_compiler_output: bool,
}
impl Arguments {
+122 -19
View File
@@ -9,12 +9,12 @@ use std::{
use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_config::Arguments;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
use anyhow::{Error, Result};
use anyhow::{Context as _, Error, Result};
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
@@ -29,12 +29,16 @@ impl CachedCompiler {
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
let mut cache = ArtifactsCache::new(path);
if invalidate_cache {
cache = cache.with_invalidated_cache().await?;
cache = cache
.with_invalidated_cache()
.await
.context("Failed to invalidate compilation cache directory")?;
}
Ok(Self(cache))
}
/// Compiles or gets the compilation artifacts from the cache.
#[allow(clippy::too_many_arguments)]
#[instrument(
level = "debug",
skip_all,
@@ -52,6 +56,19 @@ impl CachedCompiler {
mode: &Mode,
config: &Arguments,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compilation_success_report_callback: impl Fn(
Version,
PathBuf,
bool,
Option<CompilerInput>,
CompilerOutput,
) + Clone,
compilation_failure_report_callback: impl Fn(
Option<Version>,
Option<PathBuf>,
Option<CompilerInput>,
String,
),
) -> Result<(CompilerOutput, Version)> {
static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
Lazy::new(Default::default);
@@ -61,10 +78,21 @@ impl CachedCompiler {
config,
compiler_version_or_requirement,
)
.await?;
.await
.inspect_err(|err| compilation_failure_report_callback(None, None, None, err.to_string()))
.context("Failed to obtain compiler executable path")?;
let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
.version()
.await?;
.await
.inspect_err(|err| {
compilation_failure_report_callback(
None,
Some(compiler_path.clone()),
None,
err.to_string(),
)
})
.context("Failed to query compiler version")?;
let cache_key = CacheKey {
platform_key: P::config_id().to_string(),
@@ -74,13 +102,23 @@ impl CachedCompiler {
};
let compilation_callback = || {
let compiler_path = compiler_path.clone();
let compiler_version = compiler_version.clone();
let compilation_success_report_callback = compilation_success_report_callback.clone();
async move {
compile_contracts::<P>(
metadata.directory()?,
metadata
.directory()
.context("Failed to get metadata directory while preparing compilation")?,
compiler_path,
metadata.files_to_compile()?,
compiler_version,
metadata
.files_to_compile()
.context("Failed to enumerate files to compile from metadata")?,
mode,
deployed_libraries,
compilation_success_report_callback,
compilation_failure_report_callback,
)
.map(|compilation_result| compilation_result.map(CacheValue::new))
.await
@@ -100,7 +138,10 @@ impl CachedCompiler {
Some(_) => {
debug!("Deployed libraries defined, recompilation must take place");
debug!("Cache miss");
compilation_callback().await?.compiler_output
compilation_callback()
.await
.context("Compilation callback for deployed libraries failed")?
.compiler_output
}
// If no deployed libraries are specified then we can follow the cached flow and attempt
// to lookup the compilation artifacts in the cache.
@@ -125,10 +166,24 @@ impl CachedCompiler {
};
let _guard = mutex.lock().await;
self.0
.get_or_insert_with(&cache_key, compilation_callback)
.await
.map(|value| value.compiler_output)?
match self.0.get(&cache_key).await {
Some(cache_value) => {
compilation_success_report_callback(
compiler_version.clone(),
compiler_path,
true,
None,
cache_value.compiler_output.clone(),
);
cache_value.compiler_output
}
None => {
compilation_callback()
.await
.context("Compilation callback failed (cache miss path)")?
.compiler_output
}
}
}
};
@@ -136,19 +191,34 @@ impl CachedCompiler {
}
}
#[allow(clippy::too_many_arguments)]
async fn compile_contracts<P: Platform>(
metadata_directory: impl AsRef<Path>,
compiler_path: impl AsRef<Path>,
compiler_version: Version,
mut files_to_compile: impl Iterator<Item = PathBuf>,
mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compilation_success_report_callback: impl Fn(
Version,
PathBuf,
bool,
Option<CompilerInput>,
CompilerOutput,
),
compilation_failure_report_callback: impl Fn(
Option<Version>,
Option<PathBuf>,
Option<CompilerInput>,
String,
),
) -> Result<CompilerOutput> {
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol")
.with_use_cached_fs(true)
.collect::<Vec<_>>();
Compiler::<P::Compiler>::new()
let compiler = Compiler::<P::Compiler>::new()
.with_allow_path(metadata_directory)
// Handling the modes
.with_optimization(mode.optimize_setting)
@@ -156,6 +226,14 @@ async fn compile_contracts<P: Platform>(
// Adding the contract sources to the compiler.
.try_then(|compiler| {
files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
})
.inspect_err(|err| {
compilation_failure_report_callback(
Some(compiler_version.clone()),
Some(compiler_path.as_ref().to_path_buf()),
None,
err.to_string(),
)
})?
// Adding the deployed libraries to the compiler.
.then(|compiler| {
@@ -171,9 +249,29 @@ async fn compile_contracts<P: Platform>(
.fold(compiler, |compiler, (ident, address, path)| {
compiler.with_library(path, ident.as_str(), *address)
})
})
.try_build(compiler_path)
});
let compiler_input = compiler.input();
let compiler_output = compiler
.try_build(compiler_path.as_ref())
.await
.inspect_err(|err| {
compilation_failure_report_callback(
Some(compiler_version.clone()),
Some(compiler_path.as_ref().to_path_buf()),
Some(compiler_input.clone()),
err.to_string(),
)
})
.context("Failed to configure compiler with sources and options")?;
compilation_success_report_callback(
compiler_version,
compiler_path.as_ref().to_path_buf(),
false,
Some(compiler_input),
compiler_output.clone(),
);
Ok(compiler_output)
}
struct ArtifactsCache {
@@ -191,15 +289,20 @@ impl ArtifactsCache {
pub async fn with_invalidated_cache(self) -> Result<Self> {
cacache::clear(self.path.as_path())
.await
.map_err(Into::<Error>::into)?;
.map_err(Into::<Error>::into)
.with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
Ok(self)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
let key = bson::to_vec(key)?;
let value = bson::to_vec(value)?;
cacache::write(self.path.as_path(), key.encode_hex(), value).await?;
let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
cacache::write(self.path.as_path(), key.encode_hex(), value)
.await
.with_context(|| {
format!("Failed to write cache entry under {}", self.path.display())
})?;
Ok(())
}
+49 -20
View File
@@ -22,6 +22,7 @@ use anyhow::Context;
use futures::TryStreamExt;
use indexmap::IndexMap;
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use revive_dt_format::case::Case;
@@ -51,6 +52,9 @@ pub struct CaseState<T: Platform> {
/// Stores the version used for the current case.
compiler_version: Version,
/// The execution reporter.
execution_reporter: ExecutionSpecificReporter,
phantom: PhantomData<T>,
}
@@ -62,12 +66,14 @@ where
compiler_version: Version,
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
execution_reporter: ExecutionSpecificReporter,
) -> Self {
Self {
compiled_contracts,
deployed_contracts,
variables: Default::default(),
compiler_version,
execution_reporter,
phantom: PhantomData,
}
}
@@ -80,18 +86,22 @@ where
) -> anyhow::Result<StepOutput> {
match step {
Step::FunctionCall(input) => {
let (receipt, geth_trace, diff_mode) =
self.handle_input(metadata, input, node).await?;
let (receipt, geth_trace, diff_mode) = self
.handle_input(metadata, input, node)
.await
.context("Failed to handle function call step")?;
Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
}
Step::BalanceAssertion(balance_assertion) => {
self.handle_balance_assertion(metadata, balance_assertion, node)
.await?;
.await
.context("Failed to handle balance assertion step")?;
Ok(StepOutput::BalanceAssertion)
}
Step::StorageEmptyAssertion(storage_empty) => {
self.handle_storage_empty(metadata, storage_empty, node)
.await?;
.await
.context("Failed to handle storage empty assertion step")?;
Ok(StepOutput::StorageEmptyAssertion)
}
}
@@ -107,18 +117,23 @@ where
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
let deployment_receipts = self
.handle_input_contract_deployment(metadata, input, node)
.await?;
.await
.context("Failed during contract deployment phase of input handling")?;
let execution_receipt = self
.handle_input_execution(input, deployment_receipts, node)
.await?;
.await
.context("Failed during transaction execution phase of input handling")?;
let tracing_result = self
.handle_input_call_frame_tracing(&execution_receipt, node)
.await?;
self.handle_input_variable_assignment(input, &tracing_result)?;
.await
.context("Failed during callframe tracing phase of input handling")?;
self.handle_input_variable_assignment(input, &tracing_result)
.context("Failed to assign variables from callframe output")?;
let (_, (geth_trace, diff_mode)) = try_join!(
self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
self.handle_input_diff(&execution_receipt, node)
)?;
)
.context("Failed while evaluating expectations and diffs in parallel")?;
Ok((execution_receipt, geth_trace, diff_mode))
}
@@ -130,9 +145,11 @@ where
node: &T::Blockchain,
) -> anyhow::Result<()> {
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
.await?;
.await
.context("Failed to deploy contract for balance assertion")?;
self.handle_balance_assertion_execution(balance_assertion, node)
.await?;
.await
.context("Failed to execute balance assertion")?;
Ok(())
}
@@ -144,9 +161,11 @@ where
node: &T::Blockchain,
) -> anyhow::Result<()> {
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
.await?;
.await
.context("Failed to deploy contract for storage empty assertion")?;
self.handle_storage_empty_assertion_execution(storage_empty, node)
.await?;
.await
.context("Failed to execute storage empty assertion")?;
Ok(())
}
@@ -185,7 +204,8 @@ where
value,
node,
)
.await?
.await
.context("Failed to get or deploy contract instance during input execution")?
{
receipts.insert(instance.clone(), receipt);
}
@@ -207,7 +227,7 @@ where
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&input.instance)
.context("Failed to find deployment receipt"),
.context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => {
let tx = match input
.legacy_transaction(node, self.default_resolution_context())
@@ -379,7 +399,8 @@ where
let actual = &tracing_result.output.as_ref().unwrap_or_default();
if !expected
.is_equivalent(actual, resolver, resolution_context)
.await?
.await
.context("Failed to resolve calldata equivalence for return data assertion")?
{
tracing::error!(
?execution_receipt,
@@ -442,7 +463,8 @@ where
let expected = Calldata::new_compound([expected]);
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await?
.await
.context("Failed to resolve event topic equivalence")?
{
tracing::error!(
event_idx,
@@ -462,7 +484,8 @@ where
let actual = &actual_event.data().data;
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await?
.await
.context("Failed to resolve event value equivalence")?
{
tracing::error!(
event_idx,
@@ -495,8 +518,12 @@ where
let trace = node
.trace_transaction(execution_receipt, trace_options)
.await?;
let diff = node.state_diff(execution_receipt).await?;
.await
.context("Failed to obtain geth prestate tracer output")?;
let diff = node
.state_diff(execution_receipt)
.await
.context("Failed to obtain state diff for transaction")?;
Ok((trace, diff))
}
@@ -718,6 +745,8 @@ where
instance_address = ?address,
"Deployed contract"
);
self.execution_reporter
.report_contract_deployed_event(contract_instance.clone(), address)?;
self.deployed_contracts.insert(
contract_instance.clone(),
+412 -144
View File
@@ -18,8 +18,12 @@ use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::IndexMap;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier,
};
use temp_dir::TempDir;
use tokio::{sync::mpsc, try_join};
use tokio::{join, try_join};
use tracing::{debug, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -39,7 +43,6 @@ use revive_dt_format::{
mode::ParsedMode,
};
use revive_dt_node::{Node, pool::NodePool};
use revive_dt_report::reporter::{Report, Span};
use crate::cached_compiler::CachedCompiler;
@@ -53,13 +56,11 @@ struct Test<'a> {
mode: Mode,
case_idx: CaseIdx,
case: &'a Case,
reporter: TestSpecificReporter,
}
/// This represents the results that we gather from running test cases.
type CaseResult = Result<usize, anyhow::Error>;
fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli()?;
let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?;
info!(
leader = args.leader.to_string(),
follower = args.follower.to_string(),
@@ -69,20 +70,42 @@ fn main() -> anyhow::Result<()> {
"Differential testing tool has been initialized"
);
let body = async {
for (corpus, tests) in collect_corpora(&args)? {
let span = Span::new(corpus, args.clone())?;
match &args.compile_only {
Some(platform) => compile_corpus(&args, &tests, platform, span).await,
None => execute_corpus(&args, &tests, span).await?,
let (reporter, report_aggregator_task) = ReportAggregator::new(args.clone()).into_task();
let number_of_threads = args.number_of_threads;
let body = async move {
let tests = collect_corpora(&args)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
match &args.compile_only {
Some(platform) => {
compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await
}
Report::save()?;
None => execute_corpus(&args, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")?,
}
Ok(())
};
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.number_of_threads)
.worker_threads(number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
@@ -153,7 +176,8 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<Metad
async fn run_driver<L, F>(
args: &Arguments,
metadata_files: &[MetadataFile],
span: Span,
reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()>
where
L: Platform,
@@ -161,13 +185,14 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test<'_>, CaseResult)>();
let tests = prepare_tests::<L, F>(args, metadata_files, reporter.clone());
let driver_task = start_driver_task::<L, F>(args, tests)
.await
.context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter);
let tests = prepare_tests::<L, F>(args, metadata_files);
let driver_task = start_driver_task::<L, F>(args, tests, span, report_tx).await?;
let status_reporter_task = start_reporter_task(report_rx);
tokio::join!(status_reporter_task, driver_task);
let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
rtn?;
Ok(())
}
@@ -175,6 +200,7 @@ where
fn prepare_tests<'a, L, F>(
args: &Arguments,
metadata_files: &'a [MetadataFile],
reporter: Reporter,
) -> impl Stream<Item = Test<'a>>
where
L: Platform,
@@ -201,27 +227,25 @@ where
.into_iter()
.map(move |mode| (metadata_file, case_idx, case, mode))
})
.fold(
IndexMap::<_, BTreeMap<_, Vec<_>>>::new(),
|mut map, (metadata_file, case_idx, case, mode)| {
let test = Test {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
};
map.entry(mode)
.or_default()
.entry(test.case_idx)
.or_default()
.push(test);
map
},
)
.into_values()
.flatten()
.flat_map(|(_, value)| value.into_iter())
.map(move |(metadata_file, case_idx, case, mode)| Test {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
reporter: reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
})
.inspect(|test| {
test.reporter
.report_test_case_discovery_event()
.expect("Can't fail")
})
.collect::<Vec<_>>()
.into_iter()
// Filter the test out if the leader and follower do not support the target.
.filter(|test| {
let leader_support =
@@ -236,7 +260,30 @@ where
leader_support,
follower_support,
"Target is not supported, throwing metadata file out"
)
);
test
.reporter
.report_test_ignored_event(
"Either the leader or the follower do not support the target desired by the test",
IndexMap::from_iter([
(
"test_desired_targets".to_string(),
serde_json::to_value(test.metadata.targets.as_ref())
.expect("Can't fail")
),
(
"leader_support".to_string(),
serde_json::to_value(leader_support)
.expect("Can't fail")
),
(
"follower_support".to_string(),
serde_json::to_value(follower_support)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
is_allowed
@@ -248,6 +295,13 @@ where
file_path = %test.metadata.relative_path().display(),
"Metadata file is ignored, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"Metadata file is ignored, therefore all cases are ignored",
IndexMap::new(),
)
.expect("Can't fail");
false
} else {
true
@@ -261,6 +315,13 @@ where
case_idx = %test.case_idx,
"Case is ignored, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"Case is ignored",
IndexMap::new(),
)
.expect("Can't fail");
false
} else {
true
@@ -283,6 +344,29 @@ where
follower_compatibility,
"EVM Version is incompatible, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"EVM version is incompatible with either the leader or the follower",
IndexMap::from_iter([
(
"test_desired_evm_version".to_string(),
serde_json::to_value(test.metadata.required_evm_version)
.expect("Can't fail")
),
(
"leader_compatibility".to_string(),
serde_json::to_value(leader_compatibility)
.expect("Can't fail")
),
(
"follower_compatibility".to_string(),
serde_json::to_value(follower_compatibility)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
is_allowed
@@ -311,6 +395,24 @@ where
follower_support,
"Compilers do not support this, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"Compilers do not support this mode either for the leader or for the follower.",
IndexMap::from_iter([
(
"leader_support".to_string(),
serde_json::to_value(leader_support)
.expect("Can't fail")
),
(
"follower_support".to_string(),
serde_json::to_value(follower_support)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
is_allowed.then_some(test)
@@ -322,9 +424,13 @@ async fn does_compiler_support_mode<P: Platform>(
mode: &Mode,
) -> anyhow::Result<bool> {
let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone());
let compiler_path =
P::Compiler::get_compiler_executable(args, compiler_version_or_requirement).await?;
let compiler_version = P::Compiler::new(compiler_path.clone()).version().await?;
let compiler_path = P::Compiler::get_compiler_executable(args, compiler_version_or_requirement)
.await
.context("Failed to obtain compiler executable path")?;
let compiler_version = P::Compiler::new(compiler_path.clone())
.version()
.await
.context("Failed to query compiler version")?;
Ok(P::Compiler::supports_mode(
&compiler_version,
@@ -336,8 +442,6 @@ async fn does_compiler_support_mode<P: Platform>(
async fn start_driver_task<'a, L, F>(
args: &Arguments,
tests: impl Stream<Item = Test<'a>>,
span: Span,
report_tx: mpsc::UnboundedSender<(Test<'a>, CaseResult)>,
) -> anyhow::Result<impl Future<Output = ()>>
where
L: Platform,
@@ -345,15 +449,22 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let leader_nodes = Arc::new(NodePool::<L::Blockchain>::new(args)?);
let follower_nodes = Arc::new(NodePool::<F::Blockchain>::new(args)?);
info!("Starting driver task");
let leader_nodes = Arc::new(
NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?,
);
let follower_nodes = Arc::new(
NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?,
);
let number_concurrent_tasks = args.number_of_concurrent_tasks();
let cached_compiler = Arc::new(
CachedCompiler::new(
args.directory().join("compilation_cache"),
args.invalidate_compilation_cache,
)
.await?,
.await
.context("Failed to initialize cached compiler")?,
);
Ok(tests.for_each_concurrent(
@@ -368,103 +479,129 @@ where
move |test| {
let leader_nodes = leader_nodes.clone();
let follower_nodes = follower_nodes.clone();
let report_tx = report_tx.clone();
let cached_compiler = cached_compiler.clone();
async move {
let leader_node = leader_nodes.round_robbin();
let follower_node = follower_nodes.round_robbin();
test.reporter
.report_leader_node_assigned_event(
leader_node.id(),
L::config_id(),
leader_node.connection_string(),
)
.expect("Can't fail");
test.reporter
.report_follower_node_assigned_event(
follower_node.id(),
F::config_id(),
follower_node.connection_string(),
)
.expect("Can't fail");
let reporter = test.reporter.clone();
let result = handle_case_driver::<L, F>(
test.metadata_file_path,
test.metadata,
test.case_idx,
test.case,
test.mode.clone(),
test,
args,
cached_compiler,
leader_node,
follower_node,
span,
)
.await;
report_tx
.send((test, result))
.expect("Failed to send report");
match result {
Ok(steps_executed) => reporter
.report_test_succeeded_event(steps_executed)
.expect("Can't fail"),
Err(error) => reporter
.report_test_failed_event(error.to_string())
.expect("Can't fail"),
}
}
},
))
}
async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, CaseResult)>) {
#[allow(clippy::uninlined_format_args)]
#[allow(irrefutable_let_patterns)]
async fn start_cli_reporting_task(reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter);
let start = Instant::now();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const COLOUR_RESET: &str = "\x1B[0m";
const GREY: &str = "\x1B[90m";
const COLOR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0;
let mut number_of_failures = 0;
let mut failures = vec![];
// Wait for reports to come from our test runner. When the channel closes, this ends.
let mut buf = BufWriter::new(stderr());
while let Some((test, case_result)) = report_rx.recv().await {
let case_name = test.case.name.as_deref().unwrap_or("unnamed_case");
let case_idx = test.case_idx;
let test_path = test.metadata_file_path.display();
let test_mode = test.mode.clone();
while let Ok(event) = aggregator_events_rx.recv().await {
let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
metadata_file_path,
mode,
case_status,
} = event
else {
continue;
};
match case_result {
Ok(_inputs) => {
number_of_successes += 1;
let _ = writeln!(
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
number_of_successes += 1;
writeln!(
buf,
"{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
)
}
TestCaseStatus::Failed { reason } => {
number_of_failures += 1;
writeln!(
buf,
"{}{}Case Failed{} - Reason: {}{}",
RED,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
)
}
TestCaseStatus::Ignored { reason, .. } => writeln!(
buf,
"{GREEN}Case Succeeded:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})"
);
}
Err(err) => {
number_of_failures += 1;
let _ = writeln!(
buf,
"{RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})"
);
failures.push((test, err));
}
}
}
let _ = writeln!(buf,);
let elapsed = start.elapsed();
// Now, log the failures with more complete errors at the bottom, like `cargo test` does, so
// that we don't have to scroll through the entire output to find them.
if !failures.is_empty() {
let _ = writeln!(buf, "{BOLD}Failures:{BOLD_RESET}\n");
for failure in failures {
let (test, err) = failure;
let case_name = test.case.name.as_deref().unwrap_or("unnamed_case");
let case_idx = test.case_idx;
let test_path = test.metadata_file_path.display();
let test_mode = test.mode.clone();
let _ = writeln!(
buf,
"---- {RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode}) ----\n\n{err}\n"
);
"{}{}Case Ignored{} - Reason: {}{}",
GREY,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
),
};
}
let _ = writeln!(buf);
}
// Summary at the end.
let _ = writeln!(
buf,
"{} cases: {GREEN}{number_of_successes}{COLOUR_RESET} cases succeeded, {RED}{number_of_failures}{COLOUR_RESET} cases failed in {} seconds",
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
number_of_successes + number_of_failures,
elapsed.as_secs()
GREEN,
number_of_successes,
COLOR_RESET,
RED,
number_of_failures,
COLOR_RESET,
start.elapsed().as_secs()
);
}
@@ -474,25 +611,20 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, C
name = "Handling Case"
skip_all,
fields(
metadata_file_path = %metadata.relative_path().display(),
mode = %mode,
%case_idx,
case_name = case.name.as_deref().unwrap_or("Unnamed Case"),
metadata_file_path = %test.metadata.relative_path().display(),
mode = %test.mode,
case_idx = %test.case_idx,
case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
leader_node = leader_node.id(),
follower_node = follower_node.id(),
)
)]
async fn handle_case_driver<L, F>(
metadata_file_path: &Path,
metadata: &MetadataFile,
case_idx: CaseIdx,
case: &Case,
mode: Mode,
test: Test<'_>,
config: &Arguments,
cached_compiler: Arc<CachedCompiler>,
leader_node: &L::Blockchain,
follower_node: &F::Blockchain,
_: Span,
) -> anyhow::Result<usize>
where
L: Platform,
@@ -500,6 +632,13 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let leader_reporter = test
.reporter
.execution_specific_reporter(leader_node.id(), NodeDesignation::Leader);
let follower_reporter = test
.reporter
.execution_specific_reporter(follower_node.id(), NodeDesignation::Follower);
let (
(
CompilerOutput {
@@ -514,14 +653,73 @@ where
_,
),
) = try_join!(
cached_compiler.compile_contracts::<L>(metadata, metadata_file_path, &mode, config, None),
cached_compiler.compile_contracts::<F>(metadata, metadata_file_path, &mode, config, None)
)?;
cached_compiler.compile_contracts::<L>(
test.metadata,
test.metadata_file_path,
&test.mode,
config,
None,
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
leader_reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
leader_reporter
.report_pre_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
),
cached_compiler.compile_contracts::<F>(
test.metadata,
test.metadata_file_path,
&test.mode,
config,
None,
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
follower_reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
follower_reporter
.report_pre_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
)
)
.context("Failed to compile pre-link contracts for leader/follower in parallel")?;
let mut leader_deployed_libraries = None::<HashMap<_, _>>;
let mut follower_deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = metadata.contract_sources()?;
for library_instance in metadata
let mut contract_sources = test
.metadata
.contract_sources()
.context("Failed to retrieve contract sources from metadata")?;
for library_instance in test
.metadata
.libraries
.iter()
.flatten()
@@ -561,7 +759,8 @@ where
// Getting the deployer address from the cases themselves. This ensures that
// deployments come from different accounts, so we're not serialized on a single
// account's nonce.
let deployer_address = case
let deployer_address = test
.case
.steps
.iter()
.filter_map(|step| match step {
@@ -620,6 +819,24 @@ where
),
);
}
if let Some(ref leader_deployed_libraries) = leader_deployed_libraries {
leader_reporter.report_libraries_deployed_event(
leader_deployed_libraries
.clone()
.into_iter()
.map(|(key, (_, address, _))| (key, address))
.collect::<BTreeMap<_, _>>(),
)?;
}
if let Some(ref follower_deployed_libraries) = follower_deployed_libraries {
follower_reporter.report_libraries_deployed_event(
follower_deployed_libraries
.clone()
.into_iter()
.map(|(key, (_, address, _))| (key, address))
.collect::<BTreeMap<_, _>>(),
)?;
}
let (
(
@@ -636,35 +853,80 @@ where
),
) = try_join!(
cached_compiler.compile_contracts::<L>(
metadata,
metadata_file_path,
&mode,
test.metadata,
test.metadata_file_path,
&test.mode,
config,
leader_deployed_libraries.as_ref()
leader_deployed_libraries.as_ref(),
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
leader_reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
leader_reporter
.report_post_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
),
cached_compiler.compile_contracts::<F>(
metadata,
metadata_file_path,
&mode,
test.metadata,
test.metadata_file_path,
&test.mode,
config,
follower_deployed_libraries.as_ref()
follower_deployed_libraries.as_ref(),
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
follower_reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
follower_reporter
.report_post_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
)
)?;
)
.context("Failed to compile post-link contracts for leader/follower in parallel")?;
let leader_state = CaseState::<L>::new(
leader_compiler_version,
leader_post_link_contracts,
leader_deployed_libraries.unwrap_or_default(),
leader_reporter,
);
let follower_state = CaseState::<F>::new(
follower_compiler_version,
follower_post_link_contracts,
follower_deployed_libraries.unwrap_or_default(),
follower_reporter,
);
let mut driver = CaseDriver::<L, F>::new(
metadata,
case,
test.metadata,
test.case,
leader_node,
follower_node,
leader_state,
@@ -679,14 +941,15 @@ where
async fn execute_corpus(
args: &Arguments,
tests: &[MetadataFile],
span: Span,
reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
match (&args.leader, &args.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(args, tests, span).await?
run_driver::<Geth, Kitchensink>(args, tests, reporter, report_aggregator_task).await?
}
(TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(args, tests, span).await?
run_driver::<Geth, Geth>(args, tests, reporter, report_aggregator_task).await?
}
_ => unimplemented!(),
}
@@ -698,7 +961,8 @@ async fn compile_corpus(
config: &Arguments,
tests: &[MetadataFile],
platform: &TestingPlatform,
_: Span,
_: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) {
let tests = tests.iter().flat_map(|metadata| {
metadata
@@ -713,8 +977,8 @@ async fn compile_corpus(
.map(Arc::new)
.expect("Failed to create the cached compiler");
futures::stream::iter(tests)
.for_each_concurrent(None, |(metadata, mode)| {
let compilation_task =
futures::stream::iter(tests).for_each_concurrent(None, |(metadata, mode)| {
let cached_compiler = cached_compiler.clone();
async move {
@@ -727,6 +991,8 @@ async fn compile_corpus(
&mode,
config,
None,
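// No-op success/failure reporting callbacks: the compile-only pass does not
// record compilation events.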
|_, _, _, _, _| {},
|_, _, _, _| {},
)
.await;
}
@@ -738,11 +1004,13 @@ async fn compile_corpus(
&mode,
config,
None,
|_, _, _, _, _| {},
|_, _, _, _| {},
)
.await;
}
}
}
})
.await;
});
let _ = join!(compilation_task, report_aggregator_task);
}
+3 -2
View File
@@ -71,6 +71,7 @@ impl Case {
define_wrapper_type!(
/// A wrapper type for the index of test cases found in metadata file.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CaseIdx(usize) impl Display;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(transparent)]
pub struct CaseIdx(usize) impl Display, FromStr;
);
+17 -15
View File
@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
use tracing::{debug, info};
use crate::metadata::{Metadata, MetadataFile};
use anyhow::Context as _;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)]
@@ -20,23 +21,24 @@ impl Corpus {
pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
let mut corpus = File::open(file_path.as_ref())
.map_err(anyhow::Error::from)
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?;
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
.with_context(|| {
format!(
"Failed to open and deserialize corpus file at {}",
file_path.as_ref().display()
)
})?;
let corpus_directory = file_path
.as_ref()
.canonicalize()
.context("Failed to canonicalize the path to the corpus file")?
.parent()
.context("Corpus file has no parent")?
.to_path_buf();
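// Illustrative example: for a corpus file at /repo/corpora/main.json, a relative
// entry such as "tests/erc20.json" resolves to /repo/corpora/tests/erc20.json
// in the loop below.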
for path in corpus.paths_iter_mut() {
*path = file_path
.as_ref()
.parent()
.ok_or_else(|| {
anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display())
})?
.canonicalize()
.map_err(|error| {
anyhow::anyhow!(
"Failed to canonicalize path to corpus '{}': {error}",
path.display()
)
})?
.join(path.as_path())
*path = corpus_directory.join(path.as_path())
}
Ok(corpus)
+38 -14
View File
@@ -268,7 +268,11 @@ impl Input {
) -> anyhow::Result<Bytes> {
match self.method {
Method::Deployer | Method::Fallback => {
let calldata = self.calldata.calldata(resolver, context).await?;
let calldata = self
.calldata
.calldata(resolver, context)
.await
.context("Failed to produce calldata for deployer/fallback method")?;
Ok(calldata.into())
}
@@ -283,14 +287,15 @@ impl Input {
// Overloads are handled by providing the full function signature in the "function
// name".
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
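// E.g. a full signature such as "transfer(address,uint256)" is parsed directly,
// while a bare name such as "transfer" is matched against the ABI by signature
// prefix.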
let selector = if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
let selector =
if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
.context(
"Failed to parse the provided function name into a function signature",
)?
.selector()
} else {
abi.functions()
} else {
abi.functions()
.find(|function| function.signature().starts_with(function_name))
.ok_or_else(|| {
anyhow::anyhow!(
@@ -298,9 +303,13 @@ impl Input {
function_name,
&self.instance
)
})?
})
.with_context(|| format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
))?
.selector()
};
};
// Allocating a vector that we will be using for the calldata. The vector size will be:
// 4 bytes for the function selector.
@@ -312,7 +321,8 @@ impl Input {
calldata.extend(selector.0);
self.calldata
.calldata_into_slice(&mut calldata, resolver, context)
.await?;
.await
.context("Failed to append encoded argument to calldata buffer")?;
Ok(calldata.into())
}
@@ -325,7 +335,10 @@ impl Input {
resolver: &impl ResolverApi,
context: ResolutionContext<'_>,
) -> anyhow::Result<TransactionRequest> {
let input_data = self.encoded_input(resolver, context).await?;
let input_data = self
.encoded_input(resolver, context)
.await
.context("Failed to encode input bytes for transaction request")?;
let transaction_request = TransactionRequest::default().from(self.caller).value(
self.value
.map(|value| value.into_inner())
@@ -437,7 +450,8 @@ impl Calldata {
})
.buffered(0xFF)
.try_collect::<Vec<_>>()
.await?;
.await
.context("Failed to resolve one or more calldata arguments")?;
buffer.extend(resolved.into_iter().flatten());
}
@@ -478,7 +492,10 @@ impl Calldata {
std::borrow::Cow::Borrowed(other)
};
let this = this.resolve(resolver, context).await?;
let this = this
.resolve(resolver, context)
.await
.context("Failed to resolve calldata item during equivalence check")?;
let other = U256::from_be_slice(&other);
Ok(this == other)
})
@@ -664,17 +681,24 @@ impl<T: AsRef<str>> CalldataToken<T> {
let current_block_number = match context.tip_block_number() {
Some(block_number) => *block_number,
None => resolver.last_block_number().await?,
None => resolver.last_block_number().await.context(
"Failed to query last block number while resolving $BLOCK_HASH",
)?,
};
let desired_block_number = current_block_number.saturating_sub(offset);
let block_hash = resolver.block_hash(desired_block_number.into()).await?;
let block_hash = resolver
.block_hash(desired_block_number.into())
.await
.context("Failed to resolve block hash for desired block number")?;
Ok(U256::from_be_bytes(block_hash.0))
} else if item == Self::BLOCK_NUMBER_VARIABLE {
let current_block_number = match context.tip_block_number() {
Some(block_number) => *block_number,
None => resolver.last_block_number().await?,
None => resolver.last_block_number().await.context(
"Failed to query last block number while resolving $BLOCK_NUMBER",
)?,
};
Ok(U256::from(current_block_number))
} else if item == Self::BLOCK_TIMESTAMP_VARIABLE {
+9 -1
View File
@@ -132,7 +132,15 @@ impl Metadata {
) in contracts
{
let alias = alias.clone();
let absolute_path = directory.join(contract_source_path).canonicalize()?;
let absolute_path = directory
.join(contract_source_path)
.canonicalize()
.map_err(|error| {
anyhow::anyhow!(
"Failed to canonicalize contract source path '{}': {error}",
directory.join(contract_source_path).display()
)
})?;
let contract_ident = contract_ident.clone();
sources.insert(
+19 -5
View File
@@ -1,3 +1,4 @@
use anyhow::Context;
use regex::Regex;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use serde::{Deserialize, Serialize};
@@ -44,21 +45,34 @@ impl FromStr for ParsedMode {
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(ModePipeline::from_str(m.as_str())?),
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(ModeOptimizerSetting::from_str(m.as_str())?),
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(semver::VersionReq::parse(m.as_str()).map_err(|e| {
anyhow::anyhow!("Cannot parse the version requirement '{}': {e}", m.as_str())
})?),
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
+105 -46
View File
@@ -101,10 +101,13 @@ impl GethNode {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
create_dir_all(&self.base_directory)
.context("Failed to create base directory for geth node")?;
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)
.context("Failed to deserialize geth genesis JSON")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -116,7 +119,11 @@ impl GethNode {
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
serde_json::to_writer(File::create(&genesis_path)?, &genesis)?;
serde_json::to_writer(
File::create(&genesis_path).context("Failed to create geth genesis file")?,
&genesis,
)
.context("Failed to serialize geth genesis JSON to file")?;
let mut child = Command::new(&self.geth)
.arg("--state.scheme")
@@ -127,16 +134,22 @@ impl GethNode {
.arg(genesis_path)
.stderr(Stdio::piped())
.stdout(Stdio::null())
.spawn()?;
.spawn()
.context("Failed to spawn geth --init process")?;
let mut stderr = String::new();
child
.stderr
.take()
.expect("should be piped")
.read_to_string(&mut stderr)?;
.read_to_string(&mut stderr)
.context("Failed to read geth --init stderr")?;
if !child.wait()?.success() {
if !child
.wait()
.context("Failed waiting for geth --init process to finish")?
.success()
{
anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
}
@@ -161,8 +174,11 @@ impl GethNode {
let stdout_logs_file = open_options
.clone()
.open(self.geth_stdout_log_file_path())?;
let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
.open(self.geth_stdout_log_file_path())
.context("Failed to open geth stdout logs file")?;
let stderr_logs_file = open_options
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file")?;
self.handle = Command::new(&self.geth)
.arg("--dev")
.arg("--datadir")
@@ -182,14 +198,24 @@ impl GethNode {
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(stderr_logs_file.try_clone()?)
.stdout(stdout_logs_file.try_clone()?)
.spawn()?
.stderr(
stderr_logs_file
.try_clone()
.context("Failed to clone geth stderr log file handle")?,
)
.stdout(
stdout_logs_file
.try_clone()
.context("Failed to clone geth stdout log file handle")?,
)
.spawn()
.context("Failed to spawn geth node process")?
.into();
if let Err(error) = self.wait_ready() {
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(error);
}
@@ -211,7 +237,8 @@ impl GethNode {
.write(false)
.append(false)
.truncate(false)
.open(self.geth_stderr_log_file_path())?;
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = Duration::from_millis(self.start_timeout);
let mut stderr = BufReader::new(logs_file).lines();
@@ -277,11 +304,18 @@ impl EthereumNode for GethNode {
&self,
transaction: TransactionRequest,
) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
let provider = self.provider().await?;
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?;
let pending_transaction = provider.send_transaction(transaction).await.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used
@@ -335,7 +369,11 @@ impl EthereumNode for GethNode {
transaction: &TransactionReceipt,
trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let provider = Arc::new(self.provider().await?);
let provider = Arc::new(
self.provider()
.await
.context("Failed to create provider for tracing")?,
);
poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -371,8 +409,10 @@ impl EthereumNode for GethNode {
});
match self
.trace_transaction(transaction, trace_options)
.await?
.try_into_pre_state_frame()?
.await
.context("Failed to trace transaction for prestate diff")?
.try_into_pre_state_frame()
.context("Failed to convert trace into pre-state frame")?
{
PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"),
@@ -382,7 +422,8 @@ impl EthereumNode for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_balance(address)
.await
.map_err(Into::into)
@@ -395,7 +436,8 @@ impl EthereumNode for GethNode {
keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_proof(address, keys)
.latest()
.await
@@ -407,7 +449,8 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_chain_id()
.await
.map_err(Into::into)
@@ -416,7 +459,8 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_transaction_receipt(*tx_hash)
.await?
.context("Failed to get the transaction receipt")
@@ -426,40 +470,48 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.gas_limit as _)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.beneficiary)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.and_then(|block| {
block
.header
@@ -471,27 +523,32 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.hash)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.timestamp)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_number()
.await
.map_err(Into::into)
@@ -576,8 +633,10 @@ impl Node for GethNode {
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.spawn()
.context("Failed to spawn geth --version process")?
.wait_with_output()
.context("Failed to wait for geth --version output")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
+138 -58
View File
@@ -54,6 +54,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
pub struct KitchensinkNode {
id: u32,
substrate_binary: PathBuf,
dev_node_binary: PathBuf,
eth_proxy_binary: PathBuf,
rpc_url: String,
base_directory: PathBuf,
@@ -63,6 +64,7 @@ pub struct KitchensinkNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
use_kitchensink_not_dev_node: bool,
/// This vector stores [`File`] objects that we use for logging which we want to flush when the
/// node object is dropped. We do not store them in a structured fashion at the moment (in
/// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -94,18 +96,30 @@ impl KitchensinkNode {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
create_dir_all(&self.base_directory)
.context("Failed to create base directory for kitchensink node")?;
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for kitchensink node")?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// Note: we do not pipe the logs of this process to a separate file since this is just a
// once-off export of the default chain spec and not part of the long-running node process.
let output = Command::new(&self.substrate_binary)
.arg("export-chain-spec")
.arg("--chain")
.arg("dev")
.output()?;
let output = if self.use_kitchensink_not_dev_node {
Command::new(&self.substrate_binary)
.arg("export-chain-spec")
.arg("--chain")
.arg("dev")
.output()
.context("Failed to export the chain-spec")?
} else {
Command::new(&self.dev_node_binary)
.arg("build-spec")
.arg("--chain")
.arg("dev")
.output()
.context("Failed to export the chain-spec")?
};
if !output.status.success() {
anyhow::bail!(
@@ -114,8 +128,10 @@ impl KitchensinkNode {
);
}
let content = String::from_utf8(output.stdout)?;
let mut chainspec_json: JsonValue = serde_json::from_str(&content)?;
let content = String::from_utf8(output.stdout)
.context("Failed to decode substrate export-chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
@@ -137,7 +153,8 @@ impl KitchensinkNode {
})
.collect();
let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)?;
let mut genesis = serde_json::from_str::<Genesis>(genesis)
.context("Failed to deserialize EVM genesis JSON for kitchensink")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -148,7 +165,8 @@ impl KitchensinkNode {
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
self.extract_balance_from_genesis_file(&genesis)?
self.extract_balance_from_genesis_file(&genesis)
.context("Failed to extract balances from EVM genesis JSON")?
};
merged_balances.append(&mut eth_balances);
@@ -156,9 +174,11 @@ impl KitchensinkNode {
json!(merged_balances);
serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)?,
std::fs::File::create(&template_chainspec_path)
.context("Failed to create kitchensink template chainspec file")?,
&chainspec_json,
)?;
)
.context("Failed to write kitchensink template chainspec JSON")?;
Ok(self)
}
@@ -184,11 +204,18 @@ impl KitchensinkNode {
// Start Substrate node
let kitchensink_stdout_logs_file = open_options
.clone()
.open(self.kitchensink_stdout_log_file_path())?;
.open(self.kitchensink_stdout_log_file_path())
.context("Failed to open kitchensink stdout logs file")?;
let kitchensink_stderr_logs_file = open_options
.clone()
.open(self.kitchensink_stderr_log_file_path())?;
self.process_substrate = Command::new(&self.substrate_binary)
.open(self.kitchensink_stderr_log_file_path())
.context("Failed to open kitchensink stderr logs file")?;
let node_binary_path = if self.use_kitchensink_not_dev_node {
self.substrate_binary.as_path()
} else {
self.dev_node_binary.as_path()
};
self.process_substrate = Command::new(node_binary_path)
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
@@ -203,10 +230,21 @@ impl KitchensinkNode {
.arg("Unsafe")
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(kitchensink_stdout_logs_file.try_clone()?)
.stderr(kitchensink_stderr_logs_file.try_clone()?)
.spawn()?
.stdout(
kitchensink_stdout_logs_file
.try_clone()
.context("Failed to clone kitchensink stdout log file handle")?,
)
.stderr(
kitchensink_stderr_logs_file
.try_clone()
.context("Failed to clone kitchensink stderr log file handle")?,
)
.spawn()
.context("Failed to spawn substrate node process")?
.into();
// Give the node a moment to boot
@@ -215,24 +253,39 @@ impl KitchensinkNode {
Self::SUBSTRATE_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after substrate start error")?;
return Err(error);
};
let eth_proxy_stdout_logs_file = open_options
.clone()
.open(self.proxy_stdout_log_file_path())?;
let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?;
.open(self.proxy_stdout_log_file_path())
.context("Failed to open eth-proxy stdout logs file")?;
let eth_proxy_stderr_logs_file = open_options
.open(self.proxy_stderr_log_file_path())
.context("Failed to open eth-proxy stderr logs file")?;
self.process_proxy = Command::new(&self.eth_proxy_binary)
.arg("--dev")
.arg("--rpc-port")
.arg(proxy_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(eth_proxy_stdout_logs_file.try_clone()?)
.stderr(eth_proxy_stderr_logs_file.try_clone()?)
.spawn()?
.stdout(
eth_proxy_stdout_logs_file
.try_clone()
.context("Failed to clone eth-proxy stdout log file handle")?,
)
.stderr(
eth_proxy_stderr_logs_file
.try_clone()
.context("Failed to clone eth-proxy stderr log file handle")?,
)
.spawn()
.context("Failed to spawn eth-proxy process")?
.into();
if let Err(error) = Self::wait_ready(
@@ -240,7 +293,8 @@ impl KitchensinkNode {
Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after eth-proxy start error")?;
return Err(error);
};
@@ -365,11 +419,14 @@ impl EthereumNode for KitchensinkNode {
) -> anyhow::Result<TransactionReceipt> {
let receipt = self
.provider()
.await?
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await?
.await
.context("Failed to submit transaction to kitchensink proxy")?
.get_receipt()
.await?;
.await
.context("Failed to fetch transaction receipt from kitchensink proxy")?;
Ok(receipt)
}
@@ -379,11 +436,12 @@ impl EthereumNode for KitchensinkNode {
trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let tx_hash = transaction.transaction_hash;
Ok(self
.provider()
.await?
self.provider()
.await
.context("Failed to create provider for debug tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await?)
.await
.context("Failed to obtain debug trace from kitchensink proxy")
}
async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
@@ -404,7 +462,8 @@ impl EthereumNode for KitchensinkNode {
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_balance(address)
.await
.map_err(Into::into)
@@ -416,7 +475,8 @@ impl EthereumNode for KitchensinkNode {
keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_proof(address, keys)
.latest()
.await
@@ -427,7 +487,8 @@ impl EthereumNode for KitchensinkNode {
impl ResolverApi for KitchensinkNode {
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_chain_id()
.await
.map_err(Into::into)
@@ -435,7 +496,8 @@ impl ResolverApi for KitchensinkNode {
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_transaction_receipt(*tx_hash)
.await?
.context("Failed to get the transaction receipt")
@@ -444,37 +506,45 @@ impl ResolverApi for KitchensinkNode {
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.gas_limit as _)
}
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.beneficiary)
}
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
}
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.and_then(|block| {
block
.header
@@ -485,25 +555,30 @@ impl ResolverApi for KitchensinkNode {
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.hash)
}
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.timestamp)
}
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_number()
.await
.map_err(Into::into)
@@ -529,6 +604,7 @@ impl Node for KitchensinkNode {
Self {
id,
substrate_binary: config.kitchensink.clone(),
dev_node_binary: config.revive_dev_node.clone(),
eth_proxy_binary: config.eth_proxy.clone(),
rpc_url: String::new(),
base_directory,
@@ -538,6 +614,7 @@ impl Node for KitchensinkNode {
wallet: Arc::new(wallet),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node,
// We know that we only need to be storing 4 files so we can specify that when creating
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4),
@@ -588,8 +665,10 @@ impl Node for KitchensinkNode {
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.spawn()
.context("Failed to spawn kitchensink --version")?
.wait_with_output()
.context("Failed to wait for kitchensink --version")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
@@ -1055,6 +1134,7 @@ mod tests {
Arguments {
kitchensink: PathBuf::from("substrate-node"),
eth_proxy: PathBuf::from("eth-rpc"),
use_kitchensink_not_dev_node: true,
..Default::default()
}
}
+17 -3
View File
@@ -9,6 +9,7 @@ use revive_dt_common::cached_fs::read_to_string;
use anyhow::Context;
use revive_dt_config::Arguments;
use tracing::info;
use crate::Node;
@@ -43,8 +44,10 @@ where
nodes.push(
handle
.join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?,
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
.context("Failed to join node spawn thread")?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
.context("Node failed to spawn")?,
);
}
@@ -63,6 +66,17 @@ where
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
let mut node = T::new(args);
node.spawn(genesis)?;
info!(
id = node.id(),
connection_string = node.connection_string(),
"Spawning node"
);
node.spawn(genesis)
.context("Failed to spawn node process")?;
info!(
id = node.id(),
connection_string = node.connection_string(),
"Spawned node"
);
Ok(node)
}
+7
View File
@@ -13,9 +13,16 @@ revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-compiler = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
[lints]
workspace = true
+561
View File
@@ -0,0 +1,561 @@
//! Implementation of the report aggregator task which consumes the events sent by the various
//! reporters and combines them into a single unified report.
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fs::OpenOptions,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
broadcast::{Sender, channel},
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::debug;
use crate::*;
pub struct ReportAggregator {
/* Internal Report State */
report: Report,
remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
/* Channels */
runner_tx: Option<UnboundedSender<RunnerEvent>>,
runner_rx: UnboundedReceiver<RunnerEvent>,
listener_tx: Sender<ReporterEvent>,
}
impl ReportAggregator {
pub fn new(config: Arguments) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self {
report: Report::new(config),
remaining_cases: Default::default(),
runner_tx: Some(runner_tx),
runner_rx,
listener_tx,
}
}
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
let reporter = self
.runner_tx
.take()
.map(Into::into)
.expect("Can't fail since this can only be called once");
(reporter, async move { self.aggregate().await })
}
async fn aggregate(mut self) -> Result<()> {
debug!("Starting to aggregate report");
while let Some(event) = self.runner_rx.recv().await {
debug!(?event, "Received Event");
match event {
RunnerEvent::SubscribeToEvents(event) => {
self.handle_subscribe_to_events_event(*event);
}
RunnerEvent::CorpusFileDiscovery(event) => {
self.handle_corpus_file_discovered_event(*event)
}
RunnerEvent::MetadataFileDiscovery(event) => {
self.handle_metadata_file_discovery_event(*event);
}
RunnerEvent::TestCaseDiscovery(event) => {
self.handle_test_case_discovery(*event);
}
RunnerEvent::TestSucceeded(event) => {
self.handle_test_succeeded_event(*event);
}
RunnerEvent::TestFailed(event) => {
self.handle_test_failed_event(*event);
}
RunnerEvent::TestIgnored(event) => {
self.handle_test_ignored_event(*event);
}
RunnerEvent::LeaderNodeAssigned(event) => {
self.handle_leader_node_assigned_event(*event);
}
RunnerEvent::FollowerNodeAssigned(event) => {
self.handle_follower_node_assigned_event(*event);
}
RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
self.handle_pre_link_contracts_compilation_succeeded_event(*event)
}
RunnerEvent::PostLinkContractsCompilationSucceeded(event) => {
self.handle_post_link_contracts_compilation_succeeded_event(*event)
}
RunnerEvent::PreLinkContractsCompilationFailed(event) => {
self.handle_pre_link_contracts_compilation_failed_event(*event)
}
RunnerEvent::PostLinkContractsCompilationFailed(event) => {
self.handle_post_link_contracts_compilation_failed_event(*event)
}
RunnerEvent::LibrariesDeployed(event) => {
self.handle_libraries_deployed_event(*event);
}
RunnerEvent::ContractDeployed(event) => {
self.handle_contract_deployed_event(*event);
}
}
}
debug!("Report aggregation completed");
let file_name = {
let current_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
.as_secs();
let mut file_name = current_timestamp.to_string();
file_name.push_str(".json");
file_name
};
let file_path = self.report.config.directory().join(file_name);
let file = OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.read(false)
.open(&file_path)
.with_context(|| {
format!(
"Failed to open report file for writing: {}",
file_path.display()
)
})?;
serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
Ok(())
}
fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
let _ = event.tx.send(self.listener_tx.subscribe());
}
fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
self.report.corpora.push(event.corpus);
}
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
self.report.metadata_files.insert(event.path.clone());
}
fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into())
.or_default()
.entry(event.test_specifier.solc_mode.clone())
.or_default()
.insert(event.test_specifier.case_idx);
}
fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
// Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into())
.or_default()
.entry(event.test_specifier.solc_mode.clone())
.or_default()
.remove(&event.test_specifier.case_idx);
// Add information on the fact that the case succeeded to the report.
let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Succeeded {
steps_executed: event.steps_executed,
});
self.handle_post_test_case_status_update(&event.test_specifier);
}
fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
// Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into())
.or_default()
.entry(event.test_specifier.solc_mode.clone())
.or_default()
.remove(&event.test_specifier.case_idx);
// Add information on the fact that the case failed to the report.
let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Failed {
reason: event.reason,
});
self.handle_post_test_case_status_update(&event.test_specifier);
}
fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
// Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into())
.or_default()
.entry(event.test_specifier.solc_mode.clone())
.or_default()
.remove(&event.test_specifier.case_idx);
// Add information on the fact that the case was ignored to the report.
let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Ignored {
reason: event.reason,
additional_fields: event.additional_fields,
});
self.handle_post_test_case_status_update(&event.test_specifier);
}
fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
let remaining_cases = self
.remaining_cases
.entry(specifier.metadata_file_path.clone().into())
.or_default()
.entry(specifier.solc_mode.clone())
.or_default();
if !remaining_cases.is_empty() {
return;
}
let case_status = self
.report
.test_case_information
.entry(specifier.metadata_file_path.clone().into())
.or_default()
.entry(specifier.solc_mode.clone())
.or_default()
.iter()
.map(|(case_idx, case_report)| {
(
*case_idx,
case_report.status.clone().expect("Can't be uninitialized"),
)
})
.collect::<BTreeMap<_, _>>();
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
metadata_file_path: specifier.metadata_file_path.clone().into(),
mode: specifier.solc_mode.clone(),
case_status,
};
// According to the documentation on `send`, sending fails only if there are no
// remaining receiver handles. That isn't an error we want to bubble up, so if
// the send fails we simply ignore it.
}
fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) {
let execution_information = self.execution_information(&ExecutionSpecifier {
test_specifier: event.test_specifier,
node_id: event.id,
node_designation: NodeDesignation::Leader,
});
execution_information.node = Some(TestCaseNodeInformation {
id: event.id,
platform: event.platform,
connection_string: event.connection_string,
});
}
fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) {
let execution_information = self.execution_information(&ExecutionSpecifier {
test_specifier: event.test_specifier,
node_id: event.id,
node_designation: NodeDesignation::Follower,
});
execution_information.node = Some(TestCaseNodeInformation {
id: event.id,
platform: event.platform,
connection_string: event.connection_string,
});
}
fn handle_pre_link_contracts_compilation_succeeded_event(
&mut self,
event: PreLinkContractsCompilationSucceededEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let include_output = self.report.config.report_include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input,
compiler_output,
});
}
fn handle_post_link_contracts_compilation_succeeded_event(
&mut self,
event: PostLinkContractsCompilationSucceededEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let include_output = self.report.config.report_include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input,
compiler_output,
});
}
fn handle_pre_link_contracts_compilation_failed_event(
&mut self,
event: PreLinkContractsCompilationFailedEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input,
});
}
fn handle_post_link_contracts_compilation_failed_event(
&mut self,
event: PostLinkContractsCompilationFailedEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input,
});
}
fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
self.execution_information(&event.execution_specifier)
.deployed_libraries = Some(event.libraries);
}
fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
self.execution_information(&event.execution_specifier)
.deployed_contracts
.get_or_insert_default()
.insert(event.contract_instance, event.address);
}
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
self.report
.test_case_information
.entry(specifier.metadata_file_path.clone().into())
.or_default()
.entry(specifier.solc_mode.clone())
.or_default()
.entry(specifier.case_idx)
.or_default()
}
fn execution_information(
&mut self,
specifier: &ExecutionSpecifier,
) -> &mut ExecutionInformation {
let test_case_report = self.test_case_report(&specifier.test_specifier);
match specifier.node_designation {
NodeDesignation::Leader => test_case_report
.leader_execution_information
.get_or_insert_default(),
NodeDesignation::Follower => test_case_report
.follower_execution_information
.get_or_insert_default(),
}
}
}
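// A minimal usage sketch, assuming a tokio runtime and a `config: Arguments`
// in scope (illustrative only; the names below mirror this file's API):
//
//     let (reporter, task) = ReportAggregator::new(config).into_task();
//     let aggregation = tokio::spawn(task);
//     // ... hand clones of `reporter` to the runner tasks ...
//     drop(reporter); // dropping the last sender ends the aggregation loop
//     aggregation.await??; // the unified report is written to `<timestamp>.json`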
#[serde_as]
#[derive(Clone, Debug, Serialize)]
pub struct Report {
/// The configuration that the tool was started up with.
pub config: Arguments,
/// The platform of the leader chain.
pub leader_platform: TestingPlatform,
/// The platform of the follower chain.
pub follower_platform: TestingPlatform,
/// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool.
pub metadata_files: BTreeSet<MetadataFilePath>,
/// Information relating to each test case.
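// `Mode` and `CaseIdx` appear as map keys, and JSON object keys must be strings,
// so they are serialized through their `Display`/`FromStr` implementations.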
#[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
pub test_case_information:
BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
}
impl Report {
pub fn new(config: Arguments) -> Self {
Self {
leader_platform: config.leader,
follower_platform: config.follower,
config,
corpora: Default::default(),
metadata_files: Default::default(),
test_case_information: Default::default(),
}
}
}
#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<TestCaseStatus>,
/// Information related to the execution on the leader.
#[serde(skip_serializing_if = "Option::is_none")]
pub leader_execution_information: Option<ExecutionInformation>,
/// Information related to the execution on the follower.
#[serde(skip_serializing_if = "Option::is_none")]
pub follower_execution_information: Option<ExecutionInformation>,
}
/// Information related to the status of the test: it may have succeeded, failed,
/// or been ignored.
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
/// The test case succeeded.
Succeeded {
/// The number of steps of the case that were executed.
steps_executed: usize,
},
/// The test case failed.
Failed {
/// The reason for the failure of the test case.
reason: String,
},
/// The test case was ignored. This variant carries information related to why it was ignored.
Ignored {
/// The reason behind the test case being ignored.
reason: String,
/// Additional fields that describe more information on why the test case is ignored.
#[serde(flatten)]
additional_fields: IndexMap<String, serde_json::Value>,
},
}
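// With the internally tagged representation above, values serialize roughly as
// the following illustrative JSON:
//
//     {"status": "Succeeded", "steps_executed": 4}
//     {"status": "Failed", "reason": "..."}
//     {"status": "Ignored", "reason": "...", /* flattened additional fields */}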
/// Information related to the leader or follower node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
pub struct TestCaseNodeInformation {
/// The ID of the node that this case is being executed on.
pub id: usize,
/// The platform of the node.
pub platform: TestingPlatform,
/// The connection string of the node.
pub connection_string: String,
}
/// Execution information tied to the leader or the follower.
#[derive(Clone, Debug, Default, Serialize)]
pub struct ExecutionInformation {
/// Information related to the node assigned to this test case.
#[serde(skip_serializing_if = "Option::is_none")]
pub node: Option<TestCaseNodeInformation>,
/// Information on the pre-link compiled contracts.
#[serde(skip_serializing_if = "Option::is_none")]
pub pre_link_compilation_status: Option<CompilationStatus>,
/// Information on the post-link compiled contracts.
#[serde(skip_serializing_if = "Option::is_none")]
pub post_link_compilation_status: Option<CompilationStatus>,
/// Information on the deployed libraries.
#[serde(skip_serializing_if = "Option::is_none")]
pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
/// Information on the deployed contracts.
#[serde(skip_serializing_if = "Option::is_none")]
pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
}
/// Information related to compilation
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
/// The compilation was successful.
Success {
/// A flag with information on whether the compilation artifacts were cached or not.
is_cached: bool,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI configuration.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>,
},
/// The compilation failed.
Failure {
/// The failure reason.
reason: String,
/// The version of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>,
},
}
-81
View File
@@ -1,81 +0,0 @@
//! The report analyzer enriches the raw report data.
use revive_dt_compiler::CompilerOutput;
use serde::{Deserialize, Serialize};
use crate::reporter::CompilationTask;
/// Provides insights into how well the compilers perform.
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, PartialOrd)]
pub struct CompilerStatistics {
/// The sum of contracts observed.
pub n_contracts: usize,
/// The mean size of compiled contracts.
pub mean_code_size: usize,
/// The mean size of the optimized YUL IR.
pub mean_yul_size: usize,
/// This ratio is only a proxy, because the YUL also contains a lot of comments.
pub yul_to_bytecode_size_ratio: f32,
}
impl CompilerStatistics {
/// Cumulatively update the statistics with the next compiler task.
pub fn sample(&mut self, compilation_task: &CompilationTask) {
let Some(CompilerOutput { contracts }) = &compilation_task.json_output else {
return;
};
for (_solidity, contracts) in contracts.iter() {
for (_name, (bytecode, _)) in contracts.iter() {
// The EVM bytecode can be unlinked and thus is not necessarily a decodable hex
// string; for our statistics this is a good enough approximation.
let bytecode_size = bytecode.len() / 2;
// TODO: for the time being we set the yul_size to be zero. We need to change this
// when we overhaul the reporting.
self.update_sizes(bytecode_size, 0);
}
}
}
/// Updates the size statistics cumulatively.
fn update_sizes(&mut self, bytecode_size: usize, yul_size: usize) {
let n_previous = self.n_contracts;
let n_current = self.n_contracts + 1;
self.n_contracts = n_current;
self.mean_code_size = (n_previous * self.mean_code_size + bytecode_size) / n_current;
self.mean_yul_size = (n_previous * self.mean_yul_size + yul_size) / n_current;
if self.mean_code_size > 0 {
self.yul_to_bytecode_size_ratio =
self.mean_yul_size as f32 / self.mean_code_size as f32;
}
}
}
#[cfg(test)]
mod tests {
use super::CompilerStatistics;
#[test]
fn compiler_statistics() {
let mut received = CompilerStatistics::default();
received.update_sizes(0, 0);
received.update_sizes(3, 37);
received.update_sizes(123, 456);
let mean_code_size = 41; // rounding error from integer truncation
let mean_yul_size = 164;
let expected = CompilerStatistics {
n_contracts: 3,
mean_code_size,
mean_yul_size,
yul_to_bytecode_size_ratio: mean_yul_size as f32 / mean_code_size as f32,
};
assert_eq!(received, expected);
}
}
+43
View File
@@ -0,0 +1,43 @@
//! Common types and functions used throughout the crate.
use std::{path::PathBuf, sync::Arc};
use revive_dt_common::define_wrapper_type;
use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, input::StepIdx};
use serde::{Deserialize, Serialize};
define_wrapper_type!(
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(transparent)]
pub struct MetadataFilePath(PathBuf);
);
/// An absolute specifier for a test.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestSpecifier {
pub solc_mode: Mode,
pub metadata_file_path: PathBuf,
pub case_idx: CaseIdx,
}
/// An absolute specifier for a test that also includes the node it's assigned to and whether
/// that node is the leader or a follower.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExecutionSpecifier {
pub test_specifier: Arc<TestSpecifier>,
pub node_id: usize,
pub node_designation: NodeDesignation,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum NodeDesignation {
Leader,
Follower,
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier {
pub execution_specifier: Arc<ExecutionSpecifier>,
pub step_idx: StepIdx,
}
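These specifiers nest: a `StepExecutionSpecifier` wraps an `ExecutionSpecifier`, which in turn wraps a `TestSpecifier`, each behind an `Arc` so clones stay cheap as a test fans out across nodes and steps. A minimal construction sketch, reusing this module's imports and taking the mode and indices as parameters since their constructors are not shown here:
fn specifier_hierarchy_sketch(solc_mode: Mode, case_idx: CaseIdx, step_idx: StepIdx) {
    // A test is identified by its solc mode, metadata file, and case index.
    let test_specifier = Arc::new(TestSpecifier {
        solc_mode,
        metadata_file_path: PathBuf::from("tests/example.json"),
        case_idx,
    });
    // Executing that test on a concrete node adds the node's identity.
    let execution_specifier = Arc::new(ExecutionSpecifier {
        test_specifier,
        node_id: 0,
        node_designation: NodeDesignation::Leader,
    });
    // A single step within that execution adds the step index.
    let _step_specifier = StepExecutionSpecifier {
        execution_specifier,
        step_idx,
    };
}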
+10 -3
View File
@@ -1,4 +1,11 @@
//! The revive differential tests reporting facility.
//! This crate implements the reporting infrastructure for the differential testing tool.
pub mod analyzer;
pub mod reporter;
mod aggregator;
mod common;
mod reporter_event;
mod runner_event;
pub use aggregator::*;
pub use common::*;
pub use reporter_event::*;
pub use runner_event::*;
-234
View File
@@ -1,234 +0,0 @@
//! The reporter is the central place observing test execution by collecting data.
//!
//! The data collected gives useful insights into the outcome of the test run
//! and helps identify and reproduce failing cases.
use std::{
collections::HashMap,
fs::{self, File, create_dir_all},
path::PathBuf,
sync::{Mutex, OnceLock},
time::{SystemTime, UNIX_EPOCH},
};
use anyhow::Context;
use serde::Serialize;
use revive_dt_common::types::Mode;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::corpus::Corpus;
use crate::analyzer::CompilerStatistics;
pub(crate) static REPORTER: OnceLock<Mutex<Report>> = OnceLock::new();
/// The `Report` data structure stores all relevant information required for generating reports.
#[derive(Clone, Debug, Default, Serialize)]
pub struct Report {
/// The configuration used during the test.
pub config: Arguments,
/// The observed test corpora.
pub corpora: Vec<Corpus>,
/// The observed test definitions.
pub metadata_files: Vec<PathBuf>,
/// The observed compilation results.
pub compiler_results: HashMap<TestingPlatform, Vec<CompilationResult>>,
/// The observed compilation statistics.
pub compiler_statistics: HashMap<TestingPlatform, CompilerStatistics>,
/// The directory this report is serialized to.
#[serde(skip)]
directory: PathBuf,
}
/// Contains a compiled contract.
#[derive(Clone, Debug, Serialize)]
pub struct CompilationTask {
/// The observed compiler input.
pub json_input: CompilerInput,
/// The observed compiler output.
pub json_output: Option<CompilerOutput>,
/// The observed compiler mode.
pub mode: Mode,
/// The observed compiler version.
pub compiler_version: String,
/// The observed error, if any.
pub error: Option<String>,
}
/// Represents a report about a compilation task.
#[derive(Clone, Debug, Serialize)]
pub struct CompilationResult {
/// The observed compilation task.
pub compilation_task: CompilationTask,
/// The linked span.
pub span: Span,
}
/// The [Span] struct indicates the context of what is being reported.
#[derive(Clone, Copy, Debug, Serialize)]
pub struct Span {
/// The corpus index this belongs to.
corpus: usize,
/// The metadata file this belongs to.
metadata_file: usize,
/// The index of the case definition this belongs to.
case: usize,
/// The index of the case input this belongs to.
input: usize,
}
impl Report {
/// The file name this report will be written to.
pub const FILE_NAME: &str = "report.json";
/// The [Span] is expected to initialize the reporter by providing the config.
const INITIALIZED_VIA_SPAN: &str = "requires a Span which initializes the reporter";
/// Create a new [Report].
fn new(config: Arguments) -> anyhow::Result<Self> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis();
let directory = config.directory().join("report").join(format!("{now}"));
if !directory.exists() {
create_dir_all(&directory)?;
}
Ok(Self {
config,
directory,
..Default::default()
})
}
/// Add a compilation task to the report.
pub fn compilation(span: Span, platform: TestingPlatform, compilation_task: CompilationTask) {
let mut report = REPORTER
.get()
.expect(Report::INITIALIZED_VIA_SPAN)
.lock()
.unwrap();
report
.compiler_statistics
.entry(platform)
.or_default()
.sample(&compilation_task);
report
.compiler_results
.entry(platform)
.or_default()
.push(CompilationResult {
compilation_task,
span,
});
}
/// Write the report to disk.
pub fn save() -> anyhow::Result<()> {
let Some(reporter) = REPORTER.get() else {
return Ok(());
};
let report = reporter.lock().unwrap();
if let Err(error) = report.write_to_file() {
anyhow::bail!("can not write report: {error}");
}
if report.config.extract_problems {
if let Err(error) = report.save_compiler_problems() {
anyhow::bail!("can not write compiler problems: {error}");
}
}
Ok(())
}
/// Write compiler problems to disk for later debugging.
pub fn save_compiler_problems(&self) -> anyhow::Result<()> {
for (platform, results) in self.compiler_results.iter() {
for result in results {
// ignore if there were no errors
if result.compilation_task.error.is_none() {
continue;
}
let path = &self.metadata_files[result.span.metadata_file]
.parent()
.unwrap()
.join(format!("{platform}_errors"));
if !path.exists() {
create_dir_all(path)?;
}
if let Some(error) = result.compilation_task.error.as_ref() {
fs::write(path.join("compiler_error.txt"), error)?;
}
if let Some(errors) = result.compilation_task.json_output.as_ref() {
let file = File::create(path.join("compiler_output.txt"))?;
serde_json::to_writer_pretty(file, &errors)?;
}
}
}
Ok(())
}
fn write_to_file(&self) -> anyhow::Result<()> {
let path = self.directory.join(Self::FILE_NAME);
let file = File::create(&path).context(path.display().to_string())?;
serde_json::to_writer_pretty(file, &self)?;
Ok(())
}
}
impl Span {
/// Create a new [Span] with case and input index at 0.
///
/// Initializes the reporting facility on the first call.
pub fn new(corpus: Corpus, config: Arguments) -> anyhow::Result<Self> {
let report = Mutex::new(Report::new(config)?);
let mut reporter = REPORTER.get_or_init(|| report).lock().unwrap();
reporter.corpora.push(corpus);
Ok(Self {
corpus: reporter.corpora.len() - 1,
metadata_file: 0,
case: 0,
input: 0,
})
}
/// Advance to the next metadata file: Resets the case and input indices to 0.
pub fn next_metadata(&mut self, metadata_file: PathBuf) {
let mut reporter = REPORTER
.get()
.expect(Report::INITIALIZED_VIA_SPAN)
.lock()
.unwrap();
reporter.metadata_files.push(metadata_file);
self.metadata_file = reporter.metadata_files.len() - 1;
self.case = 0;
self.input = 0;
}
/// Advance to the next case: Increases the case index by one and resets the input index to 0.
pub fn next_case(&mut self) {
self.case += 1;
self.input = 0;
}
/// Advance to the next input.
pub fn next_input(&mut self) {
self.input += 1;
}
}
+22
View File
@@ -0,0 +1,22 @@
//! A reporter event sent by the report aggregator to the various listeners.
use std::collections::BTreeMap;
use revive_dt_compiler::Mode;
use revive_dt_format::case::CaseIdx;
use crate::{MetadataFilePath, TestCaseStatus};
#[derive(Clone, Debug)]
pub enum ReporterEvent {
/// An event sent by the reporter once an entire metadata file and solc mode combination has
/// finished execution.
MetadataFileSolcModeCombinationExecutionCompleted {
/// The path of the metadata file.
metadata_file_path: MetadataFilePath,
/// The Solc mode that this metadata file was executed in.
mode: Mode,
/// The status of each one of the cases.
case_status: BTreeMap<CaseIdx, TestCaseStatus>,
},
}
+642
View File
@@ -0,0 +1,642 @@
//! The types associated with the events sent by the runner to the reporter.
#![allow(dead_code)]
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
macro_rules! __report_gen_emit_test_specific {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )*
;
$( $aname:ident : $aty:ty, )*
) => {
paste::paste! {
pub fn [< report_ $variant_ident:snake _event >](
&self
$(, $bname: impl Into<$bty> )*
$(, $aname: impl Into<$aty> )*
) -> anyhow::Result<()> {
self.report([< $variant_ident Event >] {
$skip_field: self.test_specifier.clone()
$(, $bname: $bname.into() )*
$(, $aname: $aname.into() )*
})
}
}
};
}
macro_rules! __report_gen_emit_test_specific_by_parse {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
) => {
__report_gen_emit_test_specific!(
$ident, $variant_ident, $skip_field;
$( $bname : $bty, )* ; $( $aname : $aty, )*
);
};
}
macro_rules! __report_gen_scan_before {
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
test_specifier : $skip_ty:ty,
$( $after:ident : $aty:ty, )*
;
) => {
__report_gen_emit_test_specific_by_parse!(
$ident, $variant_ident, test_specifier;
$( $before : $bty, )* ; $( $after : $aty, )*
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
;
) => {
__report_gen_scan_before!(
$ident, $variant_ident;
$( $before : $bty, )* $name : $ty,
;
$( $after : $aty, )*
;
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
;
) => {};
}
macro_rules! __report_gen_for_variant {
(
$ident:ident,
$variant_ident:ident;
) => {};
(
$ident:ident,
$variant_ident:ident;
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
) => {
__report_gen_scan_before!(
$ident, $variant_ident;
;
$( $field_ident : $field_ty, )*
;
);
};
}
macro_rules! __report_gen_emit_execution_specific {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )*
;
$( $aname:ident : $aty:ty, )*
) => {
paste::paste! {
pub fn [< report_ $variant_ident:snake _event >](
&self
$(, $bname: impl Into<$bty> )*
$(, $aname: impl Into<$aty> )*
) -> anyhow::Result<()> {
self.report([< $variant_ident Event >] {
$skip_field: self.execution_specifier.clone()
$(, $bname: $bname.into() )*
$(, $aname: $aname.into() )*
})
}
}
};
}
macro_rules! __report_gen_emit_execution_specific_by_parse {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
) => {
__report_gen_emit_execution_specific!(
$ident, $variant_ident, $skip_field;
$( $bname : $bty, )* ; $( $aname : $aty, )*
);
};
}
macro_rules! __report_gen_scan_before_exec {
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
execution_specifier : $skip_ty:ty,
$( $after:ident : $aty:ty, )*
;
) => {
__report_gen_emit_execution_specific_by_parse!(
$ident, $variant_ident, execution_specifier;
$( $before : $bty, )* ; $( $after : $aty, )*
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
;
) => {
__report_gen_scan_before_exec!(
$ident, $variant_ident;
$( $before : $bty, )* $name : $ty,
;
$( $after : $aty, )*
;
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
;
) => {};
}
macro_rules! __report_gen_for_variant_exec {
(
$ident:ident,
$variant_ident:ident;
) => {};
(
$ident:ident,
$variant_ident:ident;
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
) => {
__report_gen_scan_before_exec!(
$ident, $variant_ident;
;
$( $field_ident : $field_ty, )*
;
);
};
}
macro_rules! __report_gen_emit_step_execution_specific {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )*
;
$( $aname:ident : $aty:ty, )*
) => {
paste::paste! {
pub fn [< report_ $variant_ident:snake _event >](
&self
$(, $bname: impl Into<$bty> )*
$(, $aname: impl Into<$aty> )*
) -> anyhow::Result<()> {
self.report([< $variant_ident Event >] {
$skip_field: self.step_specifier.clone()
$(, $bname: $bname.into() )*
$(, $aname: $aname.into() )*
})
}
}
};
}
macro_rules! __report_gen_emit_step_execution_specific_by_parse {
(
$ident:ident,
$variant_ident:ident,
$skip_field:ident;
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
) => {
__report_gen_emit_step_execution_specific!(
$ident, $variant_ident, $skip_field;
$( $bname : $bty, )* ; $( $aname : $aty, )*
);
};
}
macro_rules! __report_gen_scan_before_step {
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
step_specifier : $skip_ty:ty,
$( $after:ident : $aty:ty, )*
;
) => {
__report_gen_emit_step_execution_specific_by_parse!(
$ident, $variant_ident, step_specifier;
$( $before : $bty, )* ; $( $after : $aty, )*
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
;
) => {
__report_gen_scan_before_step!(
$ident, $variant_ident;
$( $before : $bty, )* $name : $ty,
;
$( $after : $aty, )*
;
);
};
(
$ident:ident, $variant_ident:ident;
$( $before:ident : $bty:ty, )*
;
;
) => {};
}
macro_rules! __report_gen_for_variant_step {
(
$ident:ident,
$variant_ident:ident;
) => {};
(
$ident:ident,
$variant_ident:ident;
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
) => {
__report_gen_scan_before_step!(
$ident, $variant_ident;
;
$( $field_ident : $field_ty, )*
;
);
};
}
/// Defines the runner event that is sent from the test runners to the report aggregator.
///
/// This macro defines most of the reporting interface. It defines the enum of all possible
/// events that the runners can send to the aggregator and, for each variant, a separate struct
/// so that the variant's payload can be stored in a [`Box`].
///
/// It also defines [`From`] implementations from each event type into the [`RunnerEvent`] enum,
/// allowing an event such as [`CorpusFileDiscoveryEvent`] to be converted into a
/// [`RunnerEvent`].
///
/// Finally, it defines the [`RunnerEventReporter`], a wrapper around an [`UnboundedSender`]
/// through which events are sent to the report aggregator.
///
/// In short, the macro generates almost the entire interface of the reporting infrastructure:
/// the enum itself, its associated types, and the reporter used to report events to the
/// aggregator.
///
/// [`UnboundedSender`]: tokio::sync::mpsc::UnboundedSender
macro_rules! define_event {
(
$(#[$enum_meta: meta])*
$vis: vis enum $ident: ident {
$(
$(#[$variant_meta: meta])*
$variant_ident: ident {
$(
$(#[$field_meta: meta])*
$field_ident: ident: $field_ty: ty
),* $(,)?
}
),* $(,)?
}
) => {
paste::paste! {
$(#[$enum_meta])*
#[derive(Debug)]
$vis enum $ident {
$(
$(#[$variant_meta])*
$variant_ident(Box<[<$variant_ident Event>]>)
),*
}
$(
#[derive(Debug)]
$(#[$variant_meta])*
$vis struct [<$variant_ident Event>] {
$(
$(#[$field_meta])*
$vis $field_ident: $field_ty
),*
}
)*
$(
impl From<[<$variant_ident Event>]> for $ident {
fn from(value: [<$variant_ident Event>]) -> Self {
Self::$variant_ident(Box::new(value))
}
}
)*
/// Provides a way to report events to the aggregator.
///
/// Under the hood, this is a wrapper around an [`UnboundedSender`]. It abstracts away the
/// underlying channel and implements high-level methods for reporting various events to the
/// aggregator.
#[derive(Clone, Debug)]
pub struct [< $ident Reporter >]($vis tokio::sync::mpsc::UnboundedSender<$ident>);
impl From<tokio::sync::mpsc::UnboundedSender<$ident>> for [< $ident Reporter >] {
fn from(value: tokio::sync::mpsc::UnboundedSender<$ident>) -> Self {
Self(value)
}
}
impl [< $ident Reporter >] {
pub fn test_specific_reporter(
&self,
test_specifier: impl Into<std::sync::Arc<crate::common::TestSpecifier>>
) -> [< $ident TestSpecificReporter >] {
[< $ident TestSpecificReporter >] {
reporter: self.clone(),
test_specifier: test_specifier.into(),
}
}
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
self.0.send(event.into()).map_err(Into::into)
}
$(
pub fn [< report_ $variant_ident:snake _event >](&self, $($field_ident: impl Into<$field_ty>),*) -> anyhow::Result<()> {
self.report([< $variant_ident Event >] {
$($field_ident: $field_ident.into()),*
})
}
)*
}
/// A reporter that's tied to a specific test case.
#[derive(Clone, Debug)]
pub struct [< $ident TestSpecificReporter >] {
$vis reporter: [< $ident Reporter >],
$vis test_specifier: std::sync::Arc<crate::common::TestSpecifier>,
}
impl [< $ident TestSpecificReporter >] {
pub fn execution_specific_reporter(
&self,
node_id: impl Into<usize>,
node_designation: impl Into<$crate::common::NodeDesignation>
) -> [< $ident ExecutionSpecificReporter >] {
[< $ident ExecutionSpecificReporter >] {
reporter: self.reporter.clone(),
execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
test_specifier: self.test_specifier.clone(),
node_id: node_id.into(),
node_designation: node_designation.into(),
})
}
}
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
self.reporter.report(event)
}
$(
__report_gen_for_variant! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
)*
}
/// A reporter that's tied to a specific execution of the test case, such as its execution on
/// a specific node (leader or follower).
#[derive(Clone, Debug)]
pub struct [< $ident ExecutionSpecificReporter >] {
$vis reporter: [< $ident Reporter >],
$vis execution_specifier: std::sync::Arc<$crate::common::ExecutionSpecifier>,
}
impl [< $ident ExecutionSpecificReporter >] {
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
self.reporter.report(event)
}
$(
__report_gen_for_variant_exec! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
)*
}
/// A reporter that's tied to a specific step of a test execution.
#[derive(Clone, Debug)]
pub struct [< $ident StepExecutionSpecificReporter >] {
$vis reporter: [< $ident Reporter >],
$vis step_specifier: std::sync::Arc<$crate::common::StepExecutionSpecifier>,
}
impl [< $ident StepExecutionSpecificReporter >] {
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
self.reporter.report(event)
}
$(
__report_gen_for_variant_step! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
)*
}
}
};
}
define_event! {
/// An event type that's sent by the test runners/drivers to the report aggregator.
pub(crate) enum RunnerEvent {
/// An event emitted by the reporter when it wishes to listen to events emitted by the
/// aggregator.
SubscribeToEvents {
/// The channel over which the aggregator sends back the receiving side of its broadcast channel.
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
},
/// An event emitted by runners when they've discovered a corpus file.
CorpusFileDiscovery {
/// The contents of the corpus file.
corpus: Corpus
},
/// An event emitted by runners when they've discovered a metadata file.
MetadataFileDiscovery {
/// The path of the metadata file discovered.
path: MetadataFilePath,
/// The content of the metadata file.
metadata: Metadata
},
/// An event emitted by the runners when they discover a test case.
TestCaseDiscovery {
/// A specifier for the test that was discovered.
test_specifier: Arc<TestSpecifier>,
},
/// An event emitted by the runners when a test case is ignored.
TestIgnored {
/// A specifier for the test that's been ignored.
test_specifier: Arc<TestSpecifier>,
/// A reason for the test to be ignored.
reason: String,
/// Additional fields providing more information on why the test was ignored.
additional_fields: IndexMap<String, serde_json::Value>
},
/// An event emitted by the runners when a test case has succeeded.
TestSucceeded {
/// A specifier for the test that succeeded.
test_specifier: Arc<TestSpecifier>,
/// The number of steps of the case that were executed by the driver.
steps_executed: usize,
},
/// An event emitted by the runners when a test case has failed.
TestFailed {
/// A specifier for the test that failed.
test_specifier: Arc<TestSpecifier>,
/// A reason for the failure of the test.
reason: String,
},
/// An event emitted when the test case is assigned a leader node.
LeaderNodeAssigned {
/// A specifier for the test that the assignment is for.
test_specifier: Arc<TestSpecifier>,
/// The ID of the node that this case is being executed on.
id: usize,
/// The platform of the node.
platform: TestingPlatform,
/// The connection string of the node.
connection_string: String,
},
/// An event emitted when the test case is assigned a follower node.
FollowerNodeAssigned {
/// A specifier for the test that the assignment is for.
test_specifier: Arc<TestSpecifier>,
/// The ID of the node that this case is being executed on.
id: usize,
/// The platform of the node.
platform: TestingPlatform,
/// The connection string of the node.
connection_string: String,
},
/// An event emitted by the runners when the compilation of the contracts has succeeded
/// on the pre-link contracts.
PreLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled anew.
is_cached: bool,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
/// The output of the compiler.
compiler_output: CompilerOutput
},
/// An event emitted by the runners when the compilation of the contracts has succeeded
/// on the post-link contracts.
PostLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled anew.
is_cached: bool,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
/// The output of the compiler.
compiler_output: CompilerOutput
},
/// An event emitted by the runners when the compilation of the pre-link contract has
/// failed.
PreLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
/// The failure reason.
reason: String,
},
/// An event emitted by the runners when the compilation of the post-link contract has
/// failed.
PostLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
/// The failure reason.
reason: String,
},
/// An event emitted by the runners when a library has been deployed.
LibrariesDeployed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The addresses of the libraries that were deployed.
libraries: BTreeMap<ContractInstance, Address>
},
/// An event emitted by the runners when they've deployed a new contract.
ContractDeployed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The instance name of the contract.
contract_instance: ContractInstance,
/// The address of the contract.
address: Address
},
}
}
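To make the generated interface concrete, here is a simplified hand expansion for the `ContractDeployed` variant (illustrative only; the macro above already generates these items, so this sketch must not be compiled alongside it):
// The variant's payload lives in its own boxed struct...
pub(crate) struct ContractDeployedEvent {
    pub(crate) execution_specifier: Arc<ExecutionSpecifier>,
    pub(crate) contract_instance: ContractInstance,
    pub(crate) address: Address,
}
// ...which converts into the enum...
impl From<ContractDeployedEvent> for RunnerEvent {
    fn from(value: ContractDeployedEvent) -> Self {
        Self::ContractDeployed(Box::new(value))
    }
}
// ...and the execution-specific reporter gains a typed method that fills in
// the `execution_specifier` field from the reporter itself.
impl RunnerEventExecutionSpecificReporter {
    pub fn report_contract_deployed_event(
        &self,
        contract_instance: impl Into<ContractInstance>,
        address: impl Into<Address>,
    ) -> anyhow::Result<()> {
        self.report(ContractDeployedEvent {
            execution_specifier: self.execution_specifier.clone(),
            contract_instance: contract_instance.into(),
            address: address.into(),
        })
    }
}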
/// An extension to the macro-generated [`Reporter`].
impl RunnerEventReporter {
pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
self.report_subscribe_to_events_event(tx)
.context("Failed to send subscribe request to reporter task")?;
rx.await.map_err(Into::into)
}
}
pub type Reporter = RunnerEventReporter;
pub type TestSpecificReporter = RunnerEventTestSpecificReporter;
pub type ExecutionSpecificReporter = RunnerEventExecutionSpecificReporter;
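A hedged usage sketch of the subscription handshake, assuming a tokio runtime and a running aggregator task servicing the channel (the listener body is illustrative):
async fn listen_for_reporter_events(reporter: Reporter) -> anyhow::Result<()> {
    // Request a broadcast receiver from the aggregator, then drain its events.
    let mut rx = reporter.subscribe().await?;
    while let Ok(event) = rx.recv().await {
        match event {
            ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
                metadata_file_path,
                mode,
                case_status,
            } => {
                // A real listener would render these; here we only note the case count.
                let _ = (metadata_file_path, mode, case_status.len());
            }
        }
    }
    Ok(())
}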
+45 -8
View File
@@ -12,6 +12,7 @@ use std::{
use tokio::sync::Mutex;
use crate::download::SolcDownloader;
use anyhow::Context;
pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
@@ -31,8 +32,20 @@ pub(crate) async fn get_or_download(
return Ok(target_file);
}
create_dir_all(target_directory)?;
download_to_file(&target_file, downloader).await?;
create_dir_all(&target_directory).with_context(|| {
format!(
"Failed to create solc cache directory: {}",
target_directory.display()
)
})?;
download_to_file(&target_file, downloader)
.await
.with_context(|| {
format!(
"Failed to write downloaded solc to {}",
target_file.display()
)
})?;
cache.insert(target_file.clone());
Ok(target_file)
@@ -45,14 +58,26 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
#[cfg(unix)]
{
let mut permissions = file.metadata()?.permissions();
let mut permissions = file
.metadata()
.with_context(|| format!("Failed to read metadata for {}", path.display()))?
.permissions();
permissions.set_mode(permissions.mode() | 0o111);
file.set_permissions(permissions)?;
file.set_permissions(permissions).with_context(|| {
format!("Failed to set executable permissions on {}", path.display())
})?;
}
let mut file = BufWriter::new(file);
file.write_all(&downloader.download().await?)?;
file.flush()?;
file.write_all(
&downloader
.download()
.await
.context("Failed to download solc binary bytes")?,
)
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
file.flush()
.with_context(|| format!("Failed to flush file {}", path.display()))?;
drop(file);
#[cfg(target_os = "macos")]
@@ -63,8 +88,20 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
.stderr(std::process::Stdio::null())
.stdout(std::process::Stdio::null())
.spawn()?
.wait()?;
.spawn()
.with_context(|| {
format!(
"Failed to spawn xattr to remove quarantine attribute on {}",
path.display()
)
})?
.wait()
.with_context(|| {
format!(
"Failed waiting for xattr operation to complete on {}",
path.display()
)
})?;
Ok(())
}
+27 -5
View File
@@ -11,6 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256};
use crate::list::List;
use anyhow::Context;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default);
@@ -30,7 +31,12 @@ impl List {
return Ok(list.clone());
}
let body: List = reqwest::get(url).await?.json().await?;
let body: List = reqwest::get(url)
.await
.with_context(|| format!("Failed to GET solc list from {url}"))?
.json()
.await
.with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
LIST_CACHE.lock().unwrap().insert(url, body.clone());
@@ -68,7 +74,8 @@ impl SolcDownloader {
}),
VersionOrRequirement::Requirement(requirement) => {
let Some(version) = List::download(list)
.await?
.await
.with_context(|| format!("Failed to download solc builds list from {list}"))?
.builds
.into_iter()
.map(|build| build.version)
@@ -107,11 +114,20 @@ impl SolcDownloader {
/// Errors out if the download fails or the digest of the downloaded file
/// mismatches the expected digest from the release [List].
pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
let builds = List::download(self.list).await?.builds;
let builds = List::download(self.list)
.await
.with_context(|| format!("Failed to download solc builds list from {}", self.list))?
.builds;
let build = builds
.iter()
.find(|build| build.version == self.version)
.ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))?;
.ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
.with_context(|| {
format!(
"Requested solc version {} was not found in builds list fetched from {}",
self.version, self.list
)
})?;
let path = build.path.clone();
let expected_digest = build
@@ -121,7 +137,13 @@ impl SolcDownloader {
.to_string();
let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
let file = reqwest::get(url).await?.bytes().await?.to_vec();
let file = reqwest::get(&url)
.await
.with_context(|| format!("Failed to GET solc binary from {url}"))?
.bytes()
.await
.with_context(|| format!("Failed to read solc binary bytes from {url}"))?
.to_vec();
if hex::encode(Sha256::digest(&file)) != expected_digest {
anyhow::bail!("sha256 mismatch for solc version {}", self.version);
+3 -1
View File
@@ -5,6 +5,7 @@
use std::path::{Path, PathBuf};
use anyhow::Context;
use cache::get_or_download;
use download::SolcDownloader;
@@ -34,7 +35,8 @@ pub async fn download_solc(
SolcDownloader::windows(version).await
} else {
unimplemented!()
}?;
}
.context("Failed to initialize the Solc Downloader")?;
get_or_download(cache_directory, &downloader).await
}
Executable
+102
View File
@@ -0,0 +1,102 @@
#!/bin/bash
# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"
# Optional positional argument: path to polkadot-sdk directory
POLKADOT_SDK_DIR="${1:-}"
# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""
# Check if test repo already exists
if [ -d "$TEST_REPO_DIR" ]; then
echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}"
cd "$TEST_REPO_DIR"
git pull
cd ..
else
echo -e "${GREEN}Cloning test repository...${NC}"
git clone "$TEST_REPO_URL"
fi
# If polkadot-sdk path is provided, verify and use binaries from there; build if needed
if [ -n "$POLKADOT_SDK_DIR" ]; then
if [ ! -d "$POLKADOT_SDK_DIR" ]; then
echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}"
exit 1
fi
POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR")
echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}"
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package revive-dev-node --package eth-rpc --package substrate-node)
fi
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
if [ ! -x "$bin" ]; then
echo -e "${RED}Expected binary not found after build: $bin${NC}"
exit 1
fi
done
else
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi
# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "$ABSOLUTE_PATH"
}
EOF
echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"
echo -e "${GREEN}Starting differential tests...${NC}"
echo "This may take a while..."
echo ""
# Run the tool
RUST_LOG="error" cargo run --release -- \
--corpus "$CORPUS_FILE" \
--workdir "$WORKDIR" \
--number-of-nodes 5 \
--kitchensink "$SUBSTRATE_NODE_BIN" \
--revive-dev-node "$REVIVE_DEV_NODE_BIN" \
--eth_proxy "$ETH_RPC_BIN" \
> logs.log \
2> output.log
echo -e "${GREEN}=== Test run completed! ===${NC}"