Compare commits


30 Commits

Author SHA1 Message Date
Omar Abdulla 4bf22f2d2b Merge remote-tracking branch 'origin/main' into bugfix/finding-contract-abi 2025-07-16 15:21:21 +03:00
Omar baa11ad28f Correctly identify which contracts to compile (#44)
* Compile all contracts for a test file

* Fix compilation errors related to paths

* Set the base path if specified
2025-07-16 11:52:40 +00:00
Omar c2e65f9e33 Fix function selector & argument encoding (#39)
* Fix function selector and argument encoding

* Avoid extra buffer allocation

* Remove reliance on the web3 crate

* Fix tests
2025-07-15 20:00:10 +00:00
Omar 14888f9767 Update the async runtime (#42)
* Update the async runtime with syntactic sugar.

* Fix doc test

* Update crates/node-interaction/src/blocking_executor.rs

Co-authored-by: xermicus <cyrill@parity.io>

* Improve the comments

* Update the release profile

---------

Co-authored-by: xermicus <cyrill@parity.io>
2025-07-15 11:19:17 +00:00
Omar 3e99d1c2a5 Allow alloy to estimate tx gas (#37) 2025-07-14 17:34:44 +00:00
Omar Abdulla 5f86ade1e0 Implement ABI fix in the compiler trait impl 2025-07-14 20:31:06 +03:00
Omar Abdulla 43064022e8 Merge remote-tracking branch 'origin/main' into bugfix/finding-contract-abi 2025-07-14 20:25:08 +03:00
Omar 4e234aa1bd Remove code that was accidentally committed. (#41)
* Remove code that was accidentally committed.

* Remove unneeded dependency
2025-07-14 16:24:39 +00:00
Omar b204de5484 Persist node logs (#36)
* Persist node logs

* Fix clippy lints

* Delete the node's db on shutdown but persist logs

* Fix tests

* Separate stdout and stderr and use more consts.

* More consistent handling of open options

* Revert the use of subprocess

* Remove outdated comment

* Flush the log files on drop

* Rename `log_files` -> `logs_file_to_flush`
2025-07-14 16:08:47 +00:00
Omar 5eb3a0e1b5 Fix for "transaction indexing is in progress" (#32)
* Retry getting transaction receipt

* Small fix to logging consistency

* Introduce a custom kitchensink network

* Fix formatting and clippy
2025-07-14 09:32:57 +00:00
Omar 772bd217c3 Fixing the CI on Ubuntu (#31)
* pin the version of geth used in CI

* pin the version of geth used in CI

* temp: run on each push

* pin the version of geth used in CI

* Make geth installation arch dependent

* Remove temp run on push to branch

* Add a comment on the need for pre-built binaries
2025-07-14 09:17:13 +00:00
Omar Abdulla 4d4398f83e Fix the ABI finding logic 2025-07-13 15:59:23 +03:00
Omar 0513a4befb Use tracing for logging. (#29)
This commit updates how logging is done in the differential testing
harness to use `tracing` instead of using the `log` crate. This allows
us to be able to better associate logs with the cases being executed
which makes it easier to debug and understand what the harness is doing.
2025-07-10 07:28:16 +00:00
activecoder10 de7c7d6703 Compute transaction input for executing transactions (#28)
* Parsed ABI field in order to get method parameter

* Added logic for ABI

* Refactored dependencies

* Small refactoring

* Added unit tests for ABI parameter extraction logic

* Fixed format issues

* Fixed format

* Added new changes to format

* Added bail to stop execution when we have an error during deployment
2025-07-09 11:03:38 +00:00
activecoder10 3a537c2812 Added extra logging for critical part of the flow. (#27)
* Fix legacy_transaction to address for execution part

* updated polkadot-sdk to latest

* Update polkadot-sdk to latest main with fixes

* Added extra logging

* Applied some clippy improvements
2025-06-27 15:24:57 +00:00
activecoder10 4ab79ed97e Fixed the contract deployment logic. Added new tracing logging for differential for leader and follower receipt structure (#26) 2025-06-20 13:02:54 +00:00
activecoder10 ee97b62e70 Added fetch_add_nonce method for NodeInteraction trait. Added extra logging. (#25)
* added logging

* added fetch_add_nonce method

* Added nonce for legacy transaction also

* Addressed PR comments
2025-06-18 19:43:16 +00:00
xermicus e9b5a06aec fix the simple test case definition (#24)
Signed-off-by: xermicus <cyrill@parity.io>
2025-06-17 10:23:09 +00:00
xermicus 534170db6f dont fail machete on polkadot-sdk submodule (#23)
Signed-off-by: xermicus <cyrill@parity.io>
2025-06-14 10:12:30 +00:00
activecoder10 090b56c46a deploy contracts (#22) 2025-06-12 11:09:01 +00:00
activecoder10 547563e718 Extended execute_input method (#21)
* Extended execute_input method

* Improve tracing part
2025-06-10 08:23:37 +00:00
xermicus c8eb8cf7b0 the state diff method belongs to node interactions (#20)
Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
2025-06-05 07:50:54 +00:00
activecoder10 3b26e1e1d6 Implement the Node trait for kitchensink (#16)
* feat: implement Node trait for Kitchensink node

* removed self from eth_to_substrate_address method
2025-06-05 06:12:54 +00:00
xermicus 1bc20d088f update dependencies (#19)
Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
2025-05-26 07:02:27 +00:00
xermicus 10bfaed461 Implement basic reporting facility (#18)
* wip

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* save to file after all tasks done

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* error out early if the workdir does not exist

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* the compiler statistics

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* allow compiler statistics per implementation

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* save compiler problems

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* add flag whether to extract compiler errors

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

* whitespace

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>

---------

Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
2025-05-23 17:15:04 +00:00
xermicus 399f7820cd add all cargo tasks to the test target (#14)
Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
2025-05-15 11:15:50 +00:00
activecoder10 ae1174febe Added basic CI workflow (#13) 2025-05-12 13:00:13 +03:00
activecoder10 38b42560ec Added implementation for resolc trait (#12)
Implement the Solidity Compiler trait for resolc
2025-05-08 11:09:02 +02:00
Cyrill Leutwiler 8009f5880c update README.md
Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
2025-03-31 16:44:16 +02:00
xermicus c590fa7bfd Scaffold utility and library (#3)
Signed-off-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
Signed-off-by: xermicus <bigcyrill@hotmail.com>
2025-03-31 11:40:05 +02:00
41 changed files with 5244 additions and 959 deletions
+147
@@ -0,0 +1,147 @@
name: Test workflow
on:
push:
branches:
- main
pull_request:
branches:
- main
types: [opened, synchronize]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
CARGO_TERM_COLOR: always
jobs:
cache-polkadot:
name: Build and cache Polkadot binaries on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-24.04, macos-14]
steps:
- name: Checkout repo and submodules
uses: actions/checkout@v4
with:
submodules: recursive
- name: Install dependencies (Linux)
if: matrix.os == 'ubuntu-24.04'
run: |
sudo apt-get update
sudo apt-get install -y protobuf-compiler clang libclang-dev
rustup target add wasm32-unknown-unknown
rustup component add rust-src
- name: Install dependencies (macOS)
if: matrix.os == 'macos-14'
run: |
brew install protobuf
rustup target add wasm32-unknown-unknown
rustup component add rust-src
- name: Cache binaries
id: cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/substrate-node
~/.cargo/bin/eth-rpc
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
- name: Build substrate-node
if: steps.cache.outputs.cache-hit != 'true'
run: |
cd polkadot-sdk
cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
- name: Build eth-rpc
if: steps.cache.outputs.cache-hit != 'true'
run: |
cd polkadot-sdk
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
ci:
name: CI on ${{ matrix.os }}
needs: cache-polkadot
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-24.04, macos-14]
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Restore binaries from cache
uses: actions/cache@v3
with:
path: |
~/.cargo/bin/substrate-node
~/.cargo/bin/eth-rpc
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
rustflags: ""
- name: Add wasm32 target
run: |
rustup target add wasm32-unknown-unknown
rustup component add rust-src
- name: Install Geth on Ubuntu
if: matrix.os == 'ubuntu-24.04'
run: |
sudo apt-get update
sudo apt-get install -y protobuf-compiler
# We hit issues in CI with the 1.16.* versions of geth, specifically on Ubuntu. The last
# geth version that worked in our CI was 1.15.11, so that is the version we pin here. The
# PPA sadly does not carry historic versions of geth, so we resort to downloading the
# pre-built binaries for geth and the surrounding tools, which is what the rest of this
# script does.
sudo apt-get install -y wget ca-certificates tar
ARCH=$(uname -m)
if [ "$ARCH" = "x86_64" ]; then
URL="https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.15.11-36b2371c.tar.gz"
elif [ "$ARCH" = "aarch64" ]; then
URL="https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-arm64-1.15.11-36b2371c.tar.gz"
else
echo "Unsupported architecture: $ARCH"
exit 1
fi
wget -qO- "$URL" | sudo tar xz -C /usr/local/bin --strip-components=1
geth --version
- name: Install Geth on macOS
if: matrix.os == 'macos-14'
run: |
brew tap ethereum/ethereum
brew install ethereum protobuf
- name: Machete
uses: bnjbvr/cargo-machete@v0.7.1
- name: Format
run: make format
- name: Clippy
run: make clippy
- name: Check substrate-node version
run: substrate-node --version
- name: Check eth-rpc version
run: eth-rpc --version
- name: Test cargo workspace
run: make test
+4
@@ -3,3 +3,7 @@
.DS_Store
node_modules
/*.json
# We do not want to commit any log files that we produce from running the code locally so this is
# added to the .gitignore file.
*.log
+3
@@ -0,0 +1,3 @@
[submodule "polkadot-sdk"]
path = polkadot-sdk
url = https://github.com/paritytech/polkadot-sdk.git
Generated
+2303 -494
File diff suppressed because it is too large
+31 -13
@@ -4,9 +4,7 @@ members = ["crates/*"]
[workspace.package]
version = "0.1.0"
authors = [
"Parity Technologies <admin@parity.io>",
]
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT/Apache-2.0"
edition = "2024"
repository = "https://github.com/paritytech/revive-differential-testing.git"
@@ -20,30 +18,47 @@ revive-dt-format = { version = "0.1.0", path = "crates/format" }
revive-dt-node = { version = "0.1.0", path = "crates/node" }
revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interaction" }
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
alloy-primitives = "1.2.1"
alloy-sol-types = "1.2.1"
anyhow = "1.0"
clap = { version = "4", features = ["derive"] }
env_logger = "0.11.7"
futures = { version = "0.3.31" }
hex = "0.4.3"
reqwest = { version = "0.12.15", features = ["blocking", "json"] }
log = "0.4.26"
once_cell = "1.21"
rayon = { version = "1.10" }
semver = { version = "1.0", features = ["serde"] }
serde = { version = "1.0", default-features = false, features = ["derive"] }
serde_json = { version = "1.0", default-features = false, features = ["arbitrary_precision", "std"] }
sha2 = { version = "0.10.8" }
temp-dir = { version = "0.1.14" }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread"] }
serde_json = { version = "1.0", default-features = false, features = [
"arbitrary_precision",
"std",
] }
sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
tokio = { version = "1", default-features = false, features = [
"rt-multi-thread",
] }
uuid = { version = "1.8", features = ["v4"] }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", default-features = false, features = [
"fmt",
"json",
"env-filter",
] }
# revive compiler
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "497dae2494dabe12d1af32d6d687122903cb2ada" }
revive-common = { git = "https://github.com/paritytech/revive", rev = "497dae2494dabe12d1af32d6d687122903cb2ada" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "497dae2494dabe12d1af32d6d687122903cb2ada" }
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
[workspace.dependencies.alloy]
version = "0.13.0"
version = "1.0"
default-features = false
features = [
"json-abi",
@@ -54,6 +69,9 @@ features = [
"rpc-types",
"signer-local",
"std",
"network",
"serde",
"rpc-types-eth",
]
[profile.bench]
+15
@@ -0,0 +1,15 @@
.PHONY: format clippy test machete
format:
cargo fmt --all -- --check
clippy:
cargo clippy --all-features --workspace -- --deny warnings
machete:
cargo install cargo-machete
cargo machete crates
test: format clippy machete
cargo test --workspace -- --nocapture
+33 -1
@@ -1,2 +1,34 @@
# revive-differential-tests
revive differential testing framework
The revive differential testing framework lets you define smart contract tests in a declarative manner and then compile and execute them against different Ethereum-compatible blockchain implementations. This is useful to:
- Analyze observable differences in contract compilation and execution across different blockchain implementations, including contract storage, account balances, transaction output, and emitted events on a per-transaction basis.
- Collect and compare benchmark metrics such as code size, gas usage, or transaction throughput in transactions per second (TPS) across different blockchain implementations.
- Ensure reproducible contract builds across multiple compiler implementations or multiple host platforms.
- Implement end-to-end regression tests for Ethereum-compatible smart contract stacks.
# Declarative test format
For now, the format used to write tests is the [matter-labs era compiler format](https://github.com/matter-labs/era-compiler-tests?tab=readme-ov-file#matter-labs-simplecomplex-format). This allows us to re-use many tests from their corpora; a rough sketch of such a test file is shown after this README excerpt.
# The `retester` utility
The `retester` helper utility is used to run the tests. To get an idea of what `retester` can do, please consult its command-line help:
```
cargo run -p revive-dt-core -- --help
```
For example, to run the [complex Solidity tests](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity/complex), define a corpus structure as follows:
```json
{
"name": "ML Solidity Complex",
"path": "/path/to/era-compiler-tests/solidity/complex"
}
```
Assuming this is saved in an `ml-solidity-complex.json` file, the following command will try to compile and execute the tests found in the corpus:
```bash
RUST_LOG=debug cargo r --release -p revive-dt-core -- --corpus ml-solidity-complex.json
```
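For orientation, here is a minimal, hedged sketch of what a metadata file in this format might look like. The field names (`instance`, `method`, `calldata`, and the special `#deployer`/`#fallback` methods) are inferred from the `Input` and `Method` types changed further down in this compare view; the case and contract names are hypothetical, and the exact layout of a real era-compiler-tests file may differ:

```json
{
  "cases": [
    {
      "name": "store_and_read",
      "inputs": [
        {
          "comment": "Deploy the contract; `#deployer` calls the constructor",
          "instance": "Storage",
          "method": "#deployer",
          "calldata": []
        },
        {
          "comment": "Call store(42); each argument is encoded as a 32-byte word",
          "instance": "Storage",
          "method": "store",
          "calldata": ["42"]
        },
        {
          "comment": "Contract addresses can be passed as `<instance>.address`",
          "instance": "Storage",
          "method": "owner",
          "calldata": ["Storage.address"]
        }
      ]
    }
  ]
}
```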
+3
@@ -11,6 +11,9 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
revive-solc-json-interface = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-solc-binaries = { workspace = true }
revive-common = { workspace = true }
semver = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
+23 -4
@@ -9,6 +9,8 @@ use std::{
path::{Path, PathBuf},
};
use revive_dt_config::Arguments;
use revive_common::EVMVersion;
use revive_solc_json_interface::{
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
@@ -33,6 +35,8 @@ pub trait SolidityCompiler {
) -> anyhow::Result<CompilerOutput<Self::Options>>;
fn new(solc_executable: PathBuf) -> Self;
fn get_compiler_executable(config: &Arguments, version: Version) -> anyhow::Result<PathBuf>;
}
/// The generic compilation input configuration.
@@ -40,12 +44,19 @@ pub trait SolidityCompiler {
pub struct CompilerInput<T: PartialEq + Eq + Hash> {
pub extra_options: T,
pub input: SolcStandardJsonInput,
pub allow_paths: Vec<PathBuf>,
pub base_path: Option<PathBuf>,
}
/// The generic compilation output configuration.
#[derive(Debug)]
pub struct CompilerOutput<T: PartialEq + Eq + Hash> {
/// The solc standard JSON input.
pub input: CompilerInput<T>,
/// The produced solc standard JSON output.
pub output: SolcStandardJsonOutput,
/// The error message in case the compiler returns abnormally.
pub error: Option<String>,
}
impl<T> PartialEq for CompilerInput<T>
@@ -75,8 +86,8 @@ where
pub struct Compiler<T: SolidityCompiler> {
input: SolcStandardJsonInput,
extra_options: T::Options,
allow_paths: Vec<String>,
base_path: Option<String>,
allow_paths: Vec<PathBuf>,
base_path: Option<PathBuf>,
}
impl Default for Compiler<solc::Solc> {
@@ -106,6 +117,7 @@ where
false,
),
None,
None,
),
},
extra_options: Default::default(),
@@ -136,12 +148,12 @@ where
self
}
pub fn allow_path(mut self, path: String) -> Self {
pub fn allow_path(mut self, path: PathBuf) -> Self {
self.allow_paths.push(path);
self
}
pub fn base_path(mut self, base_path: String) -> Self {
pub fn base_path(mut self, base_path: PathBuf) -> Self {
self.base_path = Some(base_path);
self
}
@@ -150,6 +162,13 @@ where
T::new(solc_path).build(CompilerInput {
extra_options: self.extra_options,
input: self.input,
allow_paths: self.allow_paths,
base_path: self.base_path,
})
}
/// Returns the compiler JSON input.
pub fn input(&self) -> SolcStandardJsonInput {
self.input.clone()
}
}
+154 -2
@@ -1,2 +1,154 @@
//! Implements the [crate::SolidityCompiler] trait with resolc for
//! compiling contracts to PVM bytecode.
//! Implements the [SolidityCompiler] trait with `resolc` for
//! compiling contracts to PolkaVM (PVM) bytecode.
use std::{
path::PathBuf,
process::{Command, Stdio},
};
use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
use revive_dt_config::Arguments;
use revive_solc_json_interface::SolcStandardJsonOutput;
/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
#[derive(Debug)]
pub struct Resolc {
/// Path to the `resolc` executable
resolc_path: PathBuf,
}
impl SolidityCompiler for Resolc {
type Options = Vec<String>;
#[tracing::instrument(level = "debug", ret)]
fn build(
&self,
input: CompilerInput<Self::Options>,
) -> anyhow::Result<CompilerOutput<Self::Options>> {
let mut command = Command::new(&self.resolc_path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--standard-json");
if let Some(ref base_path) = input.base_path {
command.arg("--base-path").arg(base_path);
}
if !input.allow_paths.is_empty() {
command.arg("--allow-paths").arg(
input
.allow_paths
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<_>>()
.join(","),
);
}
let mut child = command.spawn()?;
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
serde_json::to_writer(stdin_pipe, &input.input)?;
let json_in = serde_json::to_string_pretty(&input.input)?;
let output = child.wait_with_output()?;
let stdout = output.stdout;
let stderr = output.stderr;
if !output.status.success() {
let message = String::from_utf8_lossy(&stderr);
tracing::error!(
"resolc failed exit={} stderr={} JSON-in={} ",
output.status,
&message,
json_in,
);
return Ok(CompilerOutput {
input,
output: Default::default(),
error: Some(message.into()),
});
}
let mut parsed =
serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr)
)
})?;
// Detect whether the compiler output contains errors and report them through logs and
// returned errors instead of passing along a compiler output that might contain errors.
for error in parsed.errors.iter().flatten() {
if error.severity == "error" {
tracing::error!(?error, ?input, "Encountered an error in the compilation");
anyhow::bail!("Encountered an error in the compilation: {error}")
}
}
// We need to do some post-processing on the output to bring it into the same format that solc
// outputs. More specifically, for each contract, the `.metadata` field should be replaced
// with the `.metadata.solc_metadata` field which contains the ABI and other information
// about the compiled contracts. We do this because we do not want any downstream logic to
// need to differentiate between which compiler is being used when extracting the ABI of the
// contracts.
if let Some(ref mut contracts) = parsed.contracts {
for (contract_path, contracts_map) in contracts.iter_mut() {
for (contract_name, contract_info) in contracts_map.iter_mut() {
let Some(metadata) = contract_info.metadata.take() else {
continue;
};
// Get the `solc_metadata` in the metadata of the contract.
let Some(solc_metadata) = metadata
.get("solc_metadata")
.and_then(|metadata| metadata.as_str())
else {
tracing::error!(
contract_path,
contract_name,
metadata = serde_json::to_string(&metadata).unwrap(),
"Encountered a contract compiled with resolc that has no solc_metadata"
);
anyhow::bail!(
"Contract {} compiled with resolc that has no solc_metadata",
contract_name
);
};
// Replace the original metadata with the new solc_metadata.
contract_info.metadata =
Some(serde_json::Value::String(solc_metadata.to_string()));
}
}
}
tracing::debug!(
output = %serde_json::to_string(&parsed).unwrap(),
"Compiled successfully"
);
Ok(CompilerOutput {
input,
output: parsed,
error: None,
})
}
fn new(resolc_path: PathBuf) -> Self {
Resolc { resolc_path }
}
fn get_compiler_executable(
config: &Arguments,
_version: semver::Version,
) -> anyhow::Result<PathBuf> {
if !config.resolc.as_os_str().is_empty() {
return Ok(config.resolc.clone());
}
Ok(PathBuf::from("resolc"))
}
}
+66 -5
@@ -7,7 +7,11 @@ use std::{
};
use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
use revive_dt_config::Arguments;
use revive_dt_solc_binaries::download_solc;
use revive_solc_json_interface::SolcStandardJsonOutput;
#[derive(Debug)]
pub struct Solc {
solc_path: PathBuf,
}
@@ -15,28 +19,85 @@ pub struct Solc {
impl SolidityCompiler for Solc {
type Options = ();
#[tracing::instrument(level = "debug", ret)]
fn build(
&self,
input: CompilerInput<Self::Options>,
) -> anyhow::Result<CompilerOutput<Self::Options>> {
let mut child = Command::new(&self.solc_path)
let mut command = Command::new(&self.solc_path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--standard-json")
.spawn()?;
.arg("--standard-json");
if let Some(ref base_path) = input.base_path {
command.arg("--base-path").arg(base_path);
}
if !input.allow_paths.is_empty() {
command.arg("--allow-paths").arg(
input
.allow_paths
.iter()
.map(|path| path.display().to_string())
.collect::<Vec<_>>()
.join(","),
);
}
let mut child = command.spawn()?;
let stdin = child.stdin.as_mut().expect("should be piped");
serde_json::to_writer(stdin, &input.input)?;
let output = child.wait_with_output()?;
if !output.status.success() {
let message = String::from_utf8_lossy(&output.stderr);
tracing::error!("solc failed exit={} stderr={}", output.status, &message);
return Ok(CompilerOutput {
input,
output: Default::default(),
error: Some(message.into()),
});
}
let parsed =
serde_json::from_slice::<SolcStandardJsonOutput>(&output.stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&output.stdout)
)
})?;
// Detect whether the compiler output contains errors and report them through logs and
// returned errors instead of passing along a compiler output that might contain errors.
for error in parsed.errors.iter().flatten() {
if error.severity == "error" {
tracing::error!(?error, ?input, "Encountered an error in the compilation");
anyhow::bail!("Encountered an error in the compilation: {error}")
}
}
tracing::debug!(
output = %String::from_utf8_lossy(&output.stdout).to_string(),
"Compiled successfully"
);
let output = child.wait_with_output()?.stdout;
Ok(CompilerOutput {
input,
output: serde_json::from_slice(&output)?,
output: parsed,
error: None,
})
}
fn new(solc_path: PathBuf) -> Self {
Self { solc_path }
}
fn get_compiler_executable(
config: &Arguments,
version: semver::Version,
) -> anyhow::Result<PathBuf> {
let path = download_solc(config.directory(), version, config.wasm)?;
Ok(path)
}
}
+2
@@ -13,3 +13,5 @@ alloy = { workspace = true }
clap = { workspace = true }
semver = { workspace = true }
temp-dir = { workspace = true }
serde = { workspace = true }
+34 -4
@@ -1,13 +1,17 @@
//! The global configuration used accross all revive differential testing crates.
//! The global configuration used across all revive differential testing crates.
use std::path::{Path, PathBuf};
use std::{
fmt::Display,
path::{Path, PathBuf},
};
use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
use clap::{Parser, ValueEnum};
use semver::Version;
use serde::{Deserialize, Serialize};
use temp_dir::TempDir;
#[derive(Debug, Parser, Clone)]
#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
#[command(name = "retester")]
pub struct Arguments {
/// The `solc` version to use if the test didn't specify it explicitly.
@@ -40,6 +44,7 @@ pub struct Arguments {
///
/// We attach it here because [TempDir] prunes itself on drop.
#[clap(skip)]
#[serde(skip)]
pub temp_dir: Option<&'static TempDir>,
/// The path to the `geth` executable.
@@ -83,6 +88,22 @@ pub struct Arguments {
/// Determines the amount of tests that are executed in parallel.
#[arg(long = "workers", default_value = "12")]
pub workers: usize,
/// Extract problems back to the test corpus.
#[arg(short, long = "extract-problems")]
pub extract_problems: bool,
/// The path to the `kitchensink` executable.
///
/// By default it uses `substrate-node` binary found in `$PATH`.
#[arg(short, long = "kitchensink", default_value = "substrate-node")]
pub kitchensink: PathBuf,
/// The path to the `eth_proxy` executable.
///
/// By default it uses `eth-rpc` binary found in `$PATH`.
#[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
pub eth_proxy: PathBuf,
}
impl Arguments {
@@ -124,7 +145,7 @@ impl Default for Arguments {
/// The Solidity compatible node implementation.
///
/// This describes the solutions to be tested against on a high level.
#[derive(Clone, Debug, Eq, Hash, PartialEq, ValueEnum)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, ValueEnum, Serialize, Deserialize)]
#[clap(rename_all = "lower")]
pub enum TestingPlatform {
/// The go-ethereum reference full node EVM implementation.
@@ -132,3 +153,12 @@ pub enum TestingPlatform {
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink,
}
impl Display for TestingPlatform {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Geth => f.write_str("geth"),
Self::Kitchensink => f.write_str("revive"),
}
}
}
+3 -5
@@ -18,16 +18,14 @@ revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-solc-binaries = { workspace = true }
revive-dt-report = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
log = { workspace = true }
env_logger = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
rayon = { workspace = true }
revive-solc-json-interface = { workspace = true }
semver = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
serde_json = { workspace = true }
temp-dir = { workspace = true }
+456 -35
@@ -1,15 +1,25 @@
//! The test driver handles the compilation and execution of the test cases.
use alloy::json_abi::JsonAbi;
use alloy::network::TransactionBuilder;
use alloy::rpc::types::TransactionReceipt;
use alloy::rpc::types::trace::geth::GethTrace;
use alloy::{
primitives::{Address, map::HashMap},
rpc::types::trace::geth::GethTrace,
rpc::types::{
TransactionRequest,
trace::geth::{AccountState, DiffMode},
},
};
use revive_dt_compiler::{Compiler, CompilerInput, SolidityCompiler};
use revive_dt_config::Arguments;
use revive_dt_format::{input::Input, metadata::Metadata, mode::SolcMode};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_solc_binaries::download_solc;
use revive_dt_report::reporter::{CompilationTask, Report, Span};
use revive_solc_json_interface::SolcStandardJsonOutput;
use serde_json::Value;
use std::collections::HashMap as StdHashMap;
use tracing::Level;
use crate::Platform;
@@ -20,58 +30,299 @@ type Contracts<T> = HashMap<
pub struct State<'a, T: Platform> {
config: &'a Arguments,
span: Span,
contracts: Contracts<T>,
deployed_contracts: HashMap<String, Address>,
deployed_contracts: StdHashMap<String, Address>,
deployed_abis: StdHashMap<String, JsonAbi>,
}
impl<'a, T> State<'a, T>
where
T: Platform,
{
pub fn new(config: &'a Arguments) -> Self {
pub fn new(config: &'a Arguments, span: Span) -> Self {
Self {
config,
span,
contracts: Default::default(),
deployed_contracts: Default::default(),
deployed_abis: Default::default(),
}
}
/// Returns a copy of the current span.
fn span(&self) -> Span {
self.span
}
pub fn build_contracts(&mut self, mode: &SolcMode, metadata: &Metadata) -> anyhow::Result<()> {
let mut span = self.span();
span.next_metadata(
metadata
.file_path
.as_ref()
.expect("metadata should have been read from a file")
.clone(),
);
let Some(version) = mode.last_patch_version(&self.config.solc) else {
anyhow::bail!("unsupported solc version: {:?}", mode.solc_version);
anyhow::bail!("unsupported solc version: {:?}", &mode.solc_version);
};
let sources = metadata.contract_sources()?;
let base_path = metadata.directory()?.display().to_string();
let mut compiler = Compiler::<T::Compiler>::new().base_path(base_path.clone());
for (file, _contract) in sources.values() {
log::debug!("contract source {}", file.display());
compiler = compiler.with_source(file)?;
let compiler = Compiler::<T::Compiler>::new()
.allow_path(metadata.directory()?)
.solc_optimizer(mode.solc_optimize());
let compiler = FilesWithExtensionIterator::new(metadata.directory()?)
.with_allowed_extension("sol")
.try_fold(compiler, |compiler, path| compiler.with_source(&path))?;
let mut task = CompilationTask {
json_input: compiler.input(),
json_output: None,
mode: mode.clone(),
compiler_version: format!("{}", &version),
error: None,
};
let compiler_path = T::Compiler::get_compiler_executable(self.config, version)?;
match compiler.try_build(compiler_path) {
Ok(output) => {
task.json_output = Some(output.output.clone());
task.error = output.error;
self.contracts.insert(output.input, output.output);
if let Some(last_output) = self.contracts.values().last() {
if let Some(contracts) = &last_output.contracts {
for (file, contracts_map) in contracts {
for contract_name in contracts_map.keys() {
tracing::debug!(
"Compiled contract: {contract_name} from file: {file}"
);
}
}
} else {
tracing::warn!("Compiled contracts field is None");
}
}
Report::compilation(span, T::config_id(), task);
Ok(())
}
Err(error) => {
tracing::error!("Failed to compile contract: {:?}", error.to_string());
task.error = Some(error.to_string());
Err(error)
}
}
let solc_path = download_solc(self.config.directory(), version, self.config.wasm)?;
let output = compiler
.solc_optimizer(mode.solc_optimize())
.try_build(solc_path)?;
self.contracts.insert(output.input, output.output);
Ok(())
}
pub fn execute_input(
&mut self,
input: &Input,
node: &T::Blockchain,
) -> anyhow::Result<GethTrace> {
let receipt = node.execute_transaction(input.legacy_transaction(
self.config.network_id,
0,
&self.deployed_contracts,
)?)?;
dbg!(&receipt);
//node.trace_transaction(receipt)
todo!()
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
tracing::trace!("Calling execute_input for input: {input:?}");
let nonce = node.fetch_add_nonce(input.caller)?;
tracing::debug!(
"Nonce calculated on the execute contract, calculated nonce {}, for contract {}, having address {} on node: {}",
&nonce,
&input.instance,
&input.caller,
std::any::type_name::<T>()
);
let tx =
match input.legacy_transaction(nonce, &self.deployed_contracts, &self.deployed_abis) {
Ok(tx) => {
tracing::debug!("Legacy transaction data: {tx:#?}");
tx
}
Err(err) => {
tracing::error!("Failed to construct legacy transaction: {err:?}");
return Err(err);
}
};
tracing::trace!("Executing transaction for input: {input:?}");
let receipt = match node.execute_transaction(tx) {
Ok(receipt) => receipt,
Err(err) => {
tracing::error!(
"Failed to execute transaction when executing the contract: {}, {:?}",
&input.instance,
err
);
return Err(err);
}
};
tracing::trace!(
"Transaction receipt for executed contract: {} - {:?}",
&input.instance,
receipt,
);
let trace = node.trace_transaction(receipt.clone())?;
tracing::trace!(
"Trace result for contract: {} - {:?}",
&input.instance,
trace
);
let diff = node.state_diff(receipt.clone())?;
Ok((receipt, trace, diff))
}
pub fn deploy_contracts(&mut self, input: &Input, node: &T::Blockchain) -> anyhow::Result<()> {
let tracing_span = tracing::debug_span!(
"Deploying contracts",
?input,
node = std::any::type_name::<T>()
);
let _guard = tracing_span.enter();
tracing::debug!(number_of_contracts_to_deploy = self.contracts.len());
for output in self.contracts.values() {
let Some(contract_map) = &output.contracts else {
tracing::debug!(
"No contracts in output — skipping deployment for this input {}",
&input.instance
);
continue;
};
for contracts in contract_map.values() {
for (contract_name, contract) in contracts {
let tracing_span = tracing::info_span!("Deploying contract", contract_name);
let _guard = tracing_span.enter();
tracing::debug!(
"Contract name is: {:?} and the input name is: {:?}",
&contract_name,
&input.instance
);
let bytecode = contract
.evm
.as_ref()
.and_then(|evm| evm.bytecode.as_ref())
.map(|b| b.object.clone());
let Some(code) = bytecode else {
tracing::error!("no bytecode for contract {contract_name}");
continue;
};
let nonce = node.fetch_add_nonce(input.caller)?;
tracing::debug!(
"Calculated nonce {}, for contract {}, having address {} on node: {}",
&nonce,
&input.instance,
&input.caller,
std::any::type_name::<T>()
);
// We are using alloy for building and submitting the transactions and it will
// automatically fill in all of the missing fields from the provider that we
// are using.
let code = alloy::hex::decode(&code)?;
let tx = TransactionRequest::default()
.nonce(nonce)
.from(input.caller)
.with_deploy_code(code);
let receipt = match node.execute_transaction(tx) {
Ok(receipt) => receipt,
Err(err) => {
tracing::error!(
"Failed to execute transaction when deploying the contract on node : {:?}, {:?}, {:?}",
std::any::type_name::<T>(),
&contract_name,
err
);
return Err(err);
}
};
tracing::debug!(
"Deployment tx sent for {} with nonce {} → tx hash: {:?}, on node: {:?}",
contract_name,
nonce,
receipt.transaction_hash,
std::any::type_name::<T>(),
);
tracing::trace!(
"Deployed transaction receipt for contract: {} - {:?}, on node: {:?}",
&contract_name,
receipt,
std::any::type_name::<T>(),
);
let Some(address) = receipt.contract_address else {
tracing::error!(
"contract {contract_name} deployment did not return an address"
);
continue;
};
self.deployed_contracts
.insert(contract_name.clone(), address);
tracing::trace!(
"deployed contract `{}` at {:?}, on node {:?}",
contract_name,
address,
std::any::type_name::<T>()
);
let Some(Value::String(metadata)) = &contract.metadata else {
tracing::error!(?contract, "Contract does not have a metadata field");
anyhow::bail!("Contract does not have a metadata field: {contract:?}");
};
// Deserialize the solc metadata into a JSON object so we can get the ABI of the
// contracts. If we fail to perform the deserialization then we return an error
// as there's no other way to handle this.
let Ok(metadata) = serde_json::from_str::<Value>(metadata) else {
tracing::error!(%metadata, "Failed to parse solc metadata into a structured value");
anyhow::bail!(
"Failed to parse solc metadata into a structured value {metadata}"
);
};
// Access the ABI on the solc metadata, erroring out if the access fails.
let Some(abi) = metadata.get("output").and_then(|value| value.get("abi"))
else {
tracing::error!(%metadata, "Failed to access the .output.abi field of the solc metadata");
anyhow::bail!(
"Failed to access the .output.abi field of the solc metadata {metadata}"
);
};
// Deserialize the ABI object that we got from the unstructured JSON into a
// structured ABI object and error out if we fail.
let Ok(abi) = serde_json::from_value::<JsonAbi>(abi.clone()) else {
tracing::error!(%metadata, "Failed to deserialize ABI into a structured format");
anyhow::bail!(
"Failed to deserialize ABI into a structured format {metadata}"
);
};
self.deployed_abis.insert(contract_name.clone(), abi);
}
}
}
tracing::debug!("Available contracts: {:?}", self.deployed_contracts.keys());
Ok(())
}
}
@@ -101,18 +352,114 @@ where
}
}
pub fn execute(&mut self) -> anyhow::Result<()> {
pub fn trace_diff_mode(label: &str, diff: &DiffMode) {
tracing::trace!("{label} - PRE STATE:");
for (addr, state) in &diff.pre {
Self::trace_account_state(" [pre]", addr, state);
}
tracing::trace!("{label} - POST STATE:");
for (addr, state) in &diff.post {
Self::trace_account_state(" [post]", addr, state);
}
}
fn trace_account_state(prefix: &str, addr: &Address, state: &AccountState) {
tracing::trace!("{prefix} 0x{addr:x}");
if let Some(balance) = &state.balance {
tracing::trace!("{prefix} balance: {balance}");
}
if let Some(nonce) = &state.nonce {
tracing::trace!("{prefix} nonce: {nonce}");
}
if let Some(code) = &state.code {
tracing::trace!("{prefix} code: {code}");
}
}
pub fn execute(&mut self, span: Span) -> anyhow::Result<()> {
for mode in self.metadata.solc_modes() {
let mut leader_state = State::<L>::new(self.config);
let mut leader_state = State::<L>::new(self.config, span);
leader_state.build_contracts(&mode, self.metadata)?;
let mut follower_state = State::<F>::new(self.config);
let mut follower_state = State::<F>::new(self.config, span);
follower_state.build_contracts(&mode, self.metadata)?;
for case in &self.metadata.cases {
for (case_idx, case) in self.metadata.cases.iter().enumerate() {
// Creating a tracing span to know which case within the metadata is being executed
// and which one we're getting logs for.
let tracing_span = tracing::span!(
Level::INFO,
"Executing case",
case = case.name,
case_idx = case_idx
);
let _guard = tracing_span.enter();
for input in &case.inputs {
let _ = leader_state.execute_input(input, self.leader_node)?;
let _ = follower_state.execute_input(input, self.follower_node)?;
tracing::debug!("Starting deploying contract {}", &input.instance);
if let Err(err) = leader_state.deploy_contracts(input, self.leader_node) {
tracing::error!("Leader deployment failed for {}: {err}", input.instance);
continue;
} else {
tracing::debug!("Leader deployment succeeded for {}", &input.instance);
}
if let Err(err) = follower_state.deploy_contracts(input, self.follower_node) {
tracing::error!("Follower deployment failed for {}: {err}", input.instance);
continue;
} else {
tracing::debug!("Follower deployment succeeded for {}", &input.instance);
}
tracing::debug!("Starting executing contract {}", &input.instance);
let (leader_receipt, _, leader_diff) =
match leader_state.execute_input(input, self.leader_node) {
Ok(result) => result,
Err(err) => {
tracing::error!(
"Leader execution failed for {}: {err}",
input.instance
);
continue;
}
};
let (follower_receipt, _, follower_diff) =
match follower_state.execute_input(input, self.follower_node) {
Ok(result) => result,
Err(err) => {
tracing::error!(
"Follower execution failed for {}: {err}",
input.instance
);
continue;
}
};
if leader_diff == follower_diff {
tracing::debug!("State diffs match between leader and follower.");
} else {
tracing::debug!("State diffs mismatch between leader and follower.");
Self::trace_diff_mode("Leader", &leader_diff);
Self::trace_diff_mode("Follower", &follower_diff);
}
if leader_receipt.logs() != follower_receipt.logs() {
tracing::debug!("Log/event mismatch between leader and follower.");
tracing::trace!("Leader logs: {:?}", leader_receipt.logs());
tracing::trace!("Follower logs: {:?}", follower_receipt.logs());
}
if leader_receipt.status() != follower_receipt.status() {
tracing::debug!(
"Mismatch in status: leader = {}, follower = {}",
leader_receipt.status(),
follower_receipt.status()
);
}
}
}
}
@@ -120,3 +467,77 @@ where
Ok(())
}
}
/// An iterator that finds files of a certain extension in the provided directory. You can think of
/// this as a glob pattern similar to: `${path}/**/*.md`
struct FilesWithExtensionIterator {
/// The set of allowed extensions that match the requirement and that should be returned
/// when found.
allowed_extensions: std::collections::HashSet<std::borrow::Cow<'static, str>>,
/// The set of directories to visit next. This iterator does BFS and so these directories will
/// only be visited if we can't find any files in our state.
directories_to_search: Vec<std::path::PathBuf>,
/// The set of files matching the allowed extensions that were found. If there are entries in
/// this vector then they will be returned when the [`Iterator::next`] method is called. If not
/// then we visit one of the remaining directories to search.
///
/// [`Iterator`]: std::iter::Iterator
files_matching_allowed_extensions: Vec<std::path::PathBuf>,
}
impl FilesWithExtensionIterator {
fn new(root_directory: std::path::PathBuf) -> Self {
Self {
allowed_extensions: Default::default(),
directories_to_search: vec![root_directory],
files_matching_allowed_extensions: Default::default(),
}
}
fn with_allowed_extension(
mut self,
allowed_extension: impl Into<std::borrow::Cow<'static, str>>,
) -> Self {
self.allowed_extensions.insert(allowed_extension.into());
self
}
}
impl Iterator for FilesWithExtensionIterator {
type Item = std::path::PathBuf;
fn next(&mut self) -> Option<Self::Item> {
if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
return Some(file_path);
};
let directory_to_search = self.directories_to_search.pop()?;
// Read all of the entries in the directory. If we fail to read this directory's entries, we
// intentionally ignore it and move on to the next directory by calling `next` again on the
// iterator, rather than panicking.
let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
return self.next();
};
for entry in dir_entries.flatten() {
let entry_path = entry.path();
if entry_path.is_dir() {
self.directories_to_search.push(entry_path)
} else if entry_path.is_file()
&& entry_path.extension().is_some_and(|ext| {
self.allowed_extensions
.iter()
.any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
})
{
self.files_matching_allowed_extensions.push(entry_path)
}
}
self.next()
}
}
+16 -4
@@ -3,8 +3,9 @@
//! This crate defines the testing configuration and
//! provides a helper utilty to execute tests.
use revive_dt_compiler::{SolidityCompiler, solc};
use revive_dt_node::geth;
use revive_dt_compiler::{SolidityCompiler, revive_resolc, solc};
use revive_dt_config::TestingPlatform;
use revive_dt_node::{geth, kitchensink::KitchensinkNode};
use revive_dt_node_interaction::EthereumNode;
pub mod driver;
@@ -15,6 +16,9 @@ pub mod driver;
pub trait Platform {
type Blockchain: EthereumNode;
type Compiler: SolidityCompiler;
/// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
fn config_id() -> TestingPlatform;
}
#[derive(Default)]
@@ -23,12 +27,20 @@ pub struct Geth;
impl Platform for Geth {
type Blockchain = geth::Instance;
type Compiler = solc::Solc;
fn config_id() -> TestingPlatform {
TestingPlatform::Geth
}
}
#[derive(Default)]
pub struct Kitchensink;
impl Platform for Kitchensink {
type Blockchain = geth::Instance;
type Compiler = solc::Solc;
type Blockchain = KitchensinkNode;
type Compiler = revive_resolc::Resolc;
fn config_id() -> TestingPlatform {
TestingPlatform::Kitchensink
}
}
+104 -52
@@ -5,117 +5,169 @@ use rayon::{ThreadPoolBuilder, prelude::*};
use revive_dt_config::*;
use revive_dt_core::{
Geth,
Geth, Kitchensink, Platform,
driver::{Driver, State},
};
use revive_dt_format::{corpus::Corpus, metadata::Metadata};
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
use revive_dt_node::pool::NodePool;
use revive_dt_report::reporter::{Report, Span};
use temp_dir::TempDir;
use tracing::Level;
use tracing_subscriber::{EnvFilter, FmtSubscriber, fmt::format::FmtSpan};
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
fn main() -> anyhow::Result<()> {
let args = init_cli()?;
let corpora = collect_corpora(&args)?;
for (corpus, tests) in collect_corpora(&args)? {
let span = Span::new(corpus, args.clone())?;
if let Some(platform) = &args.compile_only {
for tests in corpora.values() {
main_compile_only(&args, tests, platform)?;
match &args.compile_only {
Some(platform) => compile_corpus(&args, &tests, platform, span),
None => execute_corpus(&args, &tests, span)?,
}
return Ok(());
}
for tests in corpora.values() {
main_execute_differential(&args, tests)?;
Report::save()?;
}
Ok(())
}
fn init_cli() -> anyhow::Result<Arguments> {
env_logger::init();
let subscriber = FmtSubscriber::builder()
.with_thread_ids(true)
.with_thread_names(true)
.with_env_filter(EnvFilter::from_default_env())
.with_span_events(FmtSpan::ENTER | FmtSpan::CLOSE)
.pretty()
.finish();
tracing::subscriber::set_global_default(subscriber)?;
let mut args = Arguments::parse();
if args.corpus.is_empty() {
anyhow::bail!("no test corpus specified");
}
if args.working_directory.is_none() {
args.temp_dir = Some(&TEMP_DIR);
match args.working_directory.as_ref() {
Some(dir) => {
if !dir.exists() {
anyhow::bail!("workdir {} does not exist", dir.display());
}
}
None => {
args.temp_dir = Some(&TEMP_DIR);
}
}
tracing::info!("workdir: {}", args.directory().display());
ThreadPoolBuilder::new()
.num_threads(args.workers)
.build_global()
.unwrap();
.build_global()?;
Ok(args)
}
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<Metadata>>> {
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
let mut corpora = HashMap::new();
for path in &args.corpus {
let corpus = Corpus::try_from_path(path)?;
log::info!("found corpus: {}", path.display());
tracing::info!("found corpus: {}", path.display());
let tests = corpus.enumerate_tests();
log::info!("corpus '{}' contains {} tests", &corpus.name, tests.len());
tracing::info!("corpus '{}' contains {} tests", &corpus.name, tests.len());
corpora.insert(corpus, tests);
}
Ok(corpora)
}
fn main_execute_differential(args: &Arguments, tests: &[Metadata]) -> anyhow::Result<()> {
let leader_nodes = NodePool::new(args)?;
let follower_nodes = NodePool::new(args)?;
fn run_driver<L, F>(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let leader_nodes = NodePool::<L::Blockchain>::new(args)?;
let follower_nodes = NodePool::<F::Blockchain>::new(args)?;
tests.par_iter().for_each(|metadata| {
let mut driver = match (&args.leader, &args.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => Driver::<Geth, Geth>::new(
tests.par_iter().for_each(
|MetadataFile {
content: metadata,
path: metadata_file_path,
}| {
// Starting a new tracing span for this metadata file. This allows our logs to be clear
// about which metadata file the logs belong to. We can add other information into this
// as well to be able to associate the logs with the correct metadata file and case
// that's being executed.
let tracing_span = tracing::span!(
Level::INFO,
"Running driver",
metadata_file_path = metadata_file_path.display().to_string(),
);
let _guard = tracing_span.enter();
let mut driver = Driver::<L, F>::new(
metadata,
args,
leader_nodes.round_robbin(),
follower_nodes.round_robbin(),
),
_ => unimplemented!(),
};
);
match driver.execute() {
Ok(build) => {
log::info!(
"metadata {} success",
metadata.directory().as_ref().unwrap().display()
);
build
match driver.execute(span) {
Ok(_) => {
tracing::info!(
"metadata {} success",
metadata.directory().as_ref().unwrap().display()
);
}
Err(error) => {
tracing::warn!(
"metadata {} failure: {error:?}",
metadata.file_path.as_ref().unwrap().display()
);
}
}
Err(error) => {
log::warn!(
"metadata {} failure: {error:?}",
metadata.file_path.as_ref().unwrap().display()
);
}
}
});
},
);
Ok(())
}
fn main_compile_only(
fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> {
match (&args.leader, &args.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(args, tests, span)?
}
(TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(args, tests, span)?
}
_ => unimplemented!(),
}
Ok(())
}
fn compile_corpus(
config: &Arguments,
tests: &[Metadata],
tests: &[MetadataFile],
platform: &TestingPlatform,
) -> anyhow::Result<()> {
span: Span,
) {
tests.par_iter().for_each(|metadata| {
for mode in &metadata.solc_modes() {
let mut state = match platform {
TestingPlatform::Geth => State::<Geth>::new(config),
_ => todo!(),
match platform {
TestingPlatform::Geth => {
let mut state = State::<Geth>::new(config, span);
let _ = state.build_contracts(mode, metadata);
}
TestingPlatform::Kitchensink => {
let mut state = State::<Kitchensink>::new(config, span);
let _ = state.build_contracts(mode, metadata);
}
};
let _ = state.build_contracts(mode, metadata);
}
});
Ok(())
}
+4 -2
@@ -10,8 +10,10 @@ rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
alloy-primitives = { workspace = true }
alloy-sol-types = { workspace = true }
anyhow = { workspace = true }
log = { workspace = true }
tracing = { workspace = true }
semver = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
+8 -8
@@ -3,11 +3,11 @@ use std::{
path::{Path, PathBuf},
};
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use crate::metadata::Metadata;
use crate::metadata::MetadataFile;
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Hash)]
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub struct Corpus {
pub name: String,
pub path: PathBuf,
@@ -21,7 +21,7 @@ impl Corpus {
}
/// Scan the corpus base directory and return all tests found.
pub fn enumerate_tests(&self) -> Vec<Metadata> {
pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
let mut tests = Vec::new();
collect_metadata(&self.path, &mut tests);
tests
@@ -34,11 +34,11 @@ impl Corpus {
/// Found tests are inserted into `tests`.
///
/// `path` is expected to be a directory.
pub fn collect_metadata(path: &Path, tests: &mut Vec<Metadata>) {
pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
let dir_entry = match std::fs::read_dir(path) {
Ok(dir_entry) => dir_entry,
Err(error) => {
log::error!("failed to read dir '{}': {error}", path.display());
tracing::error!("failed to read dir '{}': {error}", path.display());
return;
}
};
@@ -47,7 +47,7 @@ pub fn collect_metadata(path: &Path, tests: &mut Vec<Metadata>) {
let entry = match entry {
Ok(entry) => entry,
Err(error) => {
log::error!("error reading dir entry: {error}");
tracing::error!("error reading dir entry: {error}");
continue;
}
};
@@ -59,7 +59,7 @@ pub fn collect_metadata(path: &Path, tests: &mut Vec<Metadata>) {
}
if path.is_file() {
if let Some(metadata) = Metadata::try_from_file(&path) {
if let Some(metadata) = MetadataFile::try_from_file(&path) {
tests.push(metadata)
}
}
+257 -45
@@ -1,11 +1,13 @@
use std::collections::HashMap;
use alloy::{
json_abi::Function, network::TransactionBuilder, primitives::Address,
json_abi::JsonAbi,
network::TransactionBuilder,
primitives::{Address, Bytes, U256},
rpc::types::TransactionRequest,
};
use semver::VersionReq;
use serde::{Deserialize, de::Deserializer};
use serde::Deserialize;
use serde_json::Value;
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
@@ -15,7 +17,6 @@ pub struct Input {
pub comment: Option<String>,
#[serde(default = "default_instance")]
pub instance: String,
#[serde(deserialize_with = "deserialize_method")]
pub method: Method,
pub calldata: Option<Calldata>,
pub expected: Option<Expected>,
@@ -47,46 +48,24 @@ pub enum Calldata {
}
/// Specify how the contract is called.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
pub enum Method {
/// Initiate a deploy transaction, calling contracts constructor.
///
/// Indicated by `#deployer`.
#[serde(rename = "#deployer")]
Deployer,
/// Does not calculate and insert a function selector.
///
/// Indicated by `#fallback`.
#[default]
#[serde(rename = "#fallback")]
Fallback,
/// Call the public function with this selector.
///
/// Calculates the selector if neither deployer or fallback matches.
Function([u8; 4]),
}
fn deserialize_method<'de, D>(deserializer: D) -> Result<Method, D::Error>
where
D: Deserializer<'de>,
{
Ok(match String::deserialize(deserializer)?.as_str() {
"#deployer" => Method::Deployer,
"#fallback" => Method::Fallback,
signature => {
let signature = if signature.ends_with(')') {
signature.to_string()
} else {
format!("{signature}()")
};
match Function::parse(&signature) {
Ok(function) => Method::Function(function.selector().0),
Err(error) => {
return Err(serde::de::Error::custom(format!(
"parsing function signature '{signature}' error: {error}"
)));
}
}
}
})
/// Call the public function with the given name.
#[serde(untagged)]
FunctionName(String),
}
impl Input {
@@ -101,25 +80,100 @@ impl Input {
.ok_or_else(|| anyhow::anyhow!("instance {instance} not deployed"))
}
pub fn encoded_input(
&self,
deployed_abis: &HashMap<String, JsonAbi>,
deployed_contracts: &HashMap<String, Address>,
) -> anyhow::Result<Bytes> {
let Method::FunctionName(ref function_name) = self.method else {
return Ok(Bytes::default()); // fallback or deployer — no input
};
let Some(abi) = deployed_abis.get(&self.instance) else {
tracing::error!(
contract_name = self.instance,
available_abis = ?deployed_abis.keys().collect::<Vec<_>>(),
"Attempted to lookup ABI of contract but it wasn't found"
);
anyhow::bail!("ABI for instance '{}' not found", &self.instance);
};
tracing::trace!("ABI found for instance: {}", &self.instance);
// We follow the same logic that's implemented in the matter-labs-tester where they resolve
// the function name into a function selector, assuming that the function doesn't have any
// existing overloads.
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
let function = abi
.functions()
.find(|function| function.name.starts_with(function_name))
.ok_or_else(|| {
anyhow::anyhow!(
"Function with name {:?} not found in ABI for the instance {:?}",
function_name,
&self.instance
)
})?;
tracing::trace!("Functions found for instance: {}", &self.instance);
let calldata_args = match &self.calldata {
Some(Calldata::Compound(args)) => args,
_ => anyhow::bail!("Expected compound calldata for function call"),
};
if calldata_args.len() != function.inputs.len() {
anyhow::bail!(
"Function expects {} args, but got {}",
function.inputs.len(),
calldata_args.len()
);
}
tracing::trace!(
"Starting encoding ABI's parameters for instance: {}",
&self.instance
);
// Allocate the vector that we will be using for the calldata up front. Its size is:
// 4 bytes for the function selector.
// function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
//
// Resolved arguments are appended directly into this buffer, which avoids allocating a
// new buffer for each one of them.
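// As an illustrative sketch (values worked out by hand, not taken from this repository):
// calling `store(uint256)` with the argument `42` yields 36 bytes of calldata:
//   0x6057361d  <- the selector, i.e. the first 4 bytes of keccak256("store(uint256)")
//   0x000000000000000000000000000000000000000000000000000000000000002a  <- 42, padded to 32 bytes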
let mut calldata = Vec::<u8>::with_capacity(4 + calldata_args.len() * 32);
calldata.extend(function.selector().0);
for (arg_idx, arg) in calldata_args.iter().enumerate() {
match resolve_argument(arg, deployed_contracts) {
Ok(resolved) => {
calldata.extend(resolved.to_be_bytes::<32>());
}
Err(error) => {
tracing::error!(arg, arg_idx, ?error, "Failed to resolve argument");
return Err(error);
}
};
}
Ok(calldata.into())
}
/// Parse this input into a legacy transaction.
pub fn legacy_transaction(
&self,
chain_id: u64,
nonce: u64,
deployed_contracts: &HashMap<String, Address>,
deployed_abis: &HashMap<String, JsonAbi>,
) -> anyhow::Result<TransactionRequest> {
let to = match self.method {
Method::Deployer => Address::ZERO,
_ => self.instance_to_address(&self.instance, deployed_contracts)?,
};
Ok(TransactionRequest::default()
.with_from(self.caller)
.with_to(to)
.with_nonce(nonce)
.with_chain_id(chain_id)
.with_gas_price(20_000_000_000)
.with_gas_limit(20_000_000_000))
let input_data = self.encoded_input(deployed_abis, deployed_contracts)?;
let transaction_request = TransactionRequest::default().nonce(nonce);
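// Note: no explicit gas price, gas limit, chain id, or sender is set here anymore;
// filling those fields in is presumably left to the provider (e.g. alloy's fillers and
// gas estimation) when the transaction is submitted.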
match self.method {
Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)),
_ => Ok(transaction_request
.to(self.instance_to_address(&self.instance, deployed_contracts)?)
.input(input_data.into())),
}
}
}
@@ -130,3 +184,161 @@ fn default_instance() -> String {
fn default_caller() -> Address {
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap()
}
/// This function takes in the string calldata argument provided in the JSON input and resolves it
/// into a [`U256`] which is later used to construct the calldata.
///
/// # Note
///
/// This piece of code is taken from the matter-labs-tester repository which is licensed under MIT
/// or Apache. The original source code can be found here:
/// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146
fn resolve_argument(
value: &str,
deployed_contracts: &HashMap<String, Address>,
) -> anyhow::Result<U256> {
if let Some(instance) = value.strip_suffix(".address") {
Ok(U256::from_be_slice(
deployed_contracts
.get(instance)
.ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance))?
.as_ref(),
))
} else if let Some(value) = value.strip_prefix('-') {
let value = U256::from_str_radix(value, 10)
.map_err(|error| anyhow::anyhow!("Invalid decimal literal after `-`: {}", error))?;
if value > U256::ONE << 255u8 {
anyhow::bail!("Decimal literal after `-` is too big");
}
let value = value
.checked_sub(U256::ONE)
.ok_or_else(|| anyhow::anyhow!("`-0` is invalid literal"))?;
Ok(U256::MAX.checked_sub(value).expect("Always valid"))
} else if let Some(value) = value.strip_prefix("0x") {
Ok(U256::from_str_radix(value, 16)
.map_err(|error| anyhow::anyhow!("Invalid hexadecimal literal: {}", error))?)
} else {
// TODO: This is a set of "variables" that we need to be able to resolve to be fully in
// compliance with the matter labs tester but we currently do not resolve them. We need to
// add logic that does their resolution in the future, perhaps through some kind of system
// context API that we pass down to the resolution function that allows it to make calls to
// the node to perform these resolutions.
let is_unsupported = [
"$CHAIN_ID",
"$GAS_LIMIT",
"$COINBASE",
"$DIFFICULTY",
"$BLOCK_HASH",
"$BLOCK_TIMESTAMP",
]
.iter()
.any(|var| value.starts_with(var));
if is_unsupported {
tracing::error!(value, "Unsupported variable used");
anyhow::bail!("Encountered {value} which is currently unsupported by the framework");
} else {
Ok(U256::from_str_radix(value, 10)
.map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error))?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy::json_abi::JsonAbi;
use alloy_primitives::address;
use alloy_sol_types::SolValue;
use std::collections::HashMap;
#[test]
fn test_encoded_input_uint256() {
let raw_metadata = r#"
[
{
"inputs": [{"name": "value", "type": "uint256"}],
"name": "store",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
"#;
let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap();
let selector = parsed_abi
.function("store")
.unwrap()
.first()
.unwrap()
.selector()
.0;
let input = Input {
instance: "Contract".to_string(),
method: Method::FunctionName("store".to_owned()),
calldata: Some(Calldata::Compound(vec!["42".into()])),
..Default::default()
};
let mut deployed_abis = HashMap::new();
deployed_abis.insert("Contract".to_string(), parsed_abi);
let deployed_contracts = HashMap::new();
let encoded = input
.encoded_input(&deployed_abis, &deployed_contracts)
.unwrap();
assert!(encoded.0.starts_with(&selector));
type T = (u64,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!(decoded.0, 42);
}
#[test]
fn test_encoded_input_address() {
let raw_abi = r#"[
{
"inputs": [{"name": "recipient", "type": "address"}],
"name": "send",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]"#;
let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap();
let selector = parsed_abi
.function("send")
.unwrap()
.first()
.unwrap()
.selector()
.0;
let input: Input = Input {
instance: "Contract".to_string(),
method: Method::FunctionName("send".to_owned()),
calldata: Some(Calldata::Compound(vec![
"0x1000000000000000000000000000000000000001".to_string(),
])),
..Default::default()
};
let mut abis = HashMap::new();
abis.insert("Contract".to_string(), parsed_abi);
let contracts = HashMap::new();
let encoded = input.encoded_input(&abis, &contracts).unwrap();
assert!(encoded.0.starts_with(&selector));
type T = (alloy_primitives::Address,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!(
decoded.0,
address!("0x1000000000000000000000000000000000000001")
);
}
}
+41 -11
@@ -1,6 +1,7 @@
use std::{
collections::BTreeMap,
fs::{File, read_to_string},
ops::Deref,
path::{Path, PathBuf},
};
@@ -15,6 +16,29 @@ pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!";
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
pub struct MetadataFile {
pub path: PathBuf,
pub content: Metadata,
}
impl MetadataFile {
pub fn try_from_file(path: &Path) -> Option<Self> {
Metadata::try_from_file(path).map(|metadata| Self {
path: path.to_owned(),
content: metadata,
})
}
}
impl Deref for MetadataFile {
type Target = Metadata;
fn deref(&self) -> &Self::Target {
&self.content
}
}
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
pub struct Metadata {
pub cases: Vec<Case>,
@@ -35,7 +59,7 @@ impl Metadata {
.filter_map(|mode| match mode {
Mode::Solidity(solc_mode) => Some(solc_mode),
Mode::Unknown(mode) => {
log::debug!("compiler: ignoring unknown mode '{mode}'");
tracing::debug!("compiler: ignoring unknown mode '{mode}'");
None
}
})
@@ -90,7 +114,7 @@ impl Metadata {
assert!(path.is_file(), "not a file: {}", path.display());
let Some(file_extension) = path.extension() else {
log::debug!("skipping corpus file: {}", path.display());
tracing::debug!("skipping corpus file: {}", path.display());
return None;
};
@@ -102,14 +126,14 @@ impl Metadata {
return Self::try_from_solidity(path);
}
log::debug!("ignoring invalid corpus file: {}", path.display());
tracing::debug!("ignoring invalid corpus file: {}", path.display());
None
}
fn try_from_json(path: &Path) -> Option<Self> {
let file = File::open(path)
.inspect_err(|error| {
log::error!(
tracing::error!(
"opening JSON test metadata file '{}' error: {error}",
path.display()
);
@@ -122,7 +146,7 @@ impl Metadata {
Some(metadata)
}
Err(error) => {
log::error!(
tracing::error!(
"parsing JSON test metadata file '{}' error: {error}",
path.display()
);
@@ -132,9 +156,9 @@ impl Metadata {
}
fn try_from_solidity(path: &Path) -> Option<Self> {
let buf = read_to_string(path)
let spec = read_to_string(path)
.inspect_err(|error| {
log::error!(
tracing::error!(
"opening JSON test metadata file '{}' error: {error}",
path.display()
);
@@ -147,18 +171,24 @@ impl Metadata {
buf
});
if buf.is_empty() {
if spec.is_empty() {
return None;
}
match serde_json::from_str::<Self>(&buf) {
match serde_json::from_str::<Self>(&spec) {
Ok(mut metadata) => {
metadata.file_path = Some(path.to_path_buf());
let name = path
.file_name()
.expect("this should be the path to a Solidity file")
.to_str()
.expect("the file name should be valid UTF-8k");
metadata.contracts = Some([(String::from("Test"), format!("{name}:Test"))].into());
Some(metadata)
}
Err(error) => {
log::error!(
"parsing Solidity test metadata file '{}' error: {error}",
tracing::error!(
"parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}",
path.display()
);
None
+3 -3
@@ -1,16 +1,16 @@
use semver::Version;
use serde::Deserialize;
use serde::de::Deserializer;
use serde::{Deserialize, Serialize};
/// Specifies the compilation mode of the test artifact.
#[derive(Debug, Clone, Eq, PartialEq)]
#[derive(Hash, Debug, Clone, Eq, PartialEq)]
pub enum Mode {
Solidity(SolcMode),
Unknown(String),
}
/// Specify Solidity specific compiler options.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
#[derive(Hash, Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct SolcMode {
pub solc_version: Option<semver::VersionReq>,
solc_optimize: Option<bool>,
+2 -3
@@ -11,8 +11,7 @@ rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
anyhow = { workspace = true }
hex = { workspace = true }
log = { workspace = true }
futures = { workspace = true }
tracing = { workspace = true }
once_cell = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
@@ -0,0 +1,221 @@
//! The alloy crate __requires__ a tokio runtime.
//! We contain any async rust right here.
use std::{any::Any, panic::AssertUnwindSafe, pin::Pin, thread};
use futures::FutureExt;
use once_cell::sync::Lazy;
use tokio::{
runtime::Builder,
sync::{mpsc::UnboundedSender, oneshot},
};
/// A blocking async executor.
///
/// This struct exposes the abstraction of a blocking async executor. It is a global, static
/// executor, which means that no new instances of it need to be created: it's a singleton that
/// can be accessed by any thread that wants to perform some async computation on the blocking
/// executor thread.
///
/// The API of the blocking executor is designed to be natural and simple to use, and it is not
/// bound to specific tasks or return types. The following is an example of using this
/// executor to drive an async computation:
///
/// ```rust
/// use revive_dt_node_interaction::*;
///
/// fn blocking_function() {
/// let result = BlockingExecutor::execute(async move {
/// tokio::time::sleep(std::time::Duration::from_secs(1)).await;
/// 0xFFu8
/// })
/// .expect("Computation failed");
///
/// assert_eq!(result, 0xFF);
/// }
/// ```
///
/// Users get to pass in their async tasks without needing to worry about putting them in a [`Box`],
/// [`Pin`], needing to perform down-casting, or the internal channel mechanism used by the runtime.
/// To the user, it just looks like a function that converts some async code into sync code.
///
/// This struct also handles panics that occur in the passed futures and converts them into errors
/// that can be handled by the user. This is done to keep the executor robust.
///
/// Internally, the executor communicates with the tokio runtime thread through channels which carry
/// the [`TaskMessage`] and the results of the execution.
pub struct BlockingExecutor;
impl BlockingExecutor {
pub fn execute<R>(future: impl Future<Output = R> + Send + 'static) -> Result<R, anyhow::Error>
where
R: Send + 'static,
{
// Note: The blocking executor is a singleton and therefore we store its state in a static
// so that it's assigned only once. Additionally, when we set the state of the executor we
// spawn the thread where the async runtime runs.
static STATE: Lazy<ExecutorState> = Lazy::new(|| {
tracing::trace!("Initializing the BlockingExecutor state");
// All communication with the tokio runtime thread happens over mpsc channels where the
// producers here are the threads that want to run async tasks and the consumer here is
// the tokio runtime thread.
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<TaskMessage>();
thread::spawn(move || {
let runtime = Builder::new_current_thread()
.enable_all()
.build()
.expect("Failed to create the async runtime");
runtime.block_on(async move {
while let Some(TaskMessage {
future: task,
response_tx: response_channel,
}) = rx.recv().await
{
tracing::trace!("Received a new future to execute");
tokio::spawn(async move {
// One of the things that the blocking executor does is that it allows
// us to catch panics if they occur. By wrapping the given future in an
// AssertUnwindSafe::catch_unwind we are able to catch all panic unwinds
// in the given future and convert them into errors.
let task = AssertUnwindSafe(task).catch_unwind();
let result = task.await;
let _ = response_channel.send(result);
});
}
})
});
ExecutorState { tx }
});
// We need blocking, synchronous communication of the async computation's result between
// the current thread and the tokio runtime thread, and tokio's oneshot channels allow us
// to do that. The sender side of the channel is given to the tokio runtime thread to send
// the result when the computation is completed, while the receiver side is kept on this
// thread to await the response of the async task.
let (response_tx, response_rx) =
oneshot::channel::<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>();
// The tokio runtime thread expects a Future<Output = Box<dyn Any + Send>> + Send to be
// sent to it to execute. However, this function has a typed Future<Output = R> + Send and
// therefore we need to change the type of the future to fit what the runtime thread expects
// in the task message. In doing this conversion, we lose some of the type information since
// we're converting R => dyn Any. However, we will perform down-casting on the result to
// convert it back into R.
let future = Box::pin(async move { Box::new(future.await) as Box<dyn Any + Send> });
let task = TaskMessage::new(future, response_tx);
if let Err(error) = STATE.tx.send(task) {
tracing::error!(?error, "Failed to send the task to the blocking executor");
anyhow::bail!("Failed to send the task to the blocking executor: {error:?}")
}
let result = match response_rx.blocking_recv() {
Ok(result) => result,
Err(error) => {
tracing::error!(
?error,
"Failed to get the response from the blocking executor"
);
anyhow::bail!("Failed to get the response from the blocking executor: {error:?}")
}
};
match result.map(|result| {
*result
.downcast::<R>()
.expect("Type mismatch in the downcast")
}) {
Ok(result) => Ok(result),
Err(error) => {
tracing::error!(
?error,
"Failed to downcast the returned result into the expected type"
);
anyhow::bail!(
"Failed to downcast the returned result into the expected type: {error:?}"
)
}
}
}
}
/// Represents the state of the async runtime. The runtime is designed to be a singleton,
/// which means that in the currently running program there's just a single thread hosting an
/// async runtime.
struct ExecutorState {
/// The sending side of the task messages channel. This is used by all of the other threads to
/// communicate with the async runtime thread.
tx: UnboundedSender<TaskMessage>,
}
/// Represents a message that contains an asynchronous task that's to be executed by the runtime
/// as well as a way for the runtime to report back on the result of the execution.
struct TaskMessage {
/// The task that's being requested to run. This is a future that returns an object that
/// implements [`Any`] and [`Send`], allowing it to be sent between the requesting thread and
/// the async thread.
future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
/// A one shot sender channel where the sender of the task is expecting to hear back on the
/// result of the task.
response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
}
impl TaskMessage {
pub fn new(
future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
) -> Self {
Self {
future,
response_tx,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn simple_future_works() {
// Act
let result = BlockingExecutor::execute(async move {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
0xFFu8
})
.unwrap();
// Assert
assert_eq!(result, 0xFFu8);
}
#[test]
#[allow(unreachable_code, clippy::unreachable)]
fn panics_in_futures_are_caught() {
// Act
let result = BlockingExecutor::execute(async move {
panic!("This is a panic!");
0xFFu8
});
// Assert
assert!(result.is_err());
// Act
let result = BlockingExecutor::execute(async move {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
0xFFu8
})
.unwrap();
// Assert
assert_eq!(result, 0xFFu8)
}
}
+10 -5
@@ -1,12 +1,11 @@
//! This crate implements all node interactions.
use alloy::rpc::types::trace::geth::GethTrace;
use alloy::primitives::Address;
use alloy::rpc::types::trace::geth::{DiffMode, GethTrace};
use alloy::rpc::types::{TransactionReceipt, TransactionRequest};
use tokio_runtime::TO_TOKIO;
mod tokio_runtime;
pub mod trace;
pub mod transaction;
mod blocking_executor;
pub use blocking_executor::*;
/// An interface for all interactions with Ethereum compatible nodes.
pub trait EthereumNode {
@@ -18,4 +17,10 @@ pub trait EthereumNode {
/// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
fn trace_transaction(&self, transaction: TransactionReceipt) -> anyhow::Result<GethTrace>;
/// Returns the state diff of the transaction hash in the [TransactionReceipt].
fn state_diff(&self, transaction: TransactionReceipt) -> anyhow::Result<DiffMode>;
/// Returns the next available nonce for the given [Address].
fn fetch_add_nonce(&self, address: Address) -> anyhow::Result<u64>;
}
@@ -1,79 +0,0 @@
//! The alloy crate __requires__ a tokio runtime.
//! We contain any async rust right here.
use once_cell::sync::Lazy;
use std::pin::Pin;
use std::sync::Mutex;
use std::thread;
use tokio::runtime::Runtime;
use tokio::spawn;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinError;
use crate::trace::Trace;
use crate::transaction::Transaction;
pub(crate) static TO_TOKIO: Lazy<Mutex<TokioRuntime>> =
Lazy::new(|| Mutex::new(TokioRuntime::spawn()));
/// Common interface for executing async node interactions from a non-async context.
#[allow(clippy::type_complexity)]
pub(crate) trait AsyncNodeInteraction: Send + 'static {
type Output: Send;
//// Returns the task and the output sender.
fn split(
self,
) -> (
Pin<Box<dyn Future<Output = Self::Output> + Send>>,
oneshot::Sender<Self::Output>,
);
}
pub(crate) struct TokioRuntime {
pub(crate) transaction_sender: mpsc::Sender<Transaction>,
pub(crate) trace_sender: mpsc::Sender<Trace>,
}
impl TokioRuntime {
fn spawn() -> Self {
let rt = Runtime::new().expect("should be able to create the tokio runtime");
let (transaction_sender, transaction_receiver) = mpsc::channel::<Transaction>(1024);
let (trace_sender, trace_receiver) = mpsc::channel::<Trace>(1024);
thread::spawn(move || {
rt.block_on(async move {
let transaction_task = spawn(interaction::<Transaction>(transaction_receiver));
let trace_task = spawn(interaction::<Trace>(trace_receiver));
if let Err(error) = transaction_task.await {
log::error!("tokio transaction task failed: {error}");
}
if let Err(error) = trace_task.await {
log::error!("tokio trace transaction task failed: {error}");
}
});
});
Self {
transaction_sender,
trace_sender,
}
}
}
async fn interaction<T>(mut receiver: mpsc::Receiver<T>) -> Result<(), JoinError>
where
T: AsyncNodeInteraction,
{
while let Some(task) = receiver.recv().await {
spawn(async move {
let (task, sender) = task.split();
sender
.send(task.await)
.unwrap_or_else(|_| panic!("failed to send task output"));
});
}
Ok(())
}
-43
@@ -1,43 +0,0 @@
//! Trace transactions in a sync context.
use std::pin::Pin;
use alloy::rpc::types::trace::geth::GethTrace;
use tokio::sync::oneshot;
use crate::TO_TOKIO;
use crate::tokio_runtime::AsyncNodeInteraction;
pub type Task = Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + Send>>;
pub(crate) struct Trace {
sender: oneshot::Sender<anyhow::Result<GethTrace>>,
task: Task,
}
impl AsyncNodeInteraction for Trace {
type Output = anyhow::Result<GethTrace>;
fn split(
self,
) -> (
std::pin::Pin<Box<dyn Future<Output = Self::Output> + Send>>,
oneshot::Sender<Self::Output>,
) {
(self.task, self.sender)
}
}
/// Execute some [Task] that return a [GethTrace] result.
pub fn trace_transaction(task: Task) -> anyhow::Result<GethTrace> {
let task_sender = TO_TOKIO.lock().unwrap().trace_sender.clone();
let (sender, receiver) = oneshot::channel();
task_sender
.blocking_send(Trace { task, sender })
.expect("we are not calling this from an async context");
receiver
.blocking_recv()
.unwrap_or_else(|error| anyhow::bail!("no trace received: {error}"))
}
@@ -1,46 +0,0 @@
//! Execute transactions in a sync context.
use std::pin::Pin;
use alloy::rpc::types::TransactionReceipt;
use tokio::sync::oneshot;
use crate::TO_TOKIO;
use crate::tokio_runtime::AsyncNodeInteraction;
pub type Task = Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + Send>>;
pub(crate) struct Transaction {
receipt_sender: oneshot::Sender<anyhow::Result<TransactionReceipt>>,
task: Task,
}
impl AsyncNodeInteraction for Transaction {
type Output = anyhow::Result<TransactionReceipt>;
fn split(
self,
) -> (
Pin<Box<dyn Future<Output = Self::Output> + Send>>,
oneshot::Sender<Self::Output>,
) {
(self.task, self.receipt_sender)
}
}
/// Execute some [Task] that returns a [TransactionReceipt].
pub fn execute_transaction(task: Task) -> anyhow::Result<TransactionReceipt> {
let request_sender = TO_TOKIO.lock().unwrap().transaction_sender.clone();
let (receipt_sender, receipt_receiver) = oneshot::channel();
request_sender
.blocking_send(Transaction {
receipt_sender,
task,
})
.expect("we are not calling this from an async context");
receipt_receiver
.blocking_recv()
.unwrap_or_else(|error| anyhow::bail!("no receipt received: {error}"))
}
+7 -2
View File
@@ -11,11 +11,16 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
alloy = { workspace = true }
log = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-config = { workspace = true }
serde_json = { workspace = true }
sp-core = { workspace = true }
sp-runtime = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
+246 -80
@@ -1,17 +1,21 @@
//! The go-ethereum node implementation.
use std::{
fs::{File, create_dir_all, remove_dir_all},
collections::HashMap,
fs::{File, OpenOptions, create_dir_all, remove_dir_all},
io::{BufRead, BufReader, Read, Write},
path::PathBuf,
process::{Child, Command, Stdio},
sync::atomic::{AtomicU32, Ordering},
thread,
sync::{
Mutex,
atomic::{AtomicU32, Ordering},
},
time::{Duration, Instant},
};
use alloy::{
network::EthereumWallet,
primitives::Address,
providers::{Provider, ProviderBuilder, ext::DebugApi},
rpc::types::{
TransactionReceipt, TransactionRequest,
@@ -19,9 +23,8 @@ use alloy::{
},
};
use revive_dt_config::Arguments;
use revive_dt_node_interaction::{
EthereumNode, trace::trace_transaction, transaction::execute_transaction,
};
use revive_dt_node_interaction::{BlockingExecutor, EthereumNode};
use tracing::Level;
use crate::Node;
@@ -39,17 +42,25 @@ pub struct Instance {
connection_string: String,
base_directory: PathBuf,
data_directory: PathBuf,
logs_directory: PathBuf,
geth: PathBuf,
id: u32,
handle: Option<Child>,
network_id: u64,
start_timeout: u64,
wallet: EthereumWallet,
nonces: Mutex<HashMap<Address, u64>>,
/// This vector stores the [`File`] objects that we use for logging and want to flush when the
/// node object is dropped. We do not store them in a structured fashion (in separate fields)
/// at the moment since the logic we apply to them is the same regardless of what they belong
/// to: we just want to flush them on [`Drop`] of the node.
logs_file_to_flush: Vec<File>,
}
impl Instance {
const BASE_DIRECTORY: &str = "geth";
const DATA_DIRECTORY: &str = "data";
const LOGS_DIRECTORY: &str = "logs";
const IPC_FILE: &str = "geth.ipc";
const GENESIS_JSON_FILE: &str = "genesis.json";
@@ -57,9 +68,14 @@ impl Instance {
const READY_MARKER: &str = "IPC endpoint opened";
const ERROR_MARKER: &str = "Fatal:";
const GETH_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
/// Create the node directory and call `geth init` to configure the genesis.
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
File::create(&genesis_path)?.write_all(genesis.as_bytes())?;
@@ -89,8 +105,24 @@ impl Instance {
/// Spawn the go-ethereum node child process.
///
/// [Instance::init] must be called priorly.
/// [Instance::init] must be called beforehand.
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
// This is the `OpenOptions` that we wish to use for all of the log files that we will be
// opening in this method. We construct it once, in this way, to:
// 1. Be consistent across all log files.
// 2. Be less verbose and more DRY.
// 3. Work around the builder pattern's use of mutable references.
let open_options = {
let mut options = OpenOptions::new();
options.create(true).truncate(true).write(true);
options
};
let stdout_logs_file = open_options
.clone()
.open(self.geth_stdout_log_file_path())?;
let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
self.handle = Command::new(&self.geth)
.arg("--dev")
.arg("--datadir")
@@ -102,49 +134,67 @@ impl Instance {
.arg("--nodiscover")
.arg("--maxpeers")
.arg("0")
.stderr(Stdio::piped())
.stdout(Stdio::null())
.stderr(stderr_logs_file.try_clone()?)
.stdout(stdout_logs_file.try_clone()?)
.spawn()?
.into();
if let Err(error) = self.wait_ready() {
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
self.shutdown()?;
return Err(error);
}
self.logs_file_to_flush
.extend([stderr_logs_file, stdout_logs_file]);
Ok(self)
}
/// Wait for the go-ethereum node child process to become ready.
///
/// [Instance::spawn_process] must be called beforehand.
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
// Thanks clippy but geth is a server; we don't `wait` but eventually kill it.
#[allow(clippy::zombie_processes)]
let mut child = self.handle.take().expect("should be spawned");
let start_time = Instant::now();
let maximum_wait_time = Duration::from_millis(self.start_timeout);
let mut stderr = BufReader::new(child.stderr.take().expect("should be piped")).lines();
let error = loop {
let Some(Ok(line)) = stderr.next() else {
break "child process stderr reading error".to_string();
};
if line.contains(Self::ERROR_MARKER) {
break line;
}
if line.contains(Self::READY_MARKER) {
// Keep stderr alive
// https://github.com/alloy-rs/alloy/issues/2091#issuecomment-2676134147
thread::spawn(move || for _ in stderr.by_ref() {});
self.handle = child.into();
return Ok(self);
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(self.geth_stderr_log_file_path())?;
let maximum_wait_time = Duration::from_millis(self.start_timeout);
let mut stderr = BufReader::new(logs_file).lines();
loop {
if let Some(Ok(line)) = stderr.next() {
if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}");
}
if line.contains(Self::READY_MARKER) {
return Ok(self);
}
}
if Instant::now().duration_since(start_time) > maximum_wait_time {
break "spawn timeout".to_string();
anyhow::bail!("Timeout in starting geth");
}
};
}
}
let _ = child.kill();
anyhow::bail!("geth node #{} spawn error: {error}", self.id)
#[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
fn geth_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
fn geth_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
}
}
impl EthereumNode for Instance {
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn execute_transaction(
&self,
transaction: TransactionRequest,
@@ -152,18 +202,89 @@ impl EthereumNode for Instance {
let connection_string = self.connection_string();
let wallet = self.wallet.clone();
execute_transaction(Box::pin(async move {
Ok(ProviderBuilder::new()
BlockingExecutor::execute(async move {
let outer_span = tracing::debug_span!("Submitting transaction", ?transaction,);
let _outer_guard = outer_span.enter();
let provider = ProviderBuilder::new()
.wallet(wallet)
.connect(&connection_string)
.await?
.send_transaction(transaction)
.await?
.get_receipt()
.await?)
}))
.await?;
let pending_transaction = provider.send_transaction(transaction).await?;
let transaction_hash = pending_transaction.tx_hash();
let span = tracing::info_span!("Awaiting transaction receipt", ?transaction_hash);
let _guard = span.enter();
// The following is a fix for the "transaction indexing is in progress" error that we
// used to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
// it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
// was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
// which _might_ return the above error if the tx has not yet been indexed yet. So, we
// need to implement a retry mechanism for the receipt to keep retrying to get it until
// it eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// At the moment we do not allow for the 60 seconds to be modified and we take it as
// being an implementation detail that's invisible to anything outside of this module.
//
// We allow a total of 60 retries for getting the receipt with one second between each
// retry and the next which means that we allow for a total of 60 seconds of waiting
// before we consider that we're unable to get the transaction receipt.
let mut retries = 0;
loop {
match provider.get_transaction_receipt(*transaction_hash).await {
Ok(Some(receipt)) => {
tracing::info!("Obtained the transaction receipt");
break Ok(receipt);
}
Ok(None) => {
if retries == 60 {
tracing::error!(
"Polled for transaction receipt for 60 seconds but failed to get it"
);
break Err(anyhow::anyhow!("Failed to get the transaction receipt"));
} else {
tracing::trace!(
retries,
"Sleeping for 1 second and trying to get the receipt again"
);
retries += 1;
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue;
}
}
Err(error) => {
let error_string = error.to_string();
if error_string.contains("transaction indexing is in progress") {
if retries == 60 {
tracing::error!(
"Polled for transaction receipt for 60 seconds but failed to get it"
);
break Err(error.into());
} else {
tracing::trace!(
retries,
"Sleeping for 1 second and trying to get the receipt again"
);
retries += 1;
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
continue;
}
} else {
break Err(error.into());
}
}
}
}
})?
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn trace_transaction(
&self,
transaction: TransactionReceipt,
@@ -176,49 +297,17 @@ impl EthereumNode for Instance {
});
let wallet = self.wallet.clone();
trace_transaction(Box::pin(async move {
BlockingExecutor::execute(async move {
Ok(ProviderBuilder::new()
.wallet(wallet)
.connect(&connection_string)
.await?
.debug_trace_transaction(transaction.transaction_hash, trace_options)
.await?)
}))
}
}
impl Node for Instance {
fn new(config: &Arguments) -> Self {
let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string());
Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY),
base_directory,
geth: config.geth.clone(),
id,
handle: None,
network_id: config.network_id,
start_timeout: config.geth_start_timeout,
wallet: config.wallet(),
}
}
fn connection_string(&self) -> String {
self.connection_string.clone()
}
fn shutdown(self) -> anyhow::Result<()> {
Ok(())
}
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?.wait_ready()?;
Ok(())
})?
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn state_diff(
&self,
transaction: alloy::rpc::types::TransactionReceipt,
@@ -232,6 +321,87 @@ impl Node for Instance {
}
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn fetch_add_nonce(&self, address: Address) -> anyhow::Result<u64> {
let connection_string = self.connection_string.clone();
let wallet = self.wallet.clone();
let onchain_nonce = BlockingExecutor::execute::<anyhow::Result<_>>(async move {
ProviderBuilder::new()
.wallet(wallet)
.connect(&connection_string)
.await?
.get_transaction_count(address)
.await
.map_err(Into::into)
})??;
let mut nonces = self.nonces.lock().unwrap();
let current = nonces.entry(address).or_insert(onchain_nonce);
let value = *current;
*current += 1;
Ok(value)
}
}
impl Node for Instance {
fn new(config: &Arguments) -> Self {
let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string());
Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY),
logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
base_directory,
geth: config.geth.clone(),
id,
handle: None,
network_id: config.network_id,
start_timeout: config.geth_start_timeout,
wallet: config.wallet(),
nonces: Mutex::new(HashMap::new()),
// We know that we only ever need to store 2 files (the stdout and stderr of the geth
// node), so we can specify that capacity when creating the vector.
logs_file_to_flush: Vec::with_capacity(2),
}
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn connection_string(&self) -> String {
self.connection_string.clone()
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn shutdown(&mut self) -> anyhow::Result<()> {
// Terminate the processes in a graceful manner to allow for the output to be flushed.
if let Some(mut child) = self.handle.take() {
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the geth process: {error:?}"))?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore
// there's nothing to be deleted.
let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));
Ok(())
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?;
Ok(())
}
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.geth)
.arg("--version")
@@ -246,13 +416,9 @@ impl Node for Instance {
}
impl Drop for Instance {
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
fn drop(&mut self) {
if let Some(child) = self.handle.as_mut() {
let _ = child.kill();
}
if self.base_directory.exists() {
let _ = remove_dir_all(&self.base_directory);
}
self.shutdown().expect("Failed to shutdown")
}
}
+678
@@ -0,0 +1,678 @@
use std::{
collections::HashMap,
fs::{File, OpenOptions, create_dir_all, remove_dir_all},
io::{BufRead, Write},
path::{Path, PathBuf},
process::{Child, Command, Stdio},
sync::{
Mutex,
atomic::{AtomicU32, Ordering},
},
time::Duration,
};
use alloy::{
hex,
network::EthereumWallet,
primitives::Address,
providers::{Provider, ProviderBuilder, ext::DebugApi},
rpc::types::{
TransactionReceipt,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
},
};
use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use tracing::Level;
use revive_dt_config::Arguments;
use revive_dt_node_interaction::{BlockingExecutor, EthereumNode};
use crate::Node;
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
#[derive(Debug)]
pub struct KitchensinkNode {
id: u32,
substrate_binary: PathBuf,
eth_proxy_binary: PathBuf,
rpc_url: String,
wallet: EthereumWallet,
base_directory: PathBuf,
logs_directory: PathBuf,
process_substrate: Option<Child>,
process_proxy: Option<Child>,
nonces: Mutex<HashMap<Address, u64>>,
/// This vector stores the [`File`] objects that we use for logging and want to flush when the
/// node object is dropped. We do not store them in a structured fashion (in separate fields)
/// at the moment since the logic we apply to them is the same regardless of what they belong
/// to: we just want to flush them on [`Drop`] of the node.
logs_file_to_flush: Vec<File>,
}
impl KitchensinkNode {
const BASE_DIRECTORY: &str = "kitchensink";
const LOGS_DIRECTORY: &str = "logs";
const DATA_DIRECTORY: &str = "chains";
const SUBSTRATE_READY_MARKER: &str = "Running JSON-RPC server";
const ETH_PROXY_READY_MARKER: &str = "Running JSON-RPC server";
const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
const BASE_SUBSTRATE_RPC_PORT: u16 = 9944;
const BASE_PROXY_RPC_PORT: u16 = 8545;
const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
const KITCHENSINK_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const KITCHENSINK_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// Note: we do not pipe the logs of this process to a separate file since this is just a
// once-off export of the default chain spec and not part of the long-running node process.
let output = Command::new(&self.substrate_binary)
.arg("export-chain-spec")
.arg("--chain")
.arg("dev")
.output()?;
if !output.status.success() {
anyhow::bail!(
"substrate-node export-chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content = String::from_utf8(output.stdout)?;
let mut chainspec_json: JsonValue = serde_json::from_str(&content)?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array()
.cloned()
.unwrap_or_default();
let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances
.into_iter()
.filter_map(|val| {
if let Some(arr) = val.as_array() {
if arr.len() == 2 {
let account = arr[0].as_str()?.to_string();
let balance = arr[1].as_f64()? as u128;
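// Caveat: round-tripping the balance through f64 can lose precision for values above
// 2^53; we assume this is acceptable for dev-chain genesis balances here.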
return Some((account, balance));
}
}
None
})
.collect();
let mut eth_balances = self.extract_balance_from_genesis_file(genesis)?;
merged_balances.append(&mut eth_balances);
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
json!(merged_balances);
serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)?,
&chainspec_json,
)?;
Ok(self)
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn spawn_process(&mut self) -> anyhow::Result<()> {
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}");
let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// This is the `OpenOptions` that we wish to use for all of the log files that we will be
// opening in this method. We construct it once, in this way, to:
// 1. Be consistent across all log files.
// 2. Be less verbose and more DRY.
// 3. Work around the builder pattern's use of mutable references.
let open_options = {
let mut options = OpenOptions::new();
options.create(true).truncate(true).write(true);
options
};
// Start Substrate node
let kitchensink_stdout_logs_file = open_options
.clone()
.open(self.kitchensink_stdout_log_file_path())?;
let kitchensink_stderr_logs_file = open_options
.clone()
.open(self.kitchensink_stderr_log_file_path())?;
self.process_substrate = Command::new(&self.substrate_binary)
.arg("--chain")
.arg(chainspec_path)
.arg("--base-path")
.arg(&self.base_directory)
.arg("--rpc-port")
.arg(substrate_rpc_port.to_string())
.arg("--name")
.arg(format!("revive-kitchensink-{}", self.id))
.arg("--force-authoring")
.arg("--rpc-methods")
.arg("Unsafe")
.arg("--rpc-cors")
.arg("all")
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(kitchensink_stdout_logs_file.try_clone()?)
.stderr(kitchensink_stderr_logs_file.try_clone()?)
.spawn()?
.into();
// Give the node a moment to boot
if let Err(error) = Self::wait_ready(
self.kitchensink_stderr_log_file_path().as_path(),
Self::SUBSTRATE_READY_MARKER,
Duration::from_secs(30),
) {
tracing::error!(
?error,
"Failed to start substrate, shutting down gracefully"
);
self.shutdown()?;
return Err(error);
};
let eth_proxy_stdout_logs_file = open_options
.clone()
.open(self.proxy_stdout_log_file_path())?;
let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?;
self.process_proxy = Command::new(&self.eth_proxy_binary)
.arg("--dev")
.arg("--rpc-port")
.arg(proxy_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(eth_proxy_stdout_logs_file.try_clone()?)
.stderr(eth_proxy_stderr_logs_file.try_clone()?)
.spawn()?
.into();
if let Err(error) = Self::wait_ready(
self.proxy_stderr_log_file_path().as_path(),
Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(30),
) {
tracing::error!(?error, "Failed to start proxy, shutting down gracefully");
self.shutdown()?;
return Err(error);
};
self.logs_file_to_flush.extend([
kitchensink_stdout_logs_file,
kitchensink_stderr_logs_file,
eth_proxy_stdout_logs_file,
eth_proxy_stderr_logs_file,
]);
Ok(())
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn extract_balance_from_genesis_file(
&self,
genesis_str: &str,
) -> anyhow::Result<Vec<(String, u128)>> {
let genesis_json: JsonValue = serde_json::from_str(genesis_str)?;
let alloc = genesis_json
.get("alloc")
.and_then(|a| a.as_object())
.ok_or_else(|| anyhow::anyhow!("Missing 'alloc' in genesis"))?;
let mut balances = Vec::new();
for (eth_addr, obj) in alloc.iter() {
let balance_str = obj.get("balance").and_then(|b| b.as_str()).unwrap_or("0");
let balance = if balance_str.starts_with("0x") {
u128::from_str_radix(balance_str.trim_start_matches("0x"), 16)?
} else {
balance_str.parse::<u128>()?
};
let substrate_addr = Self::eth_to_substrate_address(eth_addr)?;
balances.push((substrate_addr.clone(), balance));
}
Ok(balances)
}
fn eth_to_substrate_address(eth_addr: &str) -> anyhow::Result<String> {
let eth_bytes = hex::decode(eth_addr.trim_start_matches("0x"))?;
if eth_bytes.len() != 20 {
anyhow::bail!(
"Invalid Ethereum address length: expected 20 bytes, got {}",
eth_bytes.len()
);
}
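// Map the 20-byte Ethereum address to a 32-byte AccountId32 by suffixing it with 0xEE
// bytes (the convention pallet-revive uses for Ethereum-derived accounts, to the best of
// our understanding), then SS58-encode the result for use in the chain spec.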
let mut padded = [0xEEu8; 32];
padded[..20].copy_from_slice(&eth_bytes);
let account_id = AccountId32::from(padded);
Ok(account_id.to_ss58check())
}
fn wait_ready(logs_file_path: &Path, marker: &str, timeout: Duration) -> anyhow::Result<()> {
let start_time = std::time::Instant::now();
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(logs_file_path)?;
let mut lines = std::io::BufReader::new(logs_file).lines();
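// Tail the process's log file, polling for the readiness marker until the timeout
// elapses. Note that this loop spins without sleeping when no new lines are available.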
loop {
if let Some(Ok(line)) = lines.next() {
if line.contains(marker) {
return Ok(());
}
}
if start_time.elapsed() > timeout {
anyhow::bail!("Timeout waiting for process readiness: {marker}");
}
}
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_proxy_binary)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
fn kitchensink_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME)
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
fn kitchensink_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME)
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
fn proxy_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME)
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
fn proxy_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME)
}
}
impl EthereumNode for KitchensinkNode {
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn execute_transaction(
&self,
transaction: alloy::rpc::types::TransactionRequest,
) -> anyhow::Result<TransactionReceipt> {
let url = self.rpc_url.clone();
let wallet = self.wallet.clone();
tracing::debug!("Submitting transaction: {transaction:#?}");
tracing::info!("Submitting tx to kitchensink");
let receipt = BlockingExecutor::execute(async move {
Ok(ProviderBuilder::new()
.wallet(wallet)
.connect(&url)
.await?
.send_transaction(transaction)
.await?
.get_receipt()
.await?)
})?;
tracing::info!(?receipt, "Submitted tx to kitchensink");
receipt
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn trace_transaction(
&self,
transaction: TransactionReceipt,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let url = self.rpc_url.clone();
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
diff_mode: Some(true),
disable_code: None,
disable_storage: None,
});
let wallet = self.wallet.clone();
BlockingExecutor::execute(async move {
Ok(ProviderBuilder::new()
.wallet(wallet)
.connect(&url)
.await?
.debug_trace_transaction(transaction.transaction_hash, trace_options)
.await?)
})?
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn state_diff(&self, transaction: TransactionReceipt) -> anyhow::Result<DiffMode> {
match self
.trace_transaction(transaction)?
.try_into_pre_state_frame()?
{
PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"),
}
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn fetch_add_nonce(&self, address: Address) -> anyhow::Result<u64> {
let url = self.rpc_url.clone();
let wallet = self.wallet.clone();
let onchain_nonce = BlockingExecutor::execute::<anyhow::Result<_>>(async move {
ProviderBuilder::new()
.wallet(wallet)
.connect(&url)
.await?
.get_transaction_count(address)
.await
.map_err(Into::into)
})??;
let mut nonces = self.nonces.lock().unwrap();
let current = nonces.entry(address).or_insert(onchain_nonce);
let value = *current;
*current += 1;
Ok(value)
}
}
impl Node for KitchensinkNode {
fn new(config: &Arguments) -> Self {
let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = kitchensink_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
Self {
id,
substrate_binary: config.kitchensink.clone(),
eth_proxy_binary: config.eth_proxy.clone(),
rpc_url: String::new(),
wallet: config.wallet(),
base_directory,
logs_directory,
process_substrate: None,
process_proxy: None,
nonces: Mutex::new(HashMap::new()),
// We know that we only ever need to store 4 files (the stdout and stderr of the
// substrate-node and of the eth-rpc proxy), so we can specify that capacity when
// creating the vector.
logs_file_to_flush: Vec::with_capacity(4),
}
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn connection_string(&self) -> String {
self.rpc_url.clone()
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn shutdown(&mut self) -> anyhow::Result<()> {
// Terminate the processes in a graceful manner to allow for the output to be flushed.
if let Some(mut child) = self.process_proxy.take() {
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the proxy process: {error:?}"))?;
}
if let Some(mut child) = self.process_substrate.take() {
child.kill().map_err(|error| {
anyhow::anyhow!("Failed to kill the substrate process: {error:?}")
})?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore
// there's nothing to be deleted.
let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));
Ok(())
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(&genesis)?.spawn_process()
}
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.substrate_binary)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
}
impl Drop for KitchensinkNode {
#[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
fn drop(&mut self) {
self.shutdown().expect("Failed to shutdown")
}
}
#[cfg(test)]
mod tests {
use revive_dt_config::Arguments;
use std::path::PathBuf;
use temp_dir::TempDir;
use std::fs;
use super::KitchensinkNode;
use crate::{GENESIS_JSON, Node};
fn test_config() -> (Arguments, TempDir) {
let mut config = Arguments::default();
let temp_dir = TempDir::new().unwrap();
config.working_directory = temp_dir.path().to_path_buf().into();
config.kitchensink = PathBuf::from("substrate-node");
config.eth_proxy = PathBuf::from("eth-rpc");
(config, temp_dir)
}
#[test]
fn test_init_generates_chainspec_with_balances() {
let genesis_content = r#"
{
"alloc": {
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": {
"balance": "1000000000000000000"
},
"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2": {
"balance": "2000000000000000000"
}
}
}
"#;
let mut dummy_node = KitchensinkNode::new(&test_config().0);
// Call `init()`
dummy_node.init(genesis_content).expect("init failed");
// Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node
.base_directory
.join(KitchensinkNode::CHAIN_SPEC_JSON_FILE);
assert!(final_chainspec_path.exists(), "Chainspec file should exist");
let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec");
// Validate that the Substrate addresses derived from the Ethereum addresses are in the file
let first_eth_addr =
KitchensinkNode::eth_to_substrate_address("90F8bf6A479f320ead074411a4B0e7944Ea8c9C1")
.unwrap();
let second_eth_addr =
KitchensinkNode::eth_to_substrate_address("Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2")
.unwrap();
assert!(
contents.contains(&first_eth_addr),
"Chainspec should contain Substrate address for first Ethereum account"
);
assert!(
contents.contains(&second_eth_addr),
"Chainspec should contain Substrate address for second Ethereum account"
);
}
#[test]
fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
{
"alloc": {
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" },
"0x0000000000000000000000000000000000000000": { "balance": "0xDE0B6B3A7640000" },
"0xffffffffffffffffffffffffffffffffffffffff": { "balance": "123456789" }
}
}
"#;
let node = KitchensinkNode::new(&test_config().0);
let result = node
.extract_balance_from_genesis_file(genesis_json)
.unwrap();
let result_map: std::collections::HashMap<_, _> = result.into_iter().collect();
assert_eq!(
result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"),
Some(&123_456_789u128)
);
}
#[test]
fn print_eth_to_substrate_mappings() {
let eth_addresses = vec![
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"0xffffffffffffffffffffffffffffffffffffffff",
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
];
for eth_addr in eth_addresses {
let ss58 = KitchensinkNode::eth_to_substrate_address(eth_addr).unwrap();
println!("Ethereum: {eth_addr} -> Substrate SS58: {ss58}");
}
}
#[test]
fn test_eth_to_substrate_address() {
let cases = vec![
(
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
"5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV",
),
(
"0x0000000000000000000000000000000000000000",
"5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1",
),
(
"0xffffffffffffffffffffffffffffffffffffffff",
"5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4",
),
];
for (eth_addr, expected_ss58) in cases {
let result = KitchensinkNode::eth_to_substrate_address(eth_addr).unwrap();
assert_eq!(
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
);
}
}
#[test]
fn spawn_works() {
let (config, _temp_dir) = test_config();
let mut node = KitchensinkNode::new(&config);
node.spawn(GENESIS_JSON.to_string()).unwrap();
}
#[test]
fn version_works() {
let (config, _temp_dir) = test_config();
let node = KitchensinkNode::new(&config);
let version = node.version().unwrap();
assert!(
version.starts_with("substrate-node"),
"Expected substrate-node version string, got: {version}"
);
}
#[test]
fn eth_rpc_version_works() {
let (config, _temp_dir) = test_config();
let node = KitchensinkNode::new(&config);
let version = node.eth_rpc_version().unwrap();
assert!(
version.starts_with("pallet-revive-eth-rpc"),
"Expected eth-rpc version string, got: {version}"
);
}
}
+2 -5
@@ -1,10 +1,10 @@
//! This crate implements the testing nodes.
use alloy::rpc::types::{TransactionReceipt, trace::geth::DiffMode};
use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode;
pub mod geth;
pub mod kitchensink;
pub mod pool;
/// The default genesis configuration.
@@ -23,14 +23,11 @@ pub trait Node: EthereumNode {
/// Prune the node instance and related data.
///
/// Blocking until it's completely stopped.
fn shutdown(self) -> anyhow::Result<()>;
fn shutdown(&mut self) -> anyhow::Result<()>;
/// Returns the node's connection string.
fn connection_string(&self) -> String;
/// Returns the state diff of the transaction referenced by the [TransactionReceipt].
fn state_diff(&self, transaction: TransactionReceipt) -> anyhow::Result<DiffMode>;
/// Returns the node version.
fn version(&self) -> anyhow::Result<String>;
}
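// A sketch of what the switch from `shutdown(self)` to `shutdown(&mut self)`
// enables, assuming implementors want a best-effort shutdown on drop;
// `SketchNode` and its field are hypothetical and not part of this crate.
struct SketchNode {
    process: Option<std::process::Child>,
}
impl SketchNode {
    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Taking the child out makes shutdown idempotent, so the Drop impl
        // below is a no-op after an explicit shutdown.
        if let Some(mut child) = self.process.take() {
            child.kill()?;
            let _status = child.wait()?;
        }
        Ok(())
    }
}
impl Drop for SketchNode {
    fn drop(&mut self) {
        // A consuming `shutdown(self)` could not be called from here.
        let _ = self.shutdown();
    }
}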
+1 -1
View File
@@ -62,7 +62,7 @@ where
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
let mut node = T::new(args);
log::info!("starting node: {}", node.connection_string());
tracing::info!("starting node: {}", node.connection_string());
node.spawn(genesis)?;
Ok(node)
}
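// A sketch of how `spawn_node` is presumably called, with the node type fixed
// by the caller through the generic parameter. `GethNode` is an assumed name
// for the implementor in the `geth` module; `KitchensinkNode` and
// `GENESIS_JSON` appear elsewhere in this diff.
fn spawn_pair(args: &Arguments) -> anyhow::Result<(GethNode, KitchensinkNode)> {
    let reference = spawn_node::<GethNode>(args, GENESIS_JSON.to_string())?;
    let candidate = spawn_node::<KitchensinkNode>(args, GENESIS_JSON.to_string())?;
    Ok((reference, candidate))
}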
+18
View File
@@ -0,0 +1,18 @@
[package]
name = "revive-dt-report"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
anyhow = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
revive-solc-json-interface = { workspace = true }
+94
View File
@@ -0,0 +1,94 @@
//! The report analyzer enriches the raw report data.
use serde::{Deserialize, Serialize};
use crate::reporter::CompilationTask;
/// Provides insights into how well the compilers perform.
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, PartialOrd)]
pub struct CompilerStatistics {
/// The number of contracts observed.
pub n_contracts: usize,
/// The mean size of compiled contracts.
pub mean_code_size: usize,
/// The mean size of the optimized YUL IR.
pub mean_yul_size: usize,
/// Only a rough proxy, because the YUL source also contains a lot of comments.
pub yul_to_bytecode_size_ratio: f32,
}
impl CompilerStatistics {
/// Cumulatively update the statistics with the next compiler task.
pub fn sample(&mut self, compilation_task: &CompilationTask) {
let Some(output) = &compilation_task.json_output else {
return;
};
let Some(contracts) = &output.contracts else {
return;
};
for (_solidity, contracts) in contracts.iter() {
for (_name, contract) in contracts.iter() {
let Some(evm) = &contract.evm else {
continue;
};
let Some(deploy_code) = &evm.deployed_bytecode else {
continue;
};
// The EVM bytecode can be unlinked and thus is not necessarily a decodable hex
// string; for our statistics this is a good enough approximation.
let bytecode_size = deploy_code.object.len() / 2;
let yul_size = contract
.ir_optimized
.as_ref()
.expect("if the contract has a deploy code it should also have the opimized IR")
.len();
self.update_sizes(bytecode_size, yul_size);
}
}
}
/// Updates the size statistics cumulatively.
fn update_sizes(&mut self, bytecode_size: usize, yul_size: usize) {
let n_previous = self.n_contracts;
let n_current = self.n_contracts + 1;
self.n_contracts = n_current;
self.mean_code_size = (n_previous * self.mean_code_size + bytecode_size) / n_current;
self.mean_yul_size = (n_previous * self.mean_yul_size + yul_size) / n_current;
if self.mean_code_size > 0 {
self.yul_to_bytecode_size_ratio =
self.mean_yul_size as f32 / self.mean_code_size as f32;
}
}
}
#[cfg(test)]
mod tests {
use super::CompilerStatistics;
#[test]
fn compiler_statistics() {
let mut received = CompilerStatistics::default();
received.update_sizes(0, 0);
received.update_sizes(3, 37);
received.update_sizes(123, 456);
let mean_code_size = 41; // rounding error from integer truncation
let mean_yul_size = 164;
let expected = CompilerStatistics {
n_contracts: 3,
mean_code_size,
mean_yul_size,
yul_to_bytecode_size_ratio: mean_yul_size as f32 / mean_code_size as f32,
};
assert_eq!(received, expected);
}
}
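// The update in `update_sizes` is the standard incremental mean,
// mean_n = (n_prev * mean_prev + x) / n, evaluated in integer arithmetic, so
// every step truncates. A small standalone demonstration of the bias the test
// above pins down:
fn demo_truncation() {
    let samples = [0usize, 3, 123];
    let (mut n, mut mean) = (0usize, 0usize);
    for x in samples {
        mean = (n * mean + x) / (n + 1); // integer division truncates
        n += 1;
    }
    assert_eq!(mean, 41); // the exact mean is 42; truncation loses 1 here
}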
+4
View File
@@ -0,0 +1,4 @@
//! The revive differential tests reporting facility.
pub mod analyzer;
pub mod reporter;
+243
View File
@@ -0,0 +1,243 @@
//! The reporter is the central place for observing test execution by collecting data.
//!
//! The collected data gives useful insights into the outcome of a test run
//! and helps identify and reproduce failing cases.
use std::{
collections::HashMap,
fs::{self, File, create_dir_all},
path::PathBuf,
sync::{Mutex, OnceLock},
time::{SystemTime, UNIX_EPOCH},
};
use anyhow::Context;
use serde::{Deserialize, Serialize};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::{corpus::Corpus, mode::SolcMode};
use revive_solc_json_interface::{SolcStandardJsonInput, SolcStandardJsonOutput};
use crate::analyzer::CompilerStatistics;
pub(crate) static REPORTER: OnceLock<Mutex<Report>> = OnceLock::new();
/// The `Report` data structure stores all relevant information required for generating reports.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Report {
/// The configuration used during the test.
pub config: Arguments,
/// The observed test corpora.
pub corpora: Vec<Corpus>,
/// The observed test definitions.
pub metadata_files: Vec<PathBuf>,
/// The observed compilation results.
pub compiler_results: HashMap<TestingPlatform, Vec<CompilationResult>>,
/// The observed compilation statistics.
pub compiler_statistics: HashMap<TestingPlatform, CompilerStatistics>,
/// The directory this report is serialized to.
#[serde(skip)]
directory: PathBuf,
}
/// Contains a compiled contract.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompilationTask {
/// The observed compiler input.
pub json_input: SolcStandardJsonInput,
/// The observed compiler output.
pub json_output: Option<SolcStandardJsonOutput>,
/// The observed compiler mode.
pub mode: SolcMode,
/// The observed compiler version.
pub compiler_version: String,
/// The observed error, if any.
pub error: Option<String>,
}
/// Represents a report about a compilation task.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CompilationResult {
/// The observed compilation task.
pub compilation_task: CompilationTask,
/// The linked span.
pub span: Span,
}
/// The [Span] struct indicates the context of what is being reported.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct Span {
/// The corpus index this belongs to.
corpus: usize,
/// The metadata file this belongs to.
metadata_file: usize,
/// The index of the case definition this belongs to.
case: usize,
/// The index of the case input this belongs to.
input: usize,
}
impl Report {
/// The file name this report is written to.
pub const FILE_NAME: &str = "report.json";
/// The [Span] is expected to initialize the reporter by providing the config.
const INITIALIZED_VIA_SPAN: &str = "requires a Span which initializes the reporter";
/// Create a new [Report].
fn new(config: Arguments) -> anyhow::Result<Self> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis();
let directory = config.directory().join("report").join(format!("{now}"));
if !directory.exists() {
create_dir_all(&directory)?;
}
Ok(Self {
config,
directory,
..Default::default()
})
}
/// Add a compilation task to the report.
pub fn compilation(span: Span, platform: TestingPlatform, compilation_task: CompilationTask) {
let mut report = REPORTER
.get()
.expect(Report::INITIALIZED_VIA_SPAN)
.lock()
.unwrap();
report
.compiler_statistics
.entry(platform)
.or_default()
.sample(&compilation_task);
report
.compiler_results
.entry(platform)
.or_default()
.push(CompilationResult {
compilation_task,
span,
});
}
/// Write the report to disk.
pub fn save() -> anyhow::Result<()> {
let Some(reporter) = REPORTER.get() else {
return Ok(());
};
let report = reporter.lock().unwrap();
if let Err(error) = report.write_to_file() {
anyhow::bail!("can not write report: {error}");
}
if report.config.extract_problems {
if let Err(error) = report.save_compiler_problems() {
anyhow::bail!("can not write compiler problems: {error}");
}
}
Ok(())
}
/// Write compiler problems to disk for later debugging.
pub fn save_compiler_problems(&self) -> anyhow::Result<()> {
for (platform, results) in self.compiler_results.iter() {
for result in results {
// ignore if there were no errors
if result.compilation_task.error.is_none()
&& result
.compilation_task
.json_output
.as_ref()
.and_then(|output| output.errors.as_ref())
.map(|errors| errors.is_empty())
.unwrap_or(true)
{
continue;
}
let path = &self.metadata_files[result.span.metadata_file]
.parent()
.unwrap()
.join(format!("{platform}_errors"));
if !path.exists() {
create_dir_all(path)?;
}
if let Some(error) = result.compilation_task.error.as_ref() {
fs::write(path.join("compiler_error.txt"), error)?;
}
if let Some(errors) = result.compilation_task.json_output.as_ref() {
let file = File::create(path.join("compiler_output.txt"))?;
serde_json::to_writer_pretty(file, &errors)?;
}
}
}
Ok(())
}
fn write_to_file(&self) -> anyhow::Result<()> {
let path = self.directory.join(Self::FILE_NAME);
let file = File::create(&path).context(path.display().to_string())?;
serde_json::to_writer_pretty(file, &self)?;
tracing::info!("report written to: {}", path.display());
Ok(())
}
}
impl Span {
/// Create a new [Span] with case and input index at 0.
///
/// Initializes the reporting facility on the first call.
pub fn new(corpus: Corpus, config: Arguments) -> anyhow::Result<Self> {
let report = Mutex::new(Report::new(config)?);
let mut reporter = REPORTER.get_or_init(|| report).lock().unwrap();
reporter.corpora.push(corpus);
Ok(Self {
corpus: reporter.corpora.len() - 1,
metadata_file: 0,
case: 0,
input: 0,
})
}
/// Advance to the next metadata file: resets the case and input indices to 0.
pub fn next_metadata(&mut self, metadata_file: PathBuf) {
let mut reporter = REPORTER
.get()
.expect(Report::INITIALIZED_VIA_SPAN)
.lock()
.unwrap();
reporter.metadata_files.push(metadata_file);
self.metadata_file = reporter.metadata_files.len() - 1;
self.case = 0;
self.input = 0;
}
/// Advance to the next case: increases the case index by one and resets the input index to 0.
pub fn next_case(&mut self) {
self.case += 1;
self.input = 0;
}
/// Advance to the next input.
pub fn next_input(&mut self) {
self.input += 1;
}
}
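// A sketch of the intended reporting flow, assuming a `Corpus` and `Arguments`
// are already available from the test driver; the metadata file names and the
// commented-out platform are placeholders:
fn report_flow(corpus: Corpus, config: Arguments) -> anyhow::Result<()> {
    // The first Span initializes the global REPORTER with the config.
    let mut span = Span::new(corpus, config)?;
    for metadata_file in ["a.json", "b.json"] {
        span.next_metadata(PathBuf::from(metadata_file));
        // For each case and input, compile and then record the task:
        // Report::compilation(span, platform, compilation_task);
        span.next_case();
        span.next_input();
    }
    // Finally, persist everything that was collected.
    Report::save()
}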
+2 -2
View File
@@ -1,6 +1,6 @@
[package]
name = "revive-dt-solc-binaries"
dependencies = "Download and cache solc binaries"
description = "Download and cache solc binaries"
version.workspace = true
authors.workspace = true
license.workspace = true
@@ -11,7 +11,7 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
hex = { workspace = true }
log = { workspace = true }
tracing = { workspace = true }
reqwest = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
+4 -4
View File
@@ -25,7 +25,7 @@ pub(crate) fn get_or_download(
let mut cache = SOLC_CACHER.lock().unwrap();
if cache.contains(&target_file) {
log::debug!("using cached solc: {}", target_file.display());
tracing::debug!("using cached solc: {}", target_file.display());
return Ok(target_file);
}
@@ -37,10 +37,10 @@ pub(crate) fn get_or_download(
}
fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()> {
log::info!("caching file: {}", path.display());
tracing::info!("caching file: {}", path.display());
let Ok(file) = File::create_new(path) else {
log::debug!("cache file already exists: {}", path.display());
tracing::debug!("cache file already exists: {}", path.display());
return Ok(());
};
@@ -60,7 +60,7 @@ fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()
std::process::Command::new("xattr")
.arg("-d")
.arg("com.apple.quarantine")
.arg(&path)
.arg(path)
.stderr(std::process::Stdio::null())
.stdout(std::process::Stdio::null())
+1 -1
View File
@@ -86,7 +86,7 @@ impl GHDownloader {
/// Errors out if the download fails or the digest of the downloaded file
/// does not match the expected digest from the release [List].
pub fn download(&self) -> anyhow::Result<Vec<u8>> {
log::info!("downloading solc: {self:?}");
tracing::info!("downloading solc: {self:?}");
let expected_digest = List::download(self.list)?
.builds
.iter()
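// A sketch of the digest comparison the doc comment above describes. The
// hashing scheme is an assumption (SHA-256 via the `sha2` crate); only the
// "compare against the expected digest from the list" step is taken from the
// surrounding code.
fn verify_digest(bytes: &[u8], expected_hex: &str) -> anyhow::Result<()> {
    use sha2::{Digest, Sha256};
    let actual = hex::encode(Sha256::digest(bytes));
    anyhow::ensure!(
        actual == expected_hex.trim_start_matches("0x"),
        "digest mismatch: expected {expected_hex}, got {actual}"
    );
    Ok(())
}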
Submodule
+1
Submodule polkadot-sdk added at dc3d0e5ab7