Compare commits

...

28 Commits

Author SHA1 Message Date
pgherveou 2809c2a72c fix 2025-10-08 11:02:08 +00:00
pgherveou dffb80ac0a fixes 2025-10-08 11:18:31 +02:00
pgherveou 43a1114337 custom rpc port 2025-10-08 11:10:46 +02:00
pgherveou 3a07ea042b fix 2025-10-08 10:45:49 +02:00
pgherveou 9e2aa972db fix 2025-10-08 10:33:59 +02:00
pgherveou 86f2173e8b nit 2025-10-08 10:14:22 +02:00
pgherveou 6e658aec49 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:04:38 +02:00
pgherveou 1aba74ec3e fix 2025-10-08 10:03:00 +02:00
pgherveou 180bd64bc5 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:01:36 +02:00
pgherveou 967cbac349 fix 2025-10-08 10:00:32 +02:00
pgherveou a8d84c8360 fix 2025-10-08 09:59:53 +02:00
pgherveou c83a755416 Merge branch 'main' into pg/fmt 2025-10-08 09:59:42 +02:00
pgherveou 0711216539 add fmt check 2025-10-08 09:57:28 +02:00
pgherveou b40c17c0af fixes 2025-10-08 09:52:13 +02:00
pgherveou 8ae994f9de fixes 2025-10-08 09:43:36 +02:00
pgherveou 3f3cbfa934 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:28:45 +02:00
pgherveou c676114fe1 apply fmt 2025-10-08 09:27:11 +02:00
pgherveou 92885351ed use polkadot-sdk rustfmt 2025-10-08 09:26:24 +02:00
pgherveou e16f8ebf59 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:19:21 +02:00
pgherveou d482808eb2 add rustfmt.toml 2025-10-08 07:18:17 +00:00
pgherveou 1f84ce6f61 fix lint 2025-10-08 06:28:57 +00:00
pgherveou 765569a8b6 fix 2025-10-08 08:22:26 +02:00
pgherveou 6e64f678ee ml-runner init 2025-10-07 16:10:43 +00:00
Marios 8c412dc924 Use absolute path for zombienet to avoid conflicts (#178) 2025-10-07 16:03:18 +00:00
Omar 6da3172581 Fix/fallback gas for substrate (#177)
* Update compiler semaphore

* Fix the fallback gas for substrate chains

* Fix the concurrency & the substrate gas limit fallback value
2025-10-07 02:25:37 +00:00
Marios c6eb04b04e Support zombienet (#172)
* Basic zombie node definition

* [WIP] - Impl EthereumNode for zombie node

* Remove unused imports

* [WIP] Support substrate node in zombienet network

* Impl zombie node resolver && node for zombie node

* Spawn eth-rpc on top of collator node

* Implement ZombienetPlatform and integrate zombie node with eth rpc

* Add wourkaround to run tests

* Add few comments

* fmt

* Replace default transaction request in test

* Merge - Fix conficts with main

* fmt

* Clippy fix

* Add polkadot and parachain node to ci

* CI - Fetch polkadot binaries from releases

* Fix unit test assertion

* Minor doc improvements

* Change names from ZombieNet to Zombienet and switch from Command to Process

* Refactor ZombieNode to cache provider using OnceCell

* CI: Cache polkadot binaries and use them if available

* Fix conficts with main

* Refactor shared_node to return static reference and add shared_state for context access

* fmt

* Rename ZombienetConfiguration to PolkadotParachainConfiguration and update related usage
2025-10-07 02:24:20 +00:00
Omar e5114d31dc Update compiler semaphore (#176) 2025-10-05 22:28:57 +00:00
Omar 74fdeb4a2e Core Benchmarking Infra (#175)
* Implement a solution for the pre-fund account limit

* Update the account pre-funding handling

* Fix the lighthouse node tracing issue

* refactor existing dt infra

* Implement the platform driver

* Wire up the cleaned up driver implementation

* Implement the core benchmarking components

* Remove some debug logging

* Fix issues in the benchmarks driver

* Implement a global concurrency limit on provider requests

* Update the concurrency limit

* Update the concurrency limit

* Cleanups

* Update the lighthouse ports

* Ignore certain tests

* Update the new geth test
2025-10-05 15:09:01 +00:00
64 changed files with 8943 additions and 2189 deletions
+81
View File
@@ -15,11 +15,31 @@ concurrency:
env:
CARGO_TERM_COLOR: always
POLKADOT_VERSION: polkadot-stable2506-2
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install nightly toolchain
run: rustup toolchain install nightly
- name: Install rustfmt for nightly
run: rustup component add --toolchain nightly rustfmt
- name: Cargo fmt
run: cargo +nightly fmt --all -- --check
cache-polkadot:
name: Build and cache Polkadot binaries on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
needs: [fmt]
strategy:
matrix:
os: [ubuntu-24.04, macos-14]
@@ -65,6 +85,37 @@ jobs:
run: |
cd polkadot-sdk
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
- name: Cache downloaded Polkadot binaries
id: cache-polkadot
uses: actions/cache@v3
with:
path: |
~/polkadot-cache/polkadot
~/polkadot-cache/polkadot-execute-worker
~/polkadot-cache/polkadot-prepare-worker
~/polkadot-cache/polkadot-parachain
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
- name: Download Polkadot binaries on macOS
if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
run: |
mkdir -p ~/polkadot-cache
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
chmod +x ~/polkadot-cache/*
- name: Download Polkadot binaries on Ubuntu
if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
run: |
mkdir -p ~/polkadot-cache
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
chmod +x ~/polkadot-cache/*
ci:
name: CI on ${{ matrix.os }}
@@ -86,6 +137,24 @@ jobs:
~/.cargo/bin/eth-rpc
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
- name: Restore downloaded Polkadot binaries from cache
uses: actions/cache@v3
with:
path: |
~/polkadot-cache/polkadot
~/polkadot-cache/polkadot-execute-worker
~/polkadot-cache/polkadot-prepare-worker
~/polkadot-cache/polkadot-parachain
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
- name: Install Polkadot binaries
run: |
sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
sudo chmod +x /usr/local/bin/polkadot*
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
@@ -170,6 +239,18 @@ jobs:
- name: Check resolc version
run: resolc --version
- name: Check polkadot version
run: polkadot --version
- name: Check polkadot-parachain version
run: polkadot-parachain --version
- name: Check polkadot-execute-worker version
run: polkadot-execute-worker --version
- name: Check polkadot-prepare-worker version
run: polkadot-prepare-worker --version
- name: Test Formatting
run: make format
-1
View File
@@ -13,4 +13,3 @@ resolc-compiler-tests
workdir
!/schema.json
!/dev-genesis.json
Generated
+2267 -122
View File
File diff suppressed because it is too large Load Diff
+7 -4
View File
@@ -22,8 +22,6 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
alloy-primitives = "1.2.1"
alloy-sol-types = "1.2.1"
anyhow = "1.0"
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
@@ -59,6 +57,7 @@ tokio = { version = "1.47.0", default-features = false, features = [
"process",
"rt",
] }
tower = { version = "0.5.2", features = ["limit"] }
uuid = { version = "1.8", features = ["v4"] }
tracing = { version = "0.1.41" }
tracing-appender = { version = "0.2.3" }
@@ -74,14 +73,17 @@ revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev ="891f6554354ce466abd496366dbf8b4f82141241" }
[workspace.dependencies.alloy]
version = "1.0.22"
version = "1.0.37"
default-features = false
features = [
"json-abi",
"providers",
"provider-ipc",
"provider-ws",
"provider-ipc",
"provider-http",
"provider-debug-api",
"reqwest",
"rpc-types",
@@ -91,6 +93,7 @@ features = [
"serde",
"rpc-types-eth",
"genesis",
"sol-types",
]
[profile.bench]
-1
View File
@@ -10,7 +10,6 @@ rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
+5 -3
View File
@@ -1,9 +1,11 @@
//! This module implements a cached file system allowing for results to be stored in-memory rather
//! rather being queried from the file system again.
use std::fs;
use std::io::{Error, Result};
use std::path::{Path, PathBuf};
use std::{
fs,
io::{Error, Result},
path::{Path, PathBuf},
};
use moka::sync::Cache;
use once_cell::sync::Lazy;
+1 -2
View File
@@ -1,5 +1,4 @@
use std::ops::ControlFlow;
use std::time::Duration;
use std::{ops::ControlFlow, time::Duration};
use anyhow::{Context as _, Result, anyhow};
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
};
}
/// Technically not needed but this allows for the macro to be found in the `macros` module of the
/// crate in addition to being found in the root of the crate.
/// Technically not needed but this allows for the macro to be found in the `macros` module of
/// the crate in addition to being found in the root of the crate.
pub use {define_wrapper_type, impl_for_wrapper};
+6
View File
@@ -39,6 +39,10 @@ pub enum PlatformIdentifier {
ReviveDevNodePolkavmResolc,
/// The revive dev node with the REVM backend with the solc compiler.
ReviveDevNodeRevmSolc,
/// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
ZombienetPolkavmResolc,
/// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
ZombienetRevmSolc,
}
/// An enum of the platform identifiers of all of the platforms supported by this framework.
@@ -95,6 +99,8 @@ pub enum NodeIdentifier {
Kitchensink,
/// The revive dev node implementation.
ReviveDevNode,
/// A zombienet spawned nodes
Zombienet,
}
/// An enum representing the identifiers of the supported VMs.
+2
View File
@@ -1,9 +1,11 @@
mod identifiers;
mod mode;
mod private_key_allocator;
mod round_robin_pool;
mod version_or_requirement;
pub use identifiers::*;
pub use mode::*;
pub use private_key_allocator::*;
pub use round_robin_pool::*;
pub use version_or_requirement::*;
+1 -3
View File
@@ -1,9 +1,7 @@
use crate::types::VersionOrRequirement;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
use std::{fmt::Display, str::FromStr, sync::LazyLock};
/// This represents a mode that a given test should be run with, if possible.
///
@@ -1,6 +1,5 @@
use alloy::signers::local::PrivateKeySigner;
use alloy_primitives::U256;
use anyhow::{Result, bail};
use alloy::{primitives::U256, signers::local::PrivateKeySigner};
use anyhow::{Context, Result, bail};
/// This is a sequential private key allocator. When instantiated, it allocated private keys in
/// sequentially and in order until the maximum private key specified is reached.
@@ -10,25 +9,26 @@ pub struct PrivateKeyAllocator {
next_private_key: U256,
/// The highest private key (exclusive) that can be returned by this allocator.
highest_private_key_exclusive: U256,
highest_private_key_inclusive: U256,
}
impl PrivateKeyAllocator {
/// Creates a new instance of the private key allocator.
pub fn new(highest_private_key_exclusive: U256) -> Self {
pub fn new(highest_private_key_inclusive: U256) -> Self {
Self {
next_private_key: U256::ZERO,
highest_private_key_exclusive,
next_private_key: U256::ONE,
highest_private_key_inclusive,
}
}
/// Allocates a new private key and errors out if the maximum private key has been reached.
pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
if self.next_private_key >= self.highest_private_key_exclusive {
if self.next_private_key > self.highest_private_key_inclusive {
bail!("Attempted to allocate a private key but failed since all have been allocated");
};
let private_key =
PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())?;
PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
.context("Failed to convert the private key digits into a private key")?;
self.next_private_key += U256::ONE;
Ok(private_key)
}
@@ -0,0 +1,24 @@
use std::sync::atomic::{AtomicUsize, Ordering};
pub struct RoundRobinPool<T> {
next_index: AtomicUsize,
items: Vec<T>,
}
impl<T> RoundRobinPool<T> {
pub fn new(items: Vec<T>) -> Self {
Self {
next_index: Default::default(),
items,
}
}
pub fn round_robin(&self) -> &T {
let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
self.items.get(current).unwrap()
}
pub fn iter(&self) -> impl Iterator<Item = &T> {
self.items.iter()
}
}
-1
View File
@@ -16,7 +16,6 @@ revive-dt-solc-binaries = { workspace = true }
revive-common = { workspace = true }
alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
dashmap = { workspace = true }
foundry-compilers-artifacts = { workspace = true }
+1 -2
View File
@@ -10,8 +10,7 @@ use std::{
pin::Pin,
};
use alloy::json_abi::JsonAbi;
use alloy_primitives::Address;
use alloy::{json_abi::JsonAbi, primitives::Address};
use anyhow::{Context as _, Result};
use semver::Version;
use serde::{Deserialize, Serialize};
+51 -38
View File
@@ -16,6 +16,7 @@ use revive_solc_json_interface::{
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
SolcStandardJsonOutput,
};
use tracing::{Span, field::display};
use crate::{
CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
@@ -80,6 +81,16 @@ impl SolidityCompiler for Resolc {
}
#[tracing::instrument(level = "debug", ret)]
#[tracing::instrument(
level = "error",
skip_all,
fields(
resolc_version = %self.version(),
solc_version = %self.0.solc.version(),
json_in = tracing::field::Empty
),
err(Debug)
)]
fn build(
&self,
CompilerInput {
@@ -141,6 +152,7 @@ impl SolidityCompiler for Resolc {
polkavm: None,
},
};
Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
let path = &self.0.resolc_path;
let mut command = AsyncCommand::new(path);
@@ -148,6 +160,8 @@ impl SolidityCompiler for Resolc {
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--solc")
.arg(self.0.solc.path())
.arg("--standard-json");
if let Some(ref base_path) = base_path {
@@ -239,45 +253,44 @@ impl SolidityCompiler for Resolc {
.evm
.and_then(|evm| evm.bytecode.clone())
.context("Unexpected - Contract compiled with resolc has no bytecode")?;
let abi = {
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => {
solc_metadata_str.as_str()
}
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
let abi =
{
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => {
solc_metadata_str.as_str()
}
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
};
let solc_metadata =
serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
};
let solc_metadata = serde_json::from_str::<serde_json::Value>(
solc_metadata_str,
)
.context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
};
map.insert(contract_name, (bytecode.object, abi));
}
}
+23 -13
View File
@@ -10,8 +10,9 @@ use std::{
use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_config::{SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc;
use tracing::{Span, field::display, info};
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
@@ -20,8 +21,7 @@ use foundry_compilers_artifacts::{
output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
},
solc::CompilerOutput as SolcOutput,
solc::*,
solc::{CompilerOutput as SolcOutput, *},
};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -39,9 +39,7 @@ struct SolcInner {
impl Solc {
pub async fn new(
context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> {
// This is a cache for the compiler objects so that whenever the same compiler version is
@@ -69,6 +67,11 @@ impl Solc {
Ok(COMPILERS_CACHE
.entry((path.clone(), version.clone()))
.or_insert_with(|| {
info!(
solc_path = %path.display(),
solc_version = %version,
"Created a new solc compiler object"
);
Self(Arc::new(SolcInner {
solc_path: path,
solc_version: version,
@@ -88,6 +91,12 @@ impl SolidityCompiler for Solc {
}
#[tracing::instrument(level = "debug", ret)]
#[tracing::instrument(
level = "error",
skip_all,
fields(json_in = tracing::field::Empty),
err(Debug)
)]
fn build(
&self,
CompilerInput {
@@ -166,12 +175,14 @@ impl SolidityCompiler for Solc {
},
};
Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
let path = &self.0.solc_path;
let mut command = AsyncCommand::new(path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.stderr(Stdio::null())
.arg("--standard-json");
if let Some(ref base_path) = base_path {
@@ -205,20 +216,18 @@ impl SolidityCompiler for Solc {
if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&output.stderr);
tracing::error!(
status = %output.status,
message = %message,
json_input = json_in,
"Compilation using solc failed"
);
anyhow::bail!("Compilation failed with an error: {message}");
anyhow::bail!("Compilation failed");
}
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
.map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
"failed to parse resolc JSON output: {e}\nstdout: {}",
String::from_utf8_lossy(&output.stdout)
)
})
@@ -274,8 +283,9 @@ impl SolidityCompiler for Solc {
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
// (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
// mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
// is new enough.
pipeline == ModePipeline::ViaEVMAssembly
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
}
+289 -32
View File
@@ -28,7 +28,11 @@ use temp_dir::TempDir;
#[command(name = "retester")]
pub enum Context {
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
ExecuteTests(Box<TestExecutionContext>),
Test(Box<TestExecutionContext>),
/// Executes differential benchmarks on various platforms.
Benchmark(Box<BenchmarkingContext>),
/// Exports the JSON schema of the MatterLabs test format used by the tool.
ExportJsonSchema,
}
@@ -46,7 +50,18 @@ impl Context {
impl AsRef<WorkingDirectoryConfiguration> for Context {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
}
impl AsRef<CorpusConfiguration> for Context {
fn as_ref(&self) -> &CorpusConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -55,7 +70,8 @@ impl AsRef<WorkingDirectoryConfiguration> for Context {
impl AsRef<SolcConfiguration> for Context {
fn as_ref(&self) -> &SolcConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -64,7 +80,8 @@ impl AsRef<SolcConfiguration> for Context {
impl AsRef<ResolcConfiguration> for Context {
fn as_ref(&self) -> &ResolcConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -73,7 +90,8 @@ impl AsRef<ResolcConfiguration> for Context {
impl AsRef<GethConfiguration> for Context {
fn as_ref(&self) -> &GethConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -82,7 +100,18 @@ impl AsRef<GethConfiguration> for Context {
impl AsRef<KurtosisConfiguration> for Context {
fn as_ref(&self) -> &KurtosisConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
}
impl AsRef<PolkadotParachainConfiguration> for Context {
fn as_ref(&self) -> &PolkadotParachainConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -91,7 +120,8 @@ impl AsRef<KurtosisConfiguration> for Context {
impl AsRef<KitchensinkConfiguration> for Context {
fn as_ref(&self) -> &KitchensinkConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -100,7 +130,8 @@ impl AsRef<KitchensinkConfiguration> for Context {
impl AsRef<ReviveDevNodeConfiguration> for Context {
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -109,7 +140,8 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
impl AsRef<EthRpcConfiguration> for Context {
fn as_ref(&self) -> &EthRpcConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -118,7 +150,11 @@ impl AsRef<EthRpcConfiguration> for Context {
impl AsRef<GenesisConfiguration> for Context {
fn as_ref(&self) -> &GenesisConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(..) => {
static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
&GENESIS
}
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -127,7 +163,8 @@ impl AsRef<GenesisConfiguration> for Context {
impl AsRef<WalletConfiguration> for Context {
fn as_ref(&self) -> &WalletConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -136,7 +173,8 @@ impl AsRef<WalletConfiguration> for Context {
impl AsRef<ConcurrencyConfiguration> for Context {
fn as_ref(&self) -> &ConcurrencyConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -145,7 +183,8 @@ impl AsRef<ConcurrencyConfiguration> for Context {
impl AsRef<CompilationConfiguration> for Context {
fn as_ref(&self) -> &CompilationConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -154,7 +193,8 @@ impl AsRef<CompilationConfiguration> for Context {
impl AsRef<ReportConfiguration> for Context {
fn as_ref(&self) -> &ReportConfiguration {
match self {
Self::ExecuteTests(context) => context.as_ref().as_ref(),
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -162,11 +202,11 @@ impl AsRef<ReportConfiguration> for Context {
#[derive(Clone, Debug, Parser, Serialize)]
pub struct TestExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
/// The working directory that the program will use for all of the temporary artifacts needed
/// at runtime.
///
/// If not specified, then a temporary directory will be created and used by the program for all
/// temporary artifacts.
/// If not specified, then a temporary directory will be created and used by the program for
/// all temporary artifacts.
#[clap(
short,
long,
@@ -183,9 +223,9 @@ pub struct TestExecutionContext {
)]
pub platforms: Vec<PlatformIdentifier>,
/// A list of test corpus JSON files to be tested.
#[arg(long = "corpus", short)]
pub corpus: Vec<PathBuf>,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
/// Configuration parameters for the solc compiler.
#[clap(flatten, next_help_heading = "Solc Configuration")]
@@ -195,6 +235,10 @@ pub struct TestExecutionContext {
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the Polkadot Parachain.
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
@@ -236,6 +280,87 @@ pub struct TestExecutionContext {
pub report_configuration: ReportConfiguration,
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct BenchmarkingContext {
/// The working directory that the program will use for all of the temporary artifacts needed
/// at runtime.
///
/// If not specified, then a temporary directory will be created and used by the program for
/// all temporary artifacts.
#[clap(
short,
long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
long = "platform",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)]
pub platforms: Vec<PlatformIdentifier>,
/// The default repetition count for any workload specified but that doesn't contain a repeat
/// step.
#[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
pub default_repetition_count: usize,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
/// Configuration parameters for the solc compiler.
#[clap(flatten, next_help_heading = "Solc Configuration")]
pub solc_configuration: SolcConfiguration,
/// Configuration parameters for the resolc compiler.
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
/// Configuration parameters for the lighthouse node.
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
pub lighthouse_configuration: KurtosisConfiguration,
/// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Polkadot Parachain.
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration,
/// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration,
/// Configuration parameters for concurrency.
#[clap(flatten, next_help_heading = "Concurrency Configuration")]
pub concurrency_configuration: ConcurrencyConfiguration,
/// Configuration parameters for the compilers and compilation.
#[clap(flatten, next_help_heading = "Compilation Configuration")]
pub compilation_configuration: CompilationConfiguration,
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
}
impl Default for TestExecutionContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
@@ -248,6 +373,12 @@ impl AsRef<WorkingDirectoryConfiguration> for TestExecutionContext {
}
}
/// Exposes the corpus configuration so generic helpers can accept
/// `impl AsRef<CorpusConfiguration>` instead of the concrete context type.
impl AsRef<CorpusConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &CorpusConfiguration {
        &self.corpus_configuration
    }
}
impl AsRef<SolcConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &SolcConfiguration {
&self.solc_configuration
@@ -266,6 +397,12 @@ impl AsRef<GethConfiguration> for TestExecutionContext {
}
}
/// Exposes the polkadot-parachain configuration so generic helpers can accept
/// `impl AsRef<PolkadotParachainConfiguration>` instead of the concrete context type.
impl AsRef<PolkadotParachainConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &PolkadotParachainConfiguration {
        &self.polkadot_parachain_configuration
    }
}
impl AsRef<KurtosisConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &KurtosisConfiguration {
&self.lighthouse_configuration
@@ -320,6 +457,104 @@ impl AsRef<ReportConfiguration> for TestExecutionContext {
}
}
/// Parsing from just a binary name yields a context populated entirely from
/// the clap default values declared on the struct's fields.
impl Default for BenchmarkingContext {
    fn default() -> Self {
        Self::parse_from(["execution-context"])
    }
}

// The `AsRef` implementations below let shared helpers accept either context
// type (testing or benchmarking) by bounding on `AsRef<SomeConfiguration>`
// rather than on a concrete context struct. Each impl simply exposes the
// corresponding configuration field.

impl AsRef<WorkingDirectoryConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.working_directory
    }
}
impl AsRef<CorpusConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &CorpusConfiguration {
        &self.corpus_configuration
    }
}
impl AsRef<SolcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc_configuration
    }
}
impl AsRef<ResolcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ResolcConfiguration {
        &self.resolc_configuration
    }
}
impl AsRef<GethConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &GethConfiguration {
        &self.geth_configuration
    }
}
impl AsRef<KurtosisConfiguration> for BenchmarkingContext {
    // Note: the Kurtosis configuration is stored in the field named
    // `lighthouse_configuration` on the context struct.
    fn as_ref(&self) -> &KurtosisConfiguration {
        &self.lighthouse_configuration
    }
}
impl AsRef<PolkadotParachainConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &PolkadotParachainConfiguration {
        &self.polkadot_parachain_configuration
    }
}
impl AsRef<KitchensinkConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &KitchensinkConfiguration {
        &self.kitchensink_configuration
    }
}
impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
        &self.revive_dev_node_configuration
    }
}
impl AsRef<EthRpcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration
    }
}
impl AsRef<WalletConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet_configuration
    }
}
impl AsRef<ConcurrencyConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ConcurrencyConfiguration {
        &self.concurrency_configuration
    }
}
impl AsRef<CompilationConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &CompilationConfiguration {
        &self.compilation_configuration
    }
}
impl AsRef<ReportConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &ReportConfiguration {
        &self.report_configuration
    }
}
/// A set of configuration parameters for the corpus files to use for the execution.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct CorpusConfiguration {
    /// A list of test corpus JSON files to be tested.
    ///
    /// Supplied on the CLI as `-c`/`--corpus`; the flag may be repeated to
    /// pass multiple corpus files.
    #[arg(short = 'c', long = "corpus")]
    pub paths: Vec<PathBuf>,
}
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct SolcConfiguration {
@@ -340,6 +575,30 @@ pub struct ResolcConfiguration {
pub path: PathBuf,
}
/// A set of configuration parameters for Polkadot Parachain.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct PolkadotParachainConfiguration {
    /// Specifies the path of the polkadot-parachain node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the polkadot-parachain
    /// binary that's provided in the user's $PATH.
    #[clap(
        id = "polkadot-parachain.path",
        long = "polkadot-parachain.path",
        default_value = "polkadot-parachain"
    )]
    pub path: PathBuf,
    /// The amount of time to wait upon startup before considering that the node timed out.
    ///
    /// The CLI value is given in milliseconds (default 5000) and converted to
    /// a [`Duration`] by the `parse_duration` value parser.
    #[clap(
        id = "polkadot-parachain.start-timeout-ms",
        long = "polkadot-parachain.start-timeout-ms",
        default_value = "5000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GethConfiguration {
@@ -365,8 +624,8 @@ pub struct GethConfiguration {
pub struct KurtosisConfiguration {
/// Specifies the path of the kurtosis node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the kurtosis binary that's
/// provided in the user's $PATH.
/// If this is not specified, then the tool assumes that it should use the kurtosis binary
/// that's provided in the user's $PATH.
#[clap(
id = "kurtosis.path",
long = "kurtosis.path",
@@ -397,10 +656,6 @@ pub struct KitchensinkConfiguration {
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// This configures the tool to use Kitchensink instead of using the revive-dev-node.
#[clap(long = "kitchensink.dont-use-dev-node")]
pub use_kitchensink: bool,
}
/// A set of configuration parameters for the revive dev node.
@@ -408,8 +663,8 @@ pub struct KitchensinkConfiguration {
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the revive dev node binary
/// that's provided in the user's $PATH.
/// If this is not specified, then the tool assumes that it should use the revive dev node
/// binary that's provided in the user's $PATH.
#[clap(
id = "revive-dev-node.path",
long = "revive-dev-node.path",
@@ -448,7 +703,7 @@ pub struct EthRpcConfiguration {
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Default, Parser, Serialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
@@ -499,8 +754,8 @@ pub struct WalletConfiguration {
/// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
/// of the node.
#[clap(long = "wallet.additional-keys", default_value_t = 200)]
additional_keys: usize,
#[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
pub additional_keys: usize,
/// The wallet object that will be used.
#[clap(skip)]
@@ -692,4 +947,6 @@ pub enum TestingPlatform {
Geth,
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink,
/// A polkadot/Substrate based network
Zombienet,
}
@@ -0,0 +1,770 @@
use std::{
collections::HashMap,
ops::ControlFlow,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
time::Duration,
};
use alloy::{
hex,
json_abi::JsonAbi,
network::{Ethereum, TransactionBuilder},
primitives::{Address, TxHash, U256},
rpc::types::{
TransactionReceipt, TransactionRequest,
trace::geth::{
CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
GethDebugTracingOptions,
},
},
};
use anyhow::{Context as _, Result, bail};
use indexmap::IndexMap;
use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
types::PrivateKeyAllocator,
};
use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent},
steps::{
AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method,
RepeatStep, Step, StepAddress, StepIdx, StepPath, StorageEmptyAssertionStep,
},
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{
differential_benchmarks::{ExecutionState, WatcherEvent},
helpers::{CachedCompiler, TestDefinition, TestPlatformInformation},
};
/// A process-wide monotonically increasing counter used to hand every new
/// [`Driver`] (including the clones created for repeat steps) a unique id for
/// tracing spans.
static DRIVER_COUNT: AtomicUsize = AtomicUsize::new(0);
/// The differential tests driver for a single platform.
pub struct Driver<'a, I> {
    /// The id of the driver.
    driver_id: usize,
    /// The information of the platform that this driver is for.
    platform_information: &'a TestPlatformInformation<'a>,
    /// The resolver of the platform.
    resolver: Arc<dyn ResolverApi + 'a>,
    /// The definition of the test that the driver is instructed to execute.
    test_definition: &'a TestDefinition<'a>,
    /// The private key allocator used by this driver and other drivers when account allocations
    /// are needed.
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
    /// The execution state associated with the platform.
    execution_state: ExecutionState,
    /// The send side of the watcher's unbounded channel associated with this driver.
    watcher_tx: UnboundedSender<WatcherEvent>,
    /// The number of steps that were executed on the driver.
    steps_executed: usize,
    /// This is the queue of steps that are to be executed by the driver for this test case. Each
    /// time `execute_step` is called one of the steps is executed.
    steps_iterator: I,
}
impl<'a, I> Driver<'a, I>
where
I: Iterator<Item = (StepPath, Step)>,
{
// region:Constructors & Initialization
/// Creates a new driver for the given platform and test definition.
///
/// Allocates a fresh driver id, builds the platform's resolver, and then
/// initializes the execution state (compiling the contracts and deploying any
/// libraries) before returning, so a successful return means the driver is
/// ready to execute steps.
pub async fn new(
    platform_information: &'a TestPlatformInformation<'a>,
    test_definition: &'a TestDefinition<'a>,
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
    cached_compiler: &CachedCompiler<'a>,
    watcher_tx: UnboundedSender<WatcherEvent>,
    steps: I,
) -> Result<Self> {
    let mut this = Driver {
        driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
        platform_information,
        resolver: platform_information
            .node
            .resolver()
            .await
            .context("Failed to create resolver")?,
        test_definition,
        private_key_allocator,
        // Starts empty; populated by `init_execution_state` below.
        execution_state: ExecutionState::empty(),
        steps_executed: 0,
        steps_iterator: steps,
        watcher_tx,
    };
    this.init_execution_state(cached_compiler)
        .await
        .context("Failed to initialize the execution state of the platform")?;
    Ok(this)
}
/// Initializes the driver's execution state with link-ready compiled contracts.
///
/// This is a two-pass flow: (1) compile all contracts without library linking,
/// (2) deploy every library instance declared in the metadata and record its
/// address, then (3) recompile with the deployed library addresses so the
/// resulting bytecode is fully linked and deployable on demand.
async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> {
    // Pass 1: pre-linking compilation (no library addresses yet).
    let compiler_output = cached_compiler
        .compile_contracts(
            self.test_definition.metadata,
            self.test_definition.metadata_file_path,
            self.test_definition.mode.clone(),
            None,
            self.platform_information.compiler.as_ref(),
            self.platform_information.platform,
            &self.platform_information.reporter,
        )
        .await
        .inspect_err(|err| {
            error!(
                ?err,
                platform_identifier = %self.platform_information.platform.platform_identifier(),
                "Pre-linking compilation failed"
            )
        })
        .context("Failed to produce the pre-linking compiled contracts")?;
    // Stays `None` if the metadata declares no libraries, so pass 2 below can
    // tell "no libraries" apart from "empty map".
    let mut deployed_libraries = None::<HashMap<_, _>>;
    let mut contract_sources = self
        .test_definition
        .metadata
        .contract_sources()
        .inspect_err(|err| {
            error!(
                ?err,
                platform_identifier = %self.platform_information.platform.platform_identifier(),
                "Failed to retrieve contract sources from metadata"
            )
        })
        .context("Failed to get the contract instances from the metadata file")?;
    // Deploy every library instance declared in the metadata's `libraries` map.
    for library_instance in self
        .test_definition
        .metadata
        .libraries
        .iter()
        .flatten()
        .flat_map(|(_, map)| map.values())
    {
        debug!(%library_instance, "Deploying Library Instance");
        let ContractPathAndIdent {
            contract_source_path: library_source_path,
            contract_ident: library_ident,
        } = contract_sources
            .remove(library_instance)
            .context("Failed to get the contract sources of the contract instance")?;
        let (code, abi) = compiler_output
            .contracts
            .get(&library_source_path)
            .and_then(|contracts| contracts.get(library_ident.as_str()))
            .context("Failed to get the code and abi for the instance")?;
        let code = alloy::hex::decode(code)?;
        // Getting the deployer address from the cases themselves. This is to ensure
        // that we're doing the deployments from different accounts and therefore we're
        // not slowed down by the nonce.
        let deployer_address = self
            .test_definition
            .case
            .steps
            .iter()
            .filter_map(|step| match step {
                Step::FunctionCall(input) => input.caller.as_address().copied(),
                Step::BalanceAssertion(..) => None,
                Step::StorageEmptyAssertion(..) => None,
                Step::Repeat(..) => None,
                Step::AllocateAccount(..) => None,
            })
            .next()
            .unwrap_or(FunctionCallStep::default_caller_address());
        let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
            TransactionRequest::default().from(deployer_address),
            code,
        );
        let receipt = self.execute_transaction(tx).await.inspect_err(|err| {
            error!(
                ?err,
                %library_instance,
                platform_identifier = %self.platform_information.platform.platform_identifier(),
                "Failed to deploy the library"
            )
        })?;
        debug!(
            ?library_instance,
            platform_identifier = %self.platform_information.platform.platform_identifier(),
            "Deployed library"
        );
        let library_address = receipt
            .contract_address
            .expect("Failed to deploy the library");
        deployed_libraries.get_or_insert_default().insert(
            library_instance.clone(),
            (library_ident.clone(), library_address, abi.clone()),
        );
    }
    // Pass 2: recompile with the deployed library addresses so the bytecode is
    // linked against the real on-chain libraries.
    let compiler_output = cached_compiler
        .compile_contracts(
            self.test_definition.metadata,
            self.test_definition.metadata_file_path,
            self.test_definition.mode.clone(),
            deployed_libraries.as_ref(),
            self.platform_information.compiler.as_ref(),
            self.platform_information.platform,
            &self.platform_information.reporter,
        )
        .await
        .inspect_err(|err| {
            error!(
                ?err,
                platform_identifier = %self.platform_information.platform.platform_identifier(),
                "Post-linking compilation failed"
            )
        })
        .context("Failed to compile the post-link contracts")?;
    self.execution_state = ExecutionState::new(
        compiler_output.contracts,
        deployed_libraries.unwrap_or_default(),
    );
    Ok(())
}
// endregion:Constructors & Initialization
// region:Step Handling
/// Drains the step queue, executing every remaining step in order.
///
/// Consumes the driver; returns the total number of steps executed, or the
/// first step error encountered.
pub async fn execute_all(mut self) -> Result<usize> {
    loop {
        match self.execute_next_step().await {
            Some(outcome) => outcome?,
            None => break,
        }
    }
    Ok(self.steps_executed)
}
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
let (step_path, step) = self.steps_iterator.next()?;
info!(%step_path, "Executing Step");
Some(
self.execute_step(&step_path, &step)
.await
.inspect(|_| info!(%step_path, "Step execution succeeded"))
.inspect_err(|err| error!(%step_path, ?err, "Step execution failed")),
)
}
/// Dispatches a single step to its type-specific handler and accumulates the
/// number of steps executed.
///
/// Assertion steps (balance / storage-empty) are intentionally no-ops here and
/// contribute 0 to the step count, since this is the benchmarking driver.
#[instrument(
    level = "info",
    skip_all,
    fields(
        driver_id = self.driver_id,
        platform_identifier = %self.platform_information.platform.platform_identifier(),
        %step_path,
    ),
    err(Debug),
)]
async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> {
    let steps_executed = match step {
        Step::FunctionCall(step) => self
            .execute_function_call(step_path, step.as_ref())
            .await
            .context("Function call step Failed"),
        Step::Repeat(step) => self
            .execute_repeat_step(step_path, step.as_ref())
            .await
            .context("Repetition Step Failed"),
        Step::AllocateAccount(step) => self
            .execute_account_allocation(step_path, step.as_ref())
            .await
            .context("Account Allocation Step Failed"),
        // The following steps are disabled in the benchmarking driver.
        Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) => Ok(0),
    }?;
    self.steps_executed += steps_executed;
    Ok(())
}
/// Executes a function-call step as a four-phase pipeline:
/// deploy any contracts the step references, execute the call itself, trace
/// the transaction's top-level call frame, and finally bind any declared
/// variables from the traced return data.
///
/// Always counts as exactly one executed step on success.
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
pub async fn execute_function_call(
    &mut self,
    _: &StepPath,
    step: &FunctionCallStep,
) -> Result<usize> {
    let deployment_receipts = self
        .handle_function_call_contract_deployment(step)
        .await
        .context("Failed to deploy contracts for the function call step")?;
    let execution_receipt = self
        .handle_function_call_execution(step, deployment_receipts)
        .await
        .context("Failed to handle the function call execution")?;
    let tracing_result = self
        .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
        .await
        .context("Failed to handle the function call call frame tracing")?;
    self.handle_function_call_variable_assignment(step, &tracing_result)
        .await
        .context("Failed to handle function call variable assignment")?;
    Ok(1)
}
/// Deploys every contract instance the step references that isn't already in
/// the execution state, returning the deployment receipts keyed by instance.
///
/// The `bool` map value records whether the deployment should carry the step's
/// constructor calldata/value — true only for the instance targeted by a
/// `Deployer` method call, which is moved to the back of the (insertion-ordered)
/// `IndexMap` so it deploys after its dependencies.
async fn handle_function_call_contract_deployment(
    &mut self,
    step: &FunctionCallStep,
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
    let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
    for instance in step.find_all_contract_instances().into_iter() {
        if !self
            .execution_state
            .deployed_contracts
            .contains_key(&instance)
        {
            instances_we_must_deploy.entry(instance).or_insert(false);
        }
    }
    if let Method::Deployer = step.method {
        // Re-insert the deployer's target last, flagged to receive the
        // constructor arguments.
        instances_we_must_deploy.swap_remove(&step.instance);
        instances_we_must_deploy.insert(step.instance.clone(), true);
    }
    let mut receipts = HashMap::new();
    for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
        let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
        let value = deploy_with_constructor_arguments
            .then_some(step.value)
            .flatten();
        let caller = {
            let context = self.default_resolution_context();
            step.caller
                .resolve_address(self.resolver.as_ref(), context)
                .await?
        };
        // A receipt is only produced for instances actually deployed here;
        // already-deployed instances yield `None`.
        if let (_, _, Some(receipt)) = self
            .get_or_deploy_contract_instance(&instance, caller, calldata, value)
            .await
            .context("Failed to get or deploy contract instance during input execution")?
        {
            receipts.insert(instance.clone(), receipt);
        }
    }
    Ok(receipts)
}
/// Produces the receipt for the step's actual call.
///
/// For a `Deployer` method the transaction was already sent during the
/// deployment phase, so only the receipt lookup happens here; fallback and
/// named-function calls are built and submitted now.
async fn handle_function_call_execution(
    &mut self,
    step: &FunctionCallStep,
    mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
) -> Result<TransactionReceipt> {
    match step.method {
        // This step was already executed during the contract-deployment phase
        // (`handle_function_call_contract_deployment`). We just need to
        // look up the transaction receipt in this case and continue on.
        Method::Deployer => deployment_receipts
            .remove(&step.instance)
            .context("Failed to find deployment receipt for constructor call"),
        Method::Fallback | Method::FunctionName(_) => {
            let tx = step
                .as_transaction(self.resolver.as_ref(), self.default_resolution_context())
                .await?;
            self.execute_transaction(tx).await
        }
    }
}
/// Fetches the top-level call frame of the given transaction via the node's
/// `debug_traceTransaction` call-tracer.
///
/// `onlyTopCall: true` limits the trace to the outermost frame and
/// `withReturnData: true` is required so return data is available for the
/// variable-assignment phase; all other trace payloads are disabled.
async fn handle_function_call_call_frame_tracing(
    &mut self,
    tx_hash: TxHash,
) -> Result<CallFrame> {
    self.platform_information
        .node
        .trace_transaction(
            tx_hash,
            GethDebugTracingOptions {
                tracer: Some(GethDebugTracerType::BuiltInTracer(
                    GethDebugBuiltInTracerType::CallTracer,
                )),
                tracer_config: GethDebugTracerConfig(serde_json::json! {{
                    "onlyTopCall": true,
                    "withLog": false,
                    "withStorage": false,
                    "withMemory": false,
                    "withStack": false,
                    "withReturnData": true
                }}),
                ..Default::default()
            },
        )
        .await
        .map(|trace| {
            trace
                .try_into_call_frame()
                .expect("Impossible - we requested a callframe trace so we must get it back")
        })
}
/// Binds the step's declared `variable_assignments` from the traced call's
/// return data into the execution state.
///
/// The traced output is interpreted as consecutive 32-byte ABI words: the i-th
/// declared variable name is bound to the big-endian `U256` of the i-th word.
/// If the step declares no assignments this is a no-op.
async fn handle_function_call_variable_assignment(
    &mut self,
    step: &FunctionCallStep,
    tracing_result: &CallFrame,
) -> Result<()> {
    let Some(ref assignments) = step.variable_assignments else {
        return Ok(());
    };
    // Handling the return data variable assignments. Borrow the traced output
    // as a byte slice directly (`as_deref`) instead of copying it into a
    // temporary `Vec` just to chunk it; chunking semantics are unchanged.
    let output = tracing_result.output.as_deref().unwrap_or_default();
    for (variable_name, output_word) in assignments.return_data.iter().zip(output.chunks(32)) {
        let value = U256::from_be_slice(output_word);
        self.execution_state
            .variables
            .insert(variable_name.clone(), value);
        tracing::info!(
            variable_name,
            variable_value = hex::encode(value.to_be_bytes::<32>()),
            "Assigned variable"
        );
    }
    Ok(())
}
/// Intentional no-op: balance assertions are not evaluated by the benchmark
/// driver; the method just reports one executed step.
///
/// NOTE(review): `execute_step` short-circuits `Step::BalanceAssertion` to
/// `Ok(0)` without calling this method, so within this file this path appears
/// unused — confirm whether any external caller still relies on it.
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
pub async fn execute_balance_assertion(
    &mut self,
    _: &StepPath,
    _: &BalanceAssertionStep,
) -> anyhow::Result<usize> {
    // Kept empty intentionally for the benchmark driver.
    Ok(1)
}
/// Intentional no-op: storage-empty assertions are not evaluated by the
/// benchmark driver; the method just reports one executed step.
///
/// NOTE(review): `execute_step` short-circuits `Step::StorageEmptyAssertion`
/// to `Ok(0)` without calling this method — confirm whether it is still
/// reachable from elsewhere.
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
async fn execute_storage_empty_assertion_step(
    &mut self,
    _: &StepPath,
    _: &StorageEmptyAssertionStep,
) -> Result<usize> {
    // Kept empty intentionally for the benchmark driver.
    Ok(1)
}
/// Executes a repeat step by spawning `step.repeat` clone drivers, each with a
/// snapshot (clone) of the current execution state and its own copy of the
/// repeat body's steps, and running them all concurrently via `try_join_all`.
///
/// Returns the total number of steps executed across all repetitions. Note
/// that each clone mutates its own state snapshot; results are not merged back
/// into `self.execution_state`.
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
async fn execute_repeat_step(
    &mut self,
    step_path: &StepPath,
    step: &RepeatStep,
) -> Result<usize> {
    let tasks = (0..step.repeat)
        .map(|_| Driver {
            // Each repetition gets its own driver id for tracing.
            driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
            platform_information: self.platform_information,
            resolver: self.resolver.clone(),
            test_definition: self.test_definition,
            private_key_allocator: self.private_key_allocator.clone(),
            execution_state: self.execution_state.clone(),
            steps_executed: 0,
            steps_iterator: {
                // Materialize the repeat body's steps, extending the parent's
                // step path with each step's index.
                let steps = step
                    .steps
                    .iter()
                    .cloned()
                    .enumerate()
                    .map(|(step_idx, step)| {
                        let step_idx = StepIdx::new(step_idx);
                        let step_path = step_path.append(step_idx);
                        (step_path, step)
                    })
                    .collect::<Vec<_>>();
                steps.into_iter()
            },
            watcher_tx: self.watcher_tx.clone(),
        })
        .map(|driver| driver.execute_all());
    // TODO: Determine how we want to know the `ignore_block_before` and if it's through the
    // receipt and how this would impact the architecture and the possibility of us not waiting
    // for receipts in the future.
    self.watcher_tx
        .send(WatcherEvent::RepetitionStartEvent {
            ignore_block_before: 0,
        })
        .context("Failed to send message on the watcher's tx")?;
    let res = futures::future::try_join_all(tasks)
        .await
        .context("Repetition execution failed")?;
    Ok(res.into_iter().sum())
}
/// Allocates a fresh account from the shared private-key allocator and binds
/// its address (as a `U256`) to the step's variable name.
///
/// The step's `variable_name` must be prefixed with `$VARIABLE:`; the prefix
/// is stripped before the name is stored in the execution state.
#[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))]
pub async fn execute_account_allocation(
    &mut self,
    _: &StepPath,
    step: &AllocateAccountStep,
) -> Result<usize> {
    let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else {
        bail!("Account allocation must start with $VARIABLE:");
    };
    // The allocator is shared across drivers, hence the async lock.
    let private_key = self
        .private_key_allocator
        .lock()
        .await
        .allocate()
        .context("Account allocation through the private key allocator failed")?;
    let account = private_key.address();
    // Addresses are stored as big-endian U256 values like any other variable.
    let variable = U256::from_be_slice(account.0.as_slice());
    self.execution_state
        .variables
        .insert(variable_name.to_string(), variable);
    Ok(1)
}
// endregion:Step Handling
// region:Contract Deployment
/// Returns the address/ABI of an already-deployed contract instance, or
/// deploys it on demand.
///
/// The receipt in the returned tuple is `Some` only when a deployment actually
/// happened during this call; a cache hit yields `None`.
#[instrument(
    level = "info",
    skip_all,
    fields(
        driver_id = self.driver_id,
        platform_identifier = %self.platform_information.platform.platform_identifier(),
        %contract_instance,
        %deployer
    ),
    err(Debug),
)]
async fn get_or_deploy_contract_instance(
    &mut self,
    contract_instance: &ContractInstance,
    deployer: Address,
    calldata: Option<&Calldata>,
    value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
    if let Some((_, address, abi)) = self
        .execution_state
        .deployed_contracts
        .get(contract_instance)
    {
        info!(
            %address,
            "Contract instance already deployed."
        );
        Ok((*address, abi.clone(), None))
    } else {
        info!("Contract instance requires deployment.");
        // `deploy_contract` also records the instance in
        // `execution_state.deployed_contracts` for future lookups.
        let (address, abi, receipt) = self
            .deploy_contract(contract_instance, deployer, calldata, value)
            .await
            .context("Failed to deploy contract")?;
        info!(
            %address,
            "Contract instance has been deployed."
        );
        Ok((address, abi, Some(receipt)))
    }
}
/// Deploys a single contract instance and records it in the execution state.
///
/// Looks up the instance's source path and identifier in the metadata, fetches
/// the (linked) bytecode and ABI from the compiled-contracts cache, appends any
/// constructor calldata, submits the deployment transaction (with the optional
/// value attached), reports the deployment event, and returns the address, ABI
/// and receipt.
#[instrument(
    level = "info",
    skip_all,
    fields(
        driver_id = self.driver_id,
        platform_identifier = %self.platform_information.platform.platform_identifier(),
        %contract_instance,
        %deployer
    ),
    err(Debug),
)]
async fn deploy_contract(
    &mut self,
    contract_instance: &ContractInstance,
    deployer: Address,
    calldata: Option<&Calldata>,
    value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
    let Some(ContractPathAndIdent {
        contract_source_path,
        contract_ident,
    }) = self
        .test_definition
        .metadata
        .contract_sources()?
        .remove(contract_instance)
    else {
        anyhow::bail!(
            "Contract source not found for instance {:?}",
            contract_instance
        )
    };
    let Some((code, abi)) = self
        .execution_state
        .compiled_contracts
        .get(&contract_source_path)
        .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
        .cloned()
    else {
        anyhow::bail!(
            "Failed to find information for contract {:?}",
            contract_instance
        )
    };
    // A hex-decode failure here typically means the bytecode still contains
    // unresolved library link placeholders.
    let mut code = match alloy::hex::decode(&code) {
        Ok(code) => code,
        Err(error) => {
            tracing::error!(
                ?error,
                contract_source_path = contract_source_path.display().to_string(),
                contract_ident = contract_ident.as_ref(),
                "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
            );
            anyhow::bail!("Failed to hex-decode the byte code {}", error)
        }
    };
    // Constructor arguments are appended to the deployment bytecode.
    if let Some(calldata) = calldata {
        let calldata = calldata
            .calldata(self.resolver.as_ref(), self.default_resolution_context())
            .await?;
        code.extend(calldata);
    }
    let tx = {
        let tx = TransactionRequest::default().from(deployer);
        let tx = match value {
            Some(ref value) => tx.value(value.into_inner()),
            _ => tx,
        };
        TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
    };
    let receipt = match self.execute_transaction(tx).await {
        Ok(receipt) => receipt,
        Err(error) => {
            tracing::error!(?error, "Contract deployment transaction failed.");
            return Err(error);
        }
    };
    let Some(address) = receipt.contract_address else {
        anyhow::bail!("Contract deployment didn't return an address");
    };
    tracing::info!(
        instance_name = ?contract_instance,
        instance_address = ?address,
        "Deployed contract"
    );
    self.platform_information
        .reporter
        .report_contract_deployed_event(contract_instance.clone(), address)?;
    // Cache the deployment so later steps reuse this instance.
    self.execution_state.deployed_contracts.insert(
        contract_instance.clone(),
        (contract_ident, address, abi.clone()),
    );
    Ok((address, abi, receipt))
}
/// Resolves a step address to a concrete `Address`, auto-deploying the
/// referenced contract instance if needed.
///
/// A literal address is returned as-is; a resolvable address of the form
/// `<Instance>.address` strips the `.address` suffix and deploys (or reuses)
/// that instance with the default caller and no constructor arguments.
#[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)]
async fn step_address_auto_deployment(
    &mut self,
    step_address: &StepAddress,
) -> Result<Address> {
    match step_address {
        StepAddress::Address(address) => Ok(*address),
        StepAddress::ResolvableAddress(resolvable) => {
            let Some(instance) = resolvable
                .strip_suffix(".address")
                .map(ContractInstance::new)
            else {
                bail!("Not an address variable");
            };
            self.get_or_deploy_contract_instance(
                &instance,
                FunctionCallStep::default_caller_address(),
                None,
                None,
            )
            .await
            // Only the address component of the (address, abi, receipt) tuple
            // is needed here.
            .map(|v| v.0)
        }
    }
}
// endregion:Contract Deployment
// region:Resolution & Resolver
/// Builds the resolution context used when resolving step addresses, calldata
/// and variables: a default context seeded with this driver's deployed
/// contracts and variable bindings.
fn default_resolution_context(&self) -> ResolutionContext<'_> {
    let context = ResolutionContext::default();
    let context = context.with_deployed_contracts(&self.execution_state.deployed_contracts);
    context.with_variables(&self.execution_state.variables)
}
// endregion:Resolution & Resolver
// region:Transaction Execution
/// Executes the transaction on the driver's node with some custom waiting logic for the receipt.
///
/// Submits the transaction, notifies the watcher of the submitted hash, then
/// polls for the receipt once per second with an overall timeout of 30 minutes.
///
/// NOTE(review): every `get_receipt` error is treated as "receipt not yet
/// available" and retried — a persistently failing node stalls for the full
/// 30 minutes instead of failing fast. Confirm this is the intended behavior.
#[instrument(
    level = "info",
    skip_all,
    fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
)]
async fn execute_transaction(
    &self,
    transaction: TransactionRequest,
) -> anyhow::Result<TransactionReceipt> {
    let node = self.platform_information.node;
    let transaction_hash = node
        .submit_transaction(transaction)
        .await
        .context("Failed to submit transaction")?;
    // Back-fill the span's empty `transaction_hash` field now that it's known.
    Span::current().record("transaction_hash", display(transaction_hash));
    info!("Submitted transaction");
    self.watcher_tx
        .send(WatcherEvent::SubmittedTransaction { transaction_hash })
        .context("Failed to send the transaction hash to the watcher")?;
    info!("Starting to poll for transaction receipt");
    poll(
        // Overall timeout: 30 minutes; constant 1-second interval between polls.
        Duration::from_secs(30 * 60),
        PollingWaitBehavior::Constant(Duration::from_secs(1)),
        || {
            async move {
                match node.get_receipt(transaction_hash).await {
                    Ok(receipt) => {
                        info!("Polling succeeded, receipt found");
                        Ok(ControlFlow::Break(receipt))
                    }
                    Err(_) => Ok(ControlFlow::Continue(())),
                }
            }
            .instrument(info_span!("Polling for receipt"))
        },
    )
    .await
}
// endregion:Transaction Execution
}
@@ -0,0 +1,177 @@
//! The main entry point for differential benchmarking.
use std::{collections::BTreeMap, sync::Arc};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::steps::{Step, StepIdx, StepPath};
use tokio::sync::Mutex;
use tracing::{error, info, info_span, instrument, warn};
use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;
use crate::{
differential_benchmarks::{Driver, Watcher, WatcherEvent},
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};
/// Handles the differential testing executing it according to the information defined in the
/// context.
///
/// Flow: force single-node concurrency, discover metadata files and platforms,
/// spawn one node per platform via the node pool, build the test definitions
/// and the shared compilation cache, then — sequentially per platform, and
/// sequentially per workload — run a `Watcher`/`Driver` pair to completion.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_benchmarks(
    mut context: BenchmarkingContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    // A bit of a hack but we need to override the number of nodes specified through the CLI since
    // benchmarks can only be run on a single node. Perhaps in the future we'd have a cleaner way to
    // do this. But, for the time being, we need to override the cli arguments.
    if context.concurrency_configuration.number_of_nodes != 1 {
        warn!(
            specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
            updated_number_of_nodes = 1,
            "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
        );
        context.concurrency_configuration.number_of_nodes = 1;
    };
    let full_context = Context::Benchmark(Box::new(context.clone()));
    // Discover all of the metadata files that are defined in the context.
    let metadata_files = collect_metadata_files(&context)
        .context("Failed to collect metadata files for differential testing")?;
    info!(len = metadata_files.len(), "Discovered metadata files");
    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();
    // Starting the nodes of the various platforms specified in the context. Note that we use the
    // node pool since it contains all of the code needed to spawn nodes from A to Z and therefore
    // it's the preferred way for us to start nodes even when we're starting just a single node. The
    // added overhead from it is quite small (performance wise) since it's involved only when we're
    // creating the test definitions, but it might have other maintenance overhead as it obscures
    // the fact that only a single node is spawned.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();
        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();
            let node_pool = NodePool::new(full_context.clone(), *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;
            map.insert(platform_identifier, (*platform, node_pool));
        }
        map
    };
    info!("Spawned the platform nodes");
    // Preparing test definitions for the execution.
    let test_definitions = create_test_definitions_stream(
        &full_context,
        metadata_files.iter(),
        &platforms_and_nodes,
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");
    // Creating the objects that will be shared between the various runs. The cached compiler is the
    // only one at the current moment of time that's safe to share between runs.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;
    // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd
    // like to run all of the workloads for one platform, and then the next sequentially as we'd
    // like for the effect of concurrency to be minimized when we're doing the benchmarking.
    for platform in platforms.iter() {
        let platform_identifier = platform.platform_identifier();
        let span = info_span!("Benchmarking for the platform", %platform_identifier);
        // NOTE(review): holding an `Entered` span guard across `.await` points
        // can mis-attribute trace events; the tracing docs recommend
        // `Instrument::instrument` for async code — consider refactoring.
        let _guard = span.enter();
        for test_definition in test_definitions.iter() {
            let platform_information = &test_definition.platforms[&platform_identifier];
            let span = info_span!(
                "Executing workload",
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %test_definition.mode,
            );
            let _guard = span.enter();
            // Initializing all of the components requires to execute this particular workload.
            let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                context.wallet_configuration.highest_private_key_exclusive(),
            )));
            let (watcher, watcher_tx) = Watcher::new(
                platform_identifier,
                platform_information
                    .node
                    .subscribe_to_full_blocks_information()
                    .await
                    .context("Failed to subscribe to full blocks information from the node")?,
            );
            let driver = Driver::new(
                platform_information,
                test_definition,
                private_key_allocator,
                cached_compiler.as_ref(),
                watcher_tx.clone(),
                test_definition
                    .case
                    .steps_iterator_for_benchmarks(context.default_repetition_count)
                    .enumerate()
                    .map(|(step_idx, step)| -> (StepPath, Step) {
                        (StepPath::new(vec![StepIdx::new(step_idx)]), step)
                    }),
            )
            .await
            .context("Failed to create the benchmarks driver")?;
            // Run the block watcher and the step driver concurrently; once the
            // driver finishes it signals the watcher so it can wind down.
            futures::future::try_join(
                watcher.run(),
                driver.execute_all().inspect(|_| {
                    info!("All transactions submitted - driver completed execution");
                    watcher_tx
                        .send(WatcherEvent::AllTransactionsSubmitted)
                        .unwrap()
                }),
            )
            .await
            .context("Failed to run the driver and executor")
            .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
            .inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
        }
    }
    Ok(())
}
@@ -0,0 +1,44 @@
use std::{collections::HashMap, path::PathBuf};
use alloy::{
json_abi::JsonAbi,
primitives::{Address, U256},
};
use revive_dt_format::metadata::{ContractIdent, ContractInstance};
/// The state associated with the test execution of one of the workloads.
#[derive(Clone)]
pub struct ExecutionState {
    /// The compiled contracts, these contracts have been compiled and have had the libraries
    /// linked against them and therefore they're ready to be deployed on-demand.
    // NOTE(review): the `String` in the value tuple is presumably the deployable bytecode and the
    // inner key the contract's name — confirm against the compiler code that populates this map.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    pub variables: HashMap<String, U256>,
}
impl ExecutionState {
    /// Creates a new execution state from the given pre-compiled and pre-deployed contracts.
    ///
    /// The variables map always starts out empty; variables are only introduced while steps are
    /// being executed.
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
        }
    }

    /// Creates an execution state with no compiled contracts, no deployed contracts, and no
    /// variables.
    pub fn empty() -> Self {
        // Delegate to `Default` so there is a single source of truth for the empty state.
        Self::default()
    }
}

impl Default for ExecutionState {
    fn default() -> Self {
        Self::new(Default::default(), Default::default())
    }
}
@@ -0,0 +1,9 @@
//! This module contains all of the code responsible for running the benchmarks, including the
//! driver, the entry point, the execution state, and the watcher that observes the mined blocks.
mod driver;
mod entry_point;
mod execution_state;
mod watcher;

// Re-export the submodules' public items so consumers do not need to know the file layout.
pub use driver::*;
pub use entry_point::*;
pub use execution_state::*;
pub use watcher::*;
@@ -0,0 +1,207 @@
use std::{collections::HashSet, pin::Pin, sync::Arc};
use alloy::primitives::{BlockNumber, TxHash};
use anyhow::Result;
use futures::{Stream, StreamExt};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_node_interaction::MinedBlockInformation;
use tokio::sync::{
RwLock,
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::{info, instrument};
/// This struct defines the watcher used in the benchmarks. A watcher is only valid for 1 workload
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
/// workload and is not designed for reuse.
pub struct Watcher {
    /// The identifier of the platform that this watcher is for.
    platform_identifier: PlatformIdentifier,
    /// The receive side of the channel that all of the drivers and various other parts of the code
    /// send events to the watcher on. The matching send side is handed out by [`Watcher::new`].
    rx: UnboundedReceiver<WatcherEvent>,
    /// This is a stream of the blocks that were mined by the node. This is for a single platform
    /// and a single node from that platform.
    blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
}
impl Watcher {
pub fn new(
platform_identifier: PlatformIdentifier,
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
) -> (Self, UnboundedSender<WatcherEvent>) {
let (tx, rx) = unbounded_channel::<WatcherEvent>();
(
Self {
platform_identifier,
rx,
blocks_stream,
},
tx,
)
}
#[instrument(level = "info", skip_all)]
pub async fn run(mut self) -> Result<()> {
// The first event that the watcher receives must be a `RepetitionStartEvent` that informs
// the watcher of the last block number that it should ignore and what the block number is
// for the first important block that it should look for.
let ignore_block_before = loop {
let Some(WatcherEvent::RepetitionStartEvent {
ignore_block_before,
}) = self.rx.recv().await
else {
continue;
};
break ignore_block_before;
};
// This is the set of the transaction hashes that the watcher should be looking for and
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
// that all of the transactions that it was watching for has been seen in the mined blocks.
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
// A boolean that keeps track of whether all of the transactions were submitted or if more
// txs are expected to come through the receive side of the channel. We do not want to rely
// on the channel closing alone for the watcher to know that all of the transactions were
// submitted and for there to be an explicit event sent by the core orchestrator that
// informs the watcher that no further transactions are to be expected and that it can
// safely ignore the channel.
let all_transactions_submitted = Arc::new(RwLock::new(false));
let watcher_event_watching_task = {
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
let all_transactions_submitted = all_transactions_submitted.clone();
async move {
while let Some(watcher_event) = self.rx.recv().await {
match watcher_event {
// Subsequent repetition starts are ignored since certain workloads can
// contain nested repetitions and therefore there's no use in doing any
// action if the repetitions are nested.
WatcherEvent::RepetitionStartEvent { .. } => {}
WatcherEvent::SubmittedTransaction { transaction_hash } => {
watch_for_transaction_hashes
.write()
.await
.insert(transaction_hash);
}
WatcherEvent::AllTransactionsSubmitted => {
*all_transactions_submitted.write().await = true;
self.rx.close();
info!("Watcher's Events Watching Task Finished");
break;
}
}
}
}
};
let block_information_watching_task = {
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
let all_transactions_submitted = all_transactions_submitted.clone();
let mut blocks_information_stream = self.blocks_stream;
async move {
let mut mined_blocks_information = Vec::new();
while let Some(block) = blocks_information_stream.next().await {
// If the block number is equal to or less than the last block before the
// repetition then we ignore it and continue on to the next block.
if block.block_number <= ignore_block_before {
continue;
}
if *all_transactions_submitted.read().await
&& watch_for_transaction_hashes.read().await.is_empty()
{
break;
}
info!(
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
block_tx_count = block.transaction_hashes.len(),
"Observed a block"
);
// Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for.
let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await;
for tx_hash in block.transaction_hashes.iter() {
watch_for_transaction_hashes.remove(tx_hash);
}
mined_blocks_information.push(block);
}
info!("Watcher's Block Watching Task Finished");
mined_blocks_information
}
};
let (_, mined_blocks_information) =
futures::future::join(watcher_event_watching_task, block_information_watching_task)
.await;
// region:TEMPORARY
{
// TODO: The following core is TEMPORARY and will be removed once we have proper
// reporting in place and then it can be removed. This serves as as way of doing some
// very simple reporting for the time being.
use std::io::Write;
let mut stderr = std::io::stderr().lock();
writeln!(
stderr,
"Watcher information for {}",
self.platform_identifier
)?;
writeln!(
stderr,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
)?;
for block in mined_blocks_information {
writeln!(
stderr,
"{},{},{},{},{}",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
)?
}
}
// endregion:TEMPORARY
Ok(())
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum WatcherEvent {
    /// Informs the watcher that it should begin watching for the blocks mined by the platforms.
    /// Before the watcher receives this event it will not be watching for the mined blocks. The
    /// reason behind this is that we do not want the initialization transactions (e.g., contract
    /// deployments) to be included in the overall TPS and GPS measurements since these blocks will
    /// most likely only contain a single transaction since they're just being used for
    /// initialization.
    RepetitionStartEvent {
        /// This is the block number of the last block seen before the repetition started. This is
        /// used to instruct the watcher to ignore all blocks prior to this block when it starts
        /// streaming the blocks.
        ignore_block_before: BlockNumber,
    },
    /// Informs the watcher that a transaction was submitted and that the watcher should watch for a
    /// transaction with this hash in the blocks that it watches.
    SubmittedTransaction {
        /// The hash of the submitted transaction.
        transaction_hash: TxHash,
    },
    /// Informs the watcher that all of the transactions of this benchmark have been submitted,
    /// that it can expect to receive no further transaction hashes, and that it no longer needs
    /// to watch the channel.
    AllTransactionsSubmitted,
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,277 @@
//! The main entry point into differential testing.
use std::{
collections::{BTreeMap, BTreeSet},
io::{BufWriter, Write, stderr},
sync::Arc,
time::{Duration, Instant},
};
use crate::Platform;
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};
use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
use crate::{
differential_tests::Driver,
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};
/// Handles the differential testing executing it according to the information defined in the
/// context.
///
/// This discovers the metadata files, spawns the nodes for each configured platform, builds the
/// test definitions, and then executes every test case (optionally bounded by a concurrency
/// semaphore) while a background task periodically logs the still-running test cases.
///
/// # Errors
///
/// Returns an error if the metadata files cannot be collected, a platform's node pool cannot be
/// initialized, or the cached compiler cannot be created. Individual test-case failures are
/// reported through the reporter rather than propagated.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_tests(
    context: TestExecutionContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    let reporter_clone = reporter.clone();
    // Discover all of the metadata files that are defined in the context.
    let metadata_files = collect_metadata_files(&context)
        .context("Failed to collect metadata files for differential testing")?;
    info!(len = metadata_files.len(), "Discovered metadata files");
    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();
    // Starting the nodes of the various platforms specified in the context.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();
        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();
            let context = Context::Test(Box::new(context.clone()));
            let node_pool = NodePool::new(context, *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;
            map.insert(platform_identifier, (*platform, node_pool));
        }
        map
    };
    info!("Spawned the platform nodes");
    // Preparing test definitions.
    let full_context = Context::Test(Box::new(context.clone()));
    let test_definitions = create_test_definitions_stream(
        &full_context,
        metadata_files.iter(),
        &platforms_and_nodes,
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");
    // Creating everything else required for the driver to run.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;
    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        context.wallet_configuration.highest_private_key_exclusive(),
    )));
    // Creating the driver and executing all of the steps. A `None` semaphore means that the
    // concurrency is unbounded.
    let semaphore = context
        .concurrency_configuration
        .concurrency_limit()
        .map(Semaphore::new)
        .map(Arc::new);
    let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
    let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
        |(test_id, test_definition)| {
            let running_task_list = running_task_list.clone();
            let semaphore = semaphore.clone();
            let private_key_allocator = private_key_allocator.clone();
            let cached_compiler = cached_compiler.clone();
            let mode = test_definition.mode.clone();
            let span = info_span!(
                "Executing Test Case",
                test_id,
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %mode,
            );
            async move {
                let permit = match semaphore.as_ref() {
                    Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
                    None => None,
                };
                running_task_list.write().await.insert(test_id);
                let driver = match Driver::new_root(
                    test_definition,
                    private_key_allocator,
                    &cached_compiler,
                )
                .await
                {
                    Ok(driver) => driver,
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                        drop(permit);
                        running_task_list.write().await.remove(&test_id);
                        return;
                    }
                };
                info!("Created the driver for the test case");
                match driver.execute_all().await {
                    Ok(steps_executed) => test_definition
                        .reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                    }
                };
                info!("Finished the execution of the test case");
                drop(permit);
                running_task_list.write().await.remove(&test_id);
            }
            .instrument(span)
        },
    ))
    .inspect(|_| {
        info!("Finished executing all test cases");
        reporter_clone
            .report_completion_event()
            .expect("Can't fail")
    });
    let cli_reporting_task = start_cli_reporting_task(reporter);
    // Periodically log the set of still-running test cases. The join handle is kept so that the
    // task can be aborted once all test cases finish instead of leaking a forever-looping task.
    let progress_task_handle = tokio::task::spawn(async move {
        loop {
            // Scope the read guard so it is released before sleeping. Holding the tokio RwLock
            // read guard across the sleep would block the writers (the test-case tasks) for up
            // to the full sleep duration.
            {
                let remaining_tasks = running_task_list.read().await;
                info!(
                    count = remaining_tasks.len(),
                    ?remaining_tasks,
                    "Remaining Tests"
                );
            }
            tokio::time::sleep(Duration::from_secs(10)).await
        }
    });
    futures::future::join(driver_task, cli_reporting_task).await;
    // The progress task loops forever by design; abort it so it does not outlive this function.
    progress_task_handle.abort();
    Ok(())
}
/// Consumes reporter events and prints a human-readable, colored summary of each completed
/// metadata-file/mode combination to stderr, followed by an overall success/failure summary once
/// the reporter's event stream ends.
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    // Drop our reporter handle after subscribing — presumably so that the event stream can end
    // once all other reporter handles are dropped; TODO confirm against `Reporter`'s semantics.
    drop(reporter);
    let start = Instant::now();
    // ANSI escape codes used for the colored CLI output.
    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const GREY: &str = "\x1B[90m";
    const COLOR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";
    let mut number_of_successes = 0;
    let mut number_of_failures = 0;
    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };
        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
        for (case_idx, case_status) in case_status.into_iter() {
            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
            let _ = match case_status {
                TestCaseStatus::Succeeded { steps_executed } => {
                    number_of_successes += 1;
                    writeln!(
                        buf,
                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
                    )
                }
                TestCaseStatus::Failed { reason } => {
                    number_of_failures += 1;
                    writeln!(
                        buf,
                        "{}{}Case Failed{} - Reason: {}{}",
                        RED,
                        BOLD,
                        BOLD_RESET,
                        reason.trim(),
                        COLOR_RESET,
                    )
                }
                TestCaseStatus::Ignored { reason, .. } => writeln!(
                    buf,
                    "{}{}Case Ignored{} - Reason: {}{}",
                    GREY,
                    BOLD,
                    BOLD_RESET,
                    reason.trim(),
                    COLOR_RESET,
                ),
            };
        }
        let _ = writeln!(buf);
        // Flush after every combination so progress is visible as it happens; `BufWriter` would
        // otherwise hold the output back until its buffer fills up.
        let _ = buf.flush();
    }
    // Summary at the end.
    let _ = writeln!(
        buf,
        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
        number_of_successes + number_of_failures,
        GREEN,
        number_of_successes,
        COLOR_RESET,
        RED,
        number_of_failures,
        COLOR_RESET,
        start.elapsed().as_secs()
    );
    // Flush explicitly: `BufWriter`'s drop-time flush silently swallows any errors.
    let _ = buf.flush();
}
@@ -0,0 +1,36 @@
use std::{collections::HashMap, path::PathBuf};
use alloy::{
json_abi::JsonAbi,
primitives::{Address, U256},
};
use revive_dt_format::metadata::{ContractIdent, ContractInstance};
/// The state associated with the test execution of one of the tests.
#[derive(Clone)]
pub struct ExecutionState {
    /// The compiled contracts, these contracts have been compiled and have had the libraries
    /// linked against them and therefore they're ready to be deployed on-demand.
    // NOTE(review): the `String` in the value tuple is presumably the deployable bytecode and the
    // inner key the contract's name — confirm against the compiler code that populates this map.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    pub variables: HashMap<String, U256>,
}
impl ExecutionState {
    /// Constructs the state for a test execution from the already-compiled contracts and the
    /// contracts that have already been deployed; the variables map starts out empty.
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
        // Variables are only populated while steps execute, so start with an empty map.
        let variables = HashMap::new();
        Self {
            compiled_contracts,
            deployed_contracts,
            variables,
        }
    }
}
+11
View File
@@ -0,0 +1,11 @@
//! This module contains all of the code responsible for performing differential tests including the
//! driver implementation, state implementation, and the core logic that allows for tests to be
//! executed.
mod driver;
mod entry_point;
mod execution_state;

// Re-export the submodules' public items so consumers do not need to know the file layout.
pub use driver::*;
pub use entry_point::*;
pub use execution_state::*;
-900
View File
@@ -1,900 +0,0 @@
//! The test driver handles the compilation and execution of the test cases.
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use alloy::consensus::EMPTY_ROOT_HASH;
use alloy::hex;
use alloy::json_abi::JsonAbi;
use alloy::network::{Ethereum, TransactionBuilder};
use alloy::primitives::{TxHash, U256};
use alloy::rpc::types::TransactionReceipt;
use alloy::rpc::types::trace::geth::{
CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
GethDebugTracingOptions, GethTrace, PreStateConfig,
};
use alloy::{
primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::{Context as _, bail};
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use revive_dt_format::case::Case;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::steps::{
BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
StepIdx, StepPath, StorageEmptyAssertionStep,
};
use revive_dt_format::{metadata::Metadata, steps::Step};
use revive_dt_node_interaction::EthereumNode;
use tokio::sync::Mutex;
use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument};
/// The state associated with the execution of a single test case. This state is cloned for
/// concurrently executed repetitions; the private key allocator is behind an `Arc<Mutex>` so
/// that all clones share a single allocator.
#[derive(Clone)]
pub struct CaseState {
    /// A map of all of the compiled contracts for the given metadata file.
    compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
    /// This map stores the contracts deployments for this case.
    deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    variables: HashMap<String, U256>,
    /// Stores the version used for the current case.
    compiler_version: Version,
    /// The execution reporter.
    execution_reporter: ExecutionSpecificReporter,
    /// The private key allocator used for this case state. This is an Arc Mutex to allow for the
    /// state to be cloned and for all of the clones to refer to the same allocator.
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
}
impl CaseState {
pub fn new(
compiler_version: Version,
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
execution_reporter: ExecutionSpecificReporter,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> Self {
Self {
compiled_contracts,
deployed_contracts,
variables: Default::default(),
compiler_version,
execution_reporter,
private_key_allocator,
}
}
pub async fn handle_step(
&mut self,
metadata: &Metadata,
step: &Step,
step_path: &StepPath,
node: &dyn EthereumNode,
) -> anyhow::Result<StepOutput> {
match step {
Step::FunctionCall(input) => {
let (receipt, geth_trace, diff_mode) = self
.handle_input(metadata, input, node)
.await
.context("Failed to handle function call step")?;
Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
}
Step::BalanceAssertion(balance_assertion) => {
self.handle_balance_assertion(metadata, balance_assertion, node)
.await
.context("Failed to handle balance assertion step")?;
Ok(StepOutput::BalanceAssertion)
}
Step::StorageEmptyAssertion(storage_empty) => {
self.handle_storage_empty(metadata, storage_empty, node)
.await
.context("Failed to handle storage empty assertion step")?;
Ok(StepOutput::StorageEmptyAssertion)
}
Step::Repeat(repetition_step) => {
self.handle_repeat(
metadata,
repetition_step.repeat,
&repetition_step.steps,
step_path,
node,
)
.await
.context("Failed to handle the repetition step")?;
Ok(StepOutput::Repetition)
}
Step::AllocateAccount(account_allocation) => {
self.handle_account_allocation(account_allocation.variable_name.as_str())
.await
.context("Failed to allocate account")?;
Ok(StepOutput::AccountAllocation)
}
}
.inspect(|_| info!("Step Succeeded"))
}
#[instrument(level = "info", name = "Handling Input", skip_all)]
pub async fn handle_input(
&mut self,
metadata: &Metadata,
input: &FunctionCallStep,
node: &dyn EthereumNode,
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
let resolver = node.resolver().await?;
let deployment_receipts = self
.handle_input_contract_deployment(metadata, input, node)
.await
.context("Failed during contract deployment phase of input handling")?;
let execution_receipt = self
.handle_input_execution(input, deployment_receipts, node)
.await
.context("Failed during transaction execution phase of input handling")?;
let tracing_result = self
.handle_input_call_frame_tracing(execution_receipt.transaction_hash, node)
.await
.context("Failed during callframe tracing phase of input handling")?;
self.handle_input_variable_assignment(input, &tracing_result)
.context("Failed to assign variables from callframe output")?;
let (_, (geth_trace, diff_mode)) = try_join!(
self.handle_input_expectations(
input,
&execution_receipt,
resolver.as_ref(),
&tracing_result
),
self.handle_input_diff(execution_receipt.transaction_hash, node)
)
.context("Failed while evaluating expectations and diffs in parallel")?;
Ok((execution_receipt, geth_trace, diff_mode))
}
#[instrument(level = "info", name = "Handling Balance Assertion", skip_all)]
pub async fn handle_balance_assertion(
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
.await
.context("Failed to deploy contract for balance assertion")?;
self.handle_balance_assertion_execution(balance_assertion, node)
.await
.context("Failed to execute balance assertion")?;
Ok(())
}
#[instrument(level = "info", name = "Handling Storage Assertion", skip_all)]
pub async fn handle_storage_empty(
&mut self,
metadata: &Metadata,
storage_empty: &StorageEmptyAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
.await
.context("Failed to deploy contract for storage empty assertion")?;
self.handle_storage_empty_assertion_execution(storage_empty, node)
.await
.context("Failed to execute storage empty assertion")?;
Ok(())
}
#[instrument(level = "info", name = "Handling Repetition", skip_all)]
pub async fn handle_repeat(
&mut self,
metadata: &Metadata,
repetitions: usize,
steps: &[Step],
step_path: &StepPath,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let tasks = (0..repetitions).map(|_| {
let mut state = self.clone();
async move {
for (step_idx, step) in steps.iter().enumerate() {
let step_path = step_path.append(step_idx);
state.handle_step(metadata, step, &step_path, node).await?;
}
Ok::<(), anyhow::Error>(())
}
});
try_join_all(tasks).await?;
Ok(())
}
#[instrument(level = "info", name = "Handling Account Allocation", skip_all)]
pub async fn handle_account_allocation(&mut self, variable_name: &str) -> anyhow::Result<()> {
let Some(variable_name) = variable_name.strip_prefix("$VARIABLE:") else {
bail!("Account allocation must start with $VARIABLE:");
};
let private_key = self.private_key_allocator.lock().await.allocate()?;
let account = private_key.address();
let variable = U256::from_be_slice(account.0.as_slice());
self.variables.insert(variable_name.to_string(), variable);
Ok(())
}
/// Handles the contract deployment for a given input performing it if it needs to be performed.
#[instrument(level = "info", skip_all)]
async fn handle_input_contract_deployment(
&mut self,
metadata: &Metadata,
input: &FunctionCallStep,
node: &dyn EthereumNode,
) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
for instance in input.find_all_contract_instances().into_iter() {
if !self.deployed_contracts.contains_key(&instance) {
instances_we_must_deploy.entry(instance).or_insert(false);
}
}
if let Method::Deployer = input.method {
instances_we_must_deploy.swap_remove(&input.instance);
instances_we_must_deploy.insert(input.instance.clone(), true);
}
let mut receipts = HashMap::new();
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
let calldata = deploy_with_constructor_arguments.then_some(&input.calldata);
let value = deploy_with_constructor_arguments
.then_some(input.value)
.flatten();
let caller = {
let context = self.default_resolution_context();
let resolver = node.resolver().await?;
input
.caller
.resolve_address(resolver.as_ref(), context)
.await?
};
if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance(&instance, metadata, caller, calldata, value, node)
.await
.context("Failed to get or deploy contract instance during input execution")?
{
receipts.insert(instance.clone(), receipt);
}
}
Ok(receipts)
}
/// Handles the execution of the input in terms of the calls that need to be made.
#[instrument(level = "info", skip_all)]
async fn handle_input_execution(
&mut self,
input: &FunctionCallStep,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
node: &dyn EthereumNode,
) -> anyhow::Result<TransactionReceipt> {
match input.method {
// This input was already executed when `handle_input` was called. We just need to
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&input.instance)
.context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => {
let resolver = node.resolver().await?;
let tx = match input
.legacy_transaction(resolver.as_ref(), self.default_resolution_context())
.await
{
Ok(tx) => tx,
Err(err) => {
return Err(err);
}
};
match node.execute_transaction(tx).await {
Ok(receipt) => Ok(receipt),
Err(err) => Err(err),
}
}
}
}
#[instrument(level = "info", skip_all)]
async fn handle_input_call_frame_tracing(
&self,
tx_hash: TxHash,
node: &dyn EthereumNode,
) -> anyhow::Result<CallFrame> {
node.trace_transaction(
tx_hash,
GethDebugTracingOptions {
tracer: Some(GethDebugTracerType::BuiltInTracer(
GethDebugBuiltInTracerType::CallTracer,
)),
tracer_config: GethDebugTracerConfig(serde_json::json! {{
"onlyTopCall": true,
"withLog": false,
"withStorage": false,
"withMemory": false,
"withStack": false,
"withReturnData": true
}}),
..Default::default()
},
)
.await
.map(|trace| {
trace
.try_into_call_frame()
.expect("Impossible - we requested a callframe trace so we must get it back")
})
}
#[instrument(level = "info", skip_all)]
fn handle_input_variable_assignment(
&mut self,
input: &FunctionCallStep,
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
let Some(ref assignments) = input.variable_assignments else {
return Ok(());
};
// Handling the return data variable assignments.
for (variable_name, output_word) in assignments.return_data.iter().zip(
tracing_result
.output
.as_ref()
.unwrap_or_default()
.to_vec()
.chunks(32),
) {
let value = U256::from_be_slice(output_word);
self.variables.insert(variable_name.clone(), value);
tracing::info!(
variable_name,
variable_value = hex::encode(value.to_be_bytes::<32>()),
"Assigned variable"
);
}
Ok(())
}
#[instrument(level = "info", skip_all)]
async fn handle_input_expectations(
&self,
input: &FunctionCallStep,
execution_receipt: &TransactionReceipt,
resolver: &(impl ResolverApi + ?Sized),
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
// Resolving the `input.expected` into a series of expectations that we can then assert on.
let mut expectations = match input {
FunctionCallStep {
expected: Some(Expected::Calldata(calldata)),
..
} => vec![ExpectedOutput::new().with_calldata(calldata.clone())],
FunctionCallStep {
expected: Some(Expected::Expected(expected)),
..
} => vec![expected.clone()],
FunctionCallStep {
expected: Some(Expected::ExpectedMany(expected)),
..
} => expected.clone(),
FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
};
// This is a bit of a special case and we have to support it separately on it's own. If it's
// a call to the deployer method, then the tests will assert that it "returns" the address
// of the contract. Deployments do not return the address of the contract but the runtime
// code of the contracts. Therefore, this assertion would always fail. So, we replace it
// with an assertion of "check if it succeeded"
if let Method::Deployer = &input.method {
for expectation in expectations.iter_mut() {
expectation.return_data = None;
}
}
futures::stream::iter(expectations.into_iter().map(Ok))
.try_for_each_concurrent(None, |expectation| async move {
self.handle_input_expectation_item(
execution_receipt,
resolver,
expectation,
tracing_result,
)
.await
})
.await
}
#[instrument(level = "info", skip_all)]
async fn handle_input_expectation_item(
&self,
execution_receipt: &TransactionReceipt,
resolver: &(impl ResolverApi + ?Sized),
expectation: ExpectedOutput,
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
if let Some(ref version_requirement) = expectation.compiler_version {
if !version_requirement.matches(&self.compiler_version) {
return Ok(());
}
}
let resolution_context = self
.default_resolution_context()
.with_block_number(execution_receipt.block_number.as_ref())
.with_transaction_hash(&execution_receipt.transaction_hash);
// Handling the receipt state assertion.
let expected = !expectation.exception;
let actual = execution_receipt.status();
if actual != expected {
tracing::error!(
expected,
actual,
?execution_receipt,
?tracing_result,
"Transaction status assertion failed"
);
anyhow::bail!(
"Transaction status assertion failed - Expected {expected} but got {actual}",
);
}
// Handling the calldata assertion
if let Some(ref expected_calldata) = expectation.return_data {
let expected = expected_calldata;
let actual = &tracing_result.output.as_ref().unwrap_or_default();
if !expected
.is_equivalent(actual, resolver, resolution_context)
.await
.context("Failed to resolve calldata equivalence for return data assertion")?
{
tracing::error!(
?execution_receipt,
?expected,
%actual,
"Calldata assertion failed"
);
anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",);
}
}
// Handling the events assertion
if let Some(ref expected_events) = expectation.events {
// Handling the events length assertion.
let expected = expected_events.len();
let actual = execution_receipt.logs().len();
if actual != expected {
tracing::error!(expected, actual, "Event count assertion failed",);
anyhow::bail!(
"Event count assertion failed - Expected {expected} but got {actual}",
);
}
// Handling the events assertion.
for (event_idx, (expected_event, actual_event)) in expected_events
.iter()
.zip(execution_receipt.logs())
.enumerate()
{
// Handling the emitter assertion.
if let Some(ref expected_address) = expected_event.address {
let expected = expected_address
.resolve_address(resolver, resolution_context)
.await?;
let actual = actual_event.address();
if actual != expected {
tracing::error!(
event_idx,
%expected,
%actual,
"Event emitter assertion failed",
);
anyhow::bail!(
"Event emitter assertion failed - Expected {expected} but got {actual}",
);
}
}
// Handling the topics assertion.
for (expected, actual) in expected_event
.topics
.as_slice()
.iter()
.zip(actual_event.topics())
{
let expected = Calldata::new_compound([expected]);
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await
.context("Failed to resolve event topic equivalence")?
{
tracing::error!(
event_idx,
?execution_receipt,
?expected,
?actual,
"Event topics assertion failed",
);
anyhow::bail!(
"Event topics assertion failed - Expected {expected:?} but got {actual:?}",
);
}
}
// Handling the values assertion.
let expected = &expected_event.values;
let actual = &actual_event.data().data;
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await
.context("Failed to resolve event value equivalence")?
{
tracing::error!(
event_idx,
?execution_receipt,
?expected,
?actual,
"Event value assertion failed",
);
anyhow::bail!(
"Event value assertion failed - Expected {expected:?} but got {actual:?}",
);
}
}
}
Ok(())
}
#[instrument(level = "info", skip_all)]
async fn handle_input_diff(
&self,
tx_hash: TxHash,
node: &dyn EthereumNode,
) -> anyhow::Result<(GethTrace, DiffMode)> {
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
diff_mode: Some(true),
disable_code: None,
disable_storage: None,
});
let trace = node
.trace_transaction(tx_hash, trace_options)
.await
.context("Failed to obtain geth prestate tracer output")?;
let diff = node
.state_diff(tx_hash)
.await
.context("Failed to obtain state diff for transaction")?;
Ok((trace, diff))
}
#[instrument(level = "info", skip_all)]
pub async fn handle_balance_assertion_contract_deployment(
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(address) = balance_assertion.address.as_resolvable_address() else {
return Ok(());
};
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance(
&instance,
metadata,
FunctionCallStep::default_caller_address(),
None,
None,
node,
)
.await?;
Ok(())
}
#[instrument(level = "info", skip_all)]
pub async fn handle_balance_assertion_execution(
&mut self,
BalanceAssertionStep {
address,
expected_balance: amount,
..
}: &BalanceAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = address
.resolve_address(resolver.as_ref(), self.default_resolution_context())
.await?;
let balance = node.balance_of(address).await?;
let expected = *amount;
let actual = balance;
if expected != actual {
tracing::error!(%expected, %actual, %address, "Balance assertion failed");
anyhow::bail!(
"Balance assertion failed - Expected {} but got {} for {} resolved to {}",
expected,
actual,
address,
address,
)
}
Ok(())
}
#[instrument(level = "info", skip_all)]
pub async fn handle_storage_empty_assertion_contract_deployment(
&mut self,
metadata: &Metadata,
storage_empty_assertion: &StorageEmptyAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(address) = storage_empty_assertion.address.as_resolvable_address() else {
return Ok(());
};
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance(
&instance,
metadata,
FunctionCallStep::default_caller_address(),
None,
None,
node,
)
.await?;
Ok(())
}
#[instrument(level = "info", skip_all)]
pub async fn handle_storage_empty_assertion_execution(
&mut self,
StorageEmptyAssertionStep {
address,
is_storage_empty,
..
}: &StorageEmptyAssertionStep,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = address
.resolve_address(resolver.as_ref(), self.default_resolution_context())
.await?;
let storage = node.latest_state_proof(address, Default::default()).await?;
let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;
let expected = is_storage_empty;
let actual = is_empty;
if *expected != actual {
tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed");
anyhow::bail!(
"Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
expected,
actual,
address,
address,
)
};
Ok(())
}
/// Gets the information of a deployed contract or library from the state. If it's found to not
/// be deployed then it will be deployed.
///
/// On a cache hit the known address and ABI are returned with no receipt. On a cache miss the
/// compiled bytecode is looked up, optionally extended with resolved constructor calldata,
/// deployed through `node`, recorded in `self.deployed_contracts`, and returned together with
/// the deployment receipt.
#[allow(clippy::too_many_arguments)]
pub async fn get_or_deploy_contract_instance(
    &mut self,
    contract_instance: &ContractInstance,
    metadata: &Metadata,
    deployer: Address,
    calldata: Option<&Calldata>,
    value: Option<EtherValue>,
    node: &dyn EthereumNode,
) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
    // Fast path: already deployed in this state. The `None` receipt tells the caller that no
    // new transaction was executed.
    if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
        return Ok((*address, abi.clone(), None));
    }
    // Map the instance name to the source file and contract identifier it came from.
    let Some(ContractPathAndIdent {
        contract_source_path,
        contract_ident,
    }) = metadata.contract_sources()?.remove(contract_instance)
    else {
        anyhow::bail!(
            "Contract source not found for instance {:?}",
            contract_instance
        )
    };
    // Look up the compiled bytecode and ABI produced for this contract.
    let Some((code, abi)) = self
        .compiled_contracts
        .get(&contract_source_path)
        .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
        .cloned()
    else {
        anyhow::bail!(
            "Failed to find information for contract {:?}",
            contract_instance
        )
    };
    // The stored code is hex-encoded; a decode failure typically means the bytecode still
    // contains unresolved link placeholders.
    let mut code = match alloy::hex::decode(&code) {
        Ok(code) => code,
        Err(error) => {
            tracing::error!(
                ?error,
                contract_source_path = contract_source_path.display().to_string(),
                contract_ident = contract_ident.as_ref(),
                "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
            );
            anyhow::bail!("Failed to hex-decode the byte code {}", error)
        }
    };
    // Constructor arguments, when provided, are resolved and appended to the deploy code.
    if let Some(calldata) = calldata {
        let resolver = node.resolver().await?;
        let calldata = calldata
            .calldata(resolver.as_ref(), self.default_resolution_context())
            .await?;
        code.extend(calldata);
    }
    // Build the deployment transaction, attaching a value transfer only when one was given.
    let tx = {
        let tx = TransactionRequest::default().from(deployer);
        let tx = match value {
            Some(ref value) => tx.value(value.into_inner()),
            _ => tx,
        };
        TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
    };
    let receipt = match node.execute_transaction(tx).await {
        Ok(receipt) => receipt,
        Err(error) => {
            tracing::error!(?error, "Contract deployment transaction failed.");
            return Err(error);
        }
    };
    let Some(address) = receipt.contract_address else {
        anyhow::bail!("Contract deployment didn't return an address");
    };
    tracing::info!(
        instance_name = ?contract_instance,
        instance_address = ?address,
        "Deployed contract"
    );
    // Record the deployment for reporting and so that later calls hit the fast path above.
    self.execution_reporter
        .report_contract_deployed_event(contract_instance.clone(), address)?;
    self.deployed_contracts.insert(
        contract_instance.clone(),
        (contract_ident, address, abi.clone()),
    );
    Ok((address, abi, Some(receipt)))
}
/// Builds the baseline [`ResolutionContext`] for this state, pre-populated with the deployed
/// contract addresses and the recorded variables.
fn default_resolution_context(&self) -> ResolutionContext<'_> {
    let context = ResolutionContext::default();
    let context = context.with_deployed_contracts(&self.deployed_contracts);
    context.with_variables(&self.variables)
}
}
/// Drives the execution of a single test case across every platform it was assigned to.
pub struct CaseDriver<'a> {
    // The metadata file that the case under execution belongs to.
    metadata: &'a Metadata,
    // The case whose steps this driver executes.
    case: &'a Case,
    // One entry per platform: the node to execute against, the platform's identifier, and the
    // mutable per-case state accumulated while stepping through the case.
    platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
}
impl<'a> CaseDriver<'a> {
    /// Creates a new driver for `case` from `metadata`, running against the given per-platform
    /// nodes and case states.
    // Fix: dropped the stale `#[allow(clippy::too_many_arguments)]` — this constructor only
    // takes three arguments.
    pub fn new(
        metadata: &'a Metadata,
        case: &'a Case,
        platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
    ) -> CaseDriver<'a> {
        Self {
            metadata,
            case,
            platform_state,
        }
    }

    /// Executes the case's steps in order, running each step concurrently across all platforms.
    ///
    /// A step only counts as executed once it succeeds on every platform; the first platform
    /// failure aborts the case and its error is returned. On success, the number of
    /// fully-executed steps is returned.
    #[instrument(level = "info", name = "Executing Case", skip_all)]
    pub async fn execute(&mut self) -> anyhow::Result<usize> {
        let mut steps_executed = 0;
        for (step_idx, step) in self
            .case
            .steps_iterator()
            .enumerate()
            .map(|(idx, v)| (StepIdx::new(idx), v))
        {
            let metadata = self.metadata;
            // One future per platform, each advancing that platform's case state by this step.
            let step_futures =
                self.platform_state
                    .iter_mut()
                    .map(|(node, platform_id, case_state)| {
                        let platform_id = *platform_id;
                        let node_ref = *node;
                        let step = step.clone();
                        let span = info_span!(
                            "Handling Step",
                            %step_idx,
                            platform = %platform_id,
                        );
                        async move {
                            let step_path = StepPath::from_iterator([step_idx]);
                            case_state
                                .handle_step(metadata, &step, &step_path, node_ref)
                                .await
                                .map_err(|e| (platform_id, e))
                        }
                        .instrument(span)
                    });
            // `try_join_all` fails fast: the first platform error cancels the remaining
            // futures for this step.
            match try_join_all(step_futures).await {
                Ok(_outputs) => {
                    steps_executed += 1;
                }
                Err((platform_id, error)) => {
                    tracing::error!(
                        %step_idx,
                        platform = %platform_id,
                        ?error,
                        "Step failed on platform",
                    );
                    return Err(error);
                }
            }
        }
        Ok(steps_executed)
    }
}
/// The output produced by handling a single step of a test case.
#[derive(Clone, Debug)]
// The `FunctionCall` variant is much larger than the unit variants; the allow is deliberate.
#[allow(clippy::large_enum_variant)]
pub enum StepOutput {
    /// A function call step: the transaction receipt plus the geth trace and state diff
    /// captured for the transaction.
    FunctionCall(TransactionReceipt, GethTrace, DiffMode),
    /// A balance assertion step (carries no output).
    BalanceAssertion,
    /// A storage-emptiness assertion step (carries no output).
    StorageEmptyAssertion,
    /// A repetition step (carries no output).
    Repetition,
    /// An account allocation step (carries no output).
    AccountAllocation,
}
@@ -5,13 +5,13 @@ use std::{
borrow::Cow,
collections::HashMap,
path::{Path, PathBuf},
sync::Arc,
sync::{Arc, LazyLock},
};
use crate::Platform;
use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -19,7 +19,7 @@ use anyhow::{Context as _, Error, Result};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, debug, debug_span, instrument};
pub struct CachedCompiler<'a> {
@@ -165,10 +165,22 @@ impl<'a> CachedCompiler<'a> {
cache_value.compiler_output
}
None => {
compilation_callback()
let compiler_output = compilation_callback()
.await
.context("Compilation callback failed (cache miss path)")?
.compiler_output
.compiler_output;
self.artifacts_cache
.insert(
&cache_key,
&CacheValue {
compiler_output: compiler_output.clone(),
},
)
.await
.context(
"Failed to write the cached value of the compilation artifacts",
)?;
compiler_output
}
}
}
@@ -186,6 +198,12 @@ async fn compile_contracts(
compiler: &dyn SolidityCompiler,
reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
// Puts a limit on how many compilations we can perform at any given instance which helps us
// with some of the errors we've been seeing with high concurrency on MacOS (we have not tried
// it on Linux so we don't know if these issues also persist there or not.)
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
let _permit = SPAWN_GATE.acquire().await?;
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol")
.with_use_cached_fs(true)
+33
View File
@@ -0,0 +1,33 @@
use revive_dt_config::CorpusConfiguration;
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
use tracing::{info, info_span, instrument};
/// Given an object that implements [`AsRef<CorpusConfiguration>`], this function finds all of the
/// corpus files and produces a map containing all of the [`MetadataFile`]s discovered.
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
pub fn collect_metadata_files(
context: impl AsRef<CorpusConfiguration>,
) -> anyhow::Result<Vec<MetadataFile>> {
let mut metadata_files = Vec::new();
let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
for path in &corpus_configuration.paths {
let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter();
let corpus = Corpus::try_from_path(path)?;
info!(
name = corpus.name(),
number_of_contained_paths = corpus.path_count(),
"Deserialized corpus file"
);
metadata_files.extend(corpus.enumerate_tests());
}
// There's a possibility that there are certain paths that all lead to the same metadata files
// and therefore it's important that we sort them and then deduplicate them.
metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
Ok(metadata_files)
}
+9
View File
@@ -0,0 +1,9 @@
mod cached_compiler;
mod metadata;
mod pool;
mod test;
pub use cached_compiler::*;
pub use metadata::*;
pub use pool::*;
pub use test::*;
@@ -2,9 +2,9 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::Platform;
use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode;
/// The node pool starts one or more [Node] which then can be accessed
@@ -16,7 +16,7 @@ pub struct NodePool {
impl NodePool {
/// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;
@@ -33,11 +33,18 @@ impl NodePool {
.join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
.context("Failed to join node spawn thread")?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
.context("Node failed to spawn")?,
);
}
let pre_transactions_tasks = nodes
.iter_mut()
.map(|node| node.pre_transactions())
.collect::<Vec<_>>();
futures::future::try_join_all(pre_transactions_tasks)
.await
.context("Failed to run the pre-transactions task")?;
Ok(Self {
nodes,
next: Default::default(),
+319
View File
@@ -0,0 +1,319 @@
use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json};
use revive_dt_compiler::{Mode, SolidityCompiler};
use revive_dt_format::{
case::{Case, CaseIdx},
metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};
use crate::{Platform, helpers::NodePool};
pub async fn create_test_definitions_stream<'a>(
// This is only required for creating the compiler objects and is not used anywhere else in the
// function.
context: &Context,
metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
stream::iter(
metadata_files
.into_iter()
// Flatten over the cases.
.flat_map(|metadata_file| {
metadata_file
.cases
.iter()
.enumerate()
.map(move |(case_idx, case)| (metadata_file, case_idx, case))
})
// Flatten over the modes, prefer the case modes over the metadata file modes.
.flat_map(move |(metadata_file, case_idx, case)| {
let reporter = reporter.clone();
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
let modes = match modes {
Some(modes) => EitherIter::A(
ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
),
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
};
modes.into_iter().map(move |mode| {
(
metadata_file,
case_idx,
case,
mode.clone(),
reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
)
})
})
// Inform the reporter of each one of the test cases that were discovered which we
// expect to run.
.inspect(|(_, _, _, _, reporter)| {
reporter
.report_test_case_discovery_event()
.expect("Can't fail");
}),
)
// Creating the Test Definition objects from all of the various objects we have and creating
// their required dependencies (e.g., compiler).
.filter_map(
move |(metadata_file, case_idx, case, mode, reporter)| async move {
let mut platforms = BTreeMap::new();
for (platform, node_pool) in platforms_and_nodes.values() {
let node = node_pool.round_robbin();
let compiler = platform
.new_compiler(context.clone(), mode.version.clone().map(Into::into))
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %platform.platform_identifier(),
"Failed to instantiate the compiler"
)
})
.ok()?;
reporter
.report_node_assigned_event(
node.id(),
platform.platform_identifier(),
node.connection_string(),
)
.expect("Can't fail");
let reporter =
reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
platforms.insert(
platform.platform_identifier(),
TestPlatformInformation {
platform: *platform,
node,
compiler,
reporter,
},
);
}
Some(TestDefinition {
/* Metadata file information */
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
/* Mode Information */
mode: mode.clone(),
/* Case Information */
case_idx: CaseIdx::new(case_idx),
case,
/* Platform and Node Assignment Information */
platforms,
/* Reporter */
reporter,
})
},
)
// Filter out the test cases which are incompatible or that can't run in the current setup.
.filter_map(move |test| async move {
match test.check_compatibility() {
Ok(()) => Some(test),
Err((reason, additional_information)) => {
debug!(
metadata_file_path = %test.metadata.metadata_file_path.display(),
case_idx = %test.case_idx,
mode = %test.mode,
reason,
additional_information =
serde_json::to_string(&additional_information).unwrap(),
"Ignoring Test Case"
);
test.reporter
.report_test_ignored_event(
reason.to_string(),
additional_information
.into_iter()
.map(|(k, v)| (k.into(), v))
.collect::<IndexMap<_, _>>(),
)
.expect("Can't fail");
None
}
}
})
.inspect(|test| {
info!(
metadata_file_path = %test.metadata_file_path.display(),
case_idx = %test.case_idx,
mode = %test.mode,
"Created a test case definition"
);
})
}
/// This is a full description of a differential test to run alongside the full metadata file, the
/// specific case to be tested, the platforms that the tests should run on, the specific nodes of
/// these platforms that they should run on, the compilers to use, and everything else needed making
/// it a complete description.
pub struct TestDefinition<'a> {
/* Metadata file information */
pub metadata: &'a MetadataFile,
pub metadata_file_path: &'a Path,
/* Mode Information */
pub mode: Cow<'a, Mode>,
/* Case Information */
pub case_idx: CaseIdx,
pub case: &'a Case,
/* Platform and Node Assignment Information */
pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,
/* Reporter */
pub reporter: TestSpecificReporter,
}
impl<'a> TestDefinition<'a> {
/// Checks if this test can be ran with the current configuration.
pub fn check_compatibility(&self) -> TestCheckFunctionResult {
self.check_metadata_file_ignored()?;
self.check_case_file_ignored()?;
self.check_target_compatibility()?;
self.check_evm_version_compatibility()?;
self.check_compiler_compatibility()?;
Ok(())
}
/// Checks if the metadata file is ignored or not.
fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
if self.metadata.ignore.is_some_and(|ignore| ignore) {
Err(("Metadata file is ignored.", indexmap! {}))
} else {
Ok(())
}
}
/// Checks if the case file is ignored or not.
fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
if self.case.ignore.is_some_and(|ignore| ignore) {
Err(("Case is ignored.", indexmap! {}))
} else {
Ok(())
}
}
/// Checks if the platforms all support the desired targets in the metadata file.
fn check_target_compatibility(&self) -> TestCheckFunctionResult {
let mut error_map = indexmap! {
"test_desired_targets" => json!(self.metadata.targets.as_ref()),
};
let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = match self.metadata.targets.as_ref() {
None => true,
Some(required_vm_identifiers) => {
required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
}
};
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform_information.platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}
if is_allowed {
Ok(())
} else {
Err((
"One of the platforms do do not support the targets allowed by the test.",
error_map,
))
}
}
// Checks for the compatibility of the EVM version with the platforms specified.
fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
let Some(evm_version_requirement) = self.metadata.required_evm_version else {
return Ok(());
};
let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
};
let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform =
evm_version_requirement.matches(&platform_information.node.evm_version());
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform_information.platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}
if is_allowed {
Ok(())
} else {
Err((
"EVM version is incompatible for the platforms specified",
error_map,
))
}
}
/// Checks if the platforms compilers support the mode that the test is for.
fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
};
let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = platform_information
.compiler
.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform_information.platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}
if is_allowed {
Ok(())
} else {
Err((
"Compilers do not support this mode either for the provided platforms.",
error_map,
))
}
}
}
pub struct TestPlatformInformation<'a> {
pub platform: &'a dyn Platform,
pub node: &'a dyn EthereumNode,
pub compiler: Box<dyn SolidityCompiler>,
pub reporter: ExecutionSpecificReporter,
}
type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
+113 -2
View File
@@ -3,6 +3,9 @@
//! This crate defines the testing configuration and
//! provides a helper utility to execute tests.
pub mod differential_tests;
pub mod helpers;
use std::{
pin::Pin,
thread::{self, JoinHandle},
@@ -14,12 +17,16 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*;
use revive_dt_node::{
Node, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
Node,
node_implementations::{
geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
zombienet::ZombieNode,
},
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;
pub mod driver;
pub use helpers::CachedCompiler;
/// A trait that describes the interface for the platforms that are supported by the tool.
#[allow(clippy::type_complexity)]
@@ -359,6 +366,102 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetPolkavmResolcPlatform;
impl Platform for ZombienetPolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetPolkavmResolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetRevmSolcPlatform;
impl Platform for ZombienetRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetRevmSolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
}
impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self {
match value {
@@ -378,6 +481,10 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
PlatformIdentifier::ReviveDevNodeRevmSolc => {
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetPolkavmResolc => {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
}
}
}
@@ -401,6 +508,10 @@ impl From<PlatformIdentifier> for &dyn Platform {
PlatformIdentifier::ReviveDevNodeRevmSolc => {
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetPolkavmResolc => {
&ZombienetPolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
}
}
}
+42 -744
View File
@@ -1,55 +1,21 @@
mod cached_compiler;
mod pool;
mod differential_benchmarks;
mod differential_tests;
mod helpers;
use std::{
borrow::Cow,
collections::{BTreeSet, HashMap},
io::{BufWriter, Write, stderr},
path::Path,
sync::Arc,
time::Instant,
};
use alloy::{
network::{Ethereum, TransactionBuilder},
rpc::types::TransactionRequest,
};
use anyhow::Context as _;
use clap::Parser;
use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::{IndexMap, indexmap};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
ExecutionSpecificReporter, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier,
};
use revive_dt_report::ReportAggregator;
use schemars::schema_for;
use serde_json::{Value, json};
use tokio::sync::Mutex;
use tracing::{debug, error, info, info_span, instrument};
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{
iterators::EitherIter,
types::{Mode, PrivateKeyAllocator},
};
use revive_dt_compiler::SolidityCompiler;
use revive_dt_config::{Context, *};
use revive_dt_core::{
Platform,
driver::{CaseDriver, CaseState},
};
use revive_dt_format::{
case::{Case, CaseIdx},
corpus::Corpus,
metadata::{ContractPathAndIdent, Metadata, MetadataFile},
mode::ParsedMode,
steps::{FunctionCallStep, Step},
};
use revive_dt_config::Context;
use revive_dt_core::Platform;
use revive_dt_format::metadata::Metadata;
use crate::cached_compiler::CachedCompiler;
use crate::pool::NodePool;
use crate::{
differential_benchmarks::handle_differential_benchmarks,
differential_tests::handle_differential_tests,
};
fn main() -> anyhow::Result<()> {
let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
@@ -75,37 +41,37 @@ fn main() -> anyhow::Result<()> {
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
match context {
Context::ExecuteTests(context) => {
let tests = collect_corpora(&context)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
.worker_threads(context.concurrency_configuration.number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(async move {
let differential_tests_handling_task =
handle_differential_tests(*context, reporter);
tokio::runtime::Builder::new_multi_thread()
.worker_threads(context.concurrency_configuration.number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(async move {
execute_corpus(*context, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")
})
}
futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
.await?;
Ok(())
}),
Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
.worker_threads(context.concurrency_configuration.number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(async move {
let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter);
futures::future::try_join(
differential_benchmarks_handling_task,
report_aggregator_task,
)
.await?;
Ok(())
}),
Context::ExportJsonSchema => {
let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
@@ -113,671 +79,3 @@ fn main() -> anyhow::Result<()> {
}
}
}
/// Reads every corpus file referenced by the execution context and returns a map from each
/// deserialized [`Corpus`] to the metadata files it enumerates.
///
/// Fails on the first corpus file that cannot be read or deserialized.
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(
    context: &TestExecutionContext,
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
    context
        .corpus
        .iter()
        .map(|corpus_path| {
            let span = info_span!("Processing corpus file", path = %corpus_path.display());
            let _entered = span.enter();
            let corpus = Corpus::try_from_path(corpus_path)?;
            info!(
                name = corpus.name(),
                number_of_contained_paths = corpus.path_count(),
                "Deserialized corpus file"
            );
            let metadata_files = corpus.enumerate_tests();
            Ok((corpus, metadata_files))
        })
        .collect()
}
/// Spins up one node pool per platform, builds the stream of tests to execute, and then runs
/// the driver task, the CLI reporting task, and the report aggregation task concurrently to
/// completion.
///
/// Returns an error if any platform's node pool fails to initialize, if the driver task
/// cannot be started, or if the report aggregation task fails.
async fn run_driver(
    context: TestExecutionContext,
    metadata_files: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
    platforms: Vec<&dyn Platform>,
) -> anyhow::Result<()> {
    // One node pool per platform; a pool-creation failure aborts the whole run.
    let mut nodes = Vec::<(&dyn Platform, NodePool)>::new();
    for platform in platforms.into_iter() {
        let pool = NodePool::new(Context::ExecuteTests(Box::new(context.clone())), platform)
            .inspect_err(|err| {
                error!(
                    ?err,
                    platform_identifier = %platform.platform_identifier(),
                    "Failed to initialize the node pool for the platform."
                )
            })
            .context("Failed to initialize the node pool")?;
        nodes.push((platform, pool));
    }
    let tests_stream = tests_stream(
        &context,
        metadata_files.iter(),
        nodes.as_slice(),
        reporter.clone(),
    )
    .await;
    let driver_task = start_driver_task(&context, tests_stream)
        .await
        .context("Failed to start driver task")?;
    let cli_reporting_task = start_cli_reporting_task(reporter);
    // Run all three tasks concurrently; only the aggregation task's result is fallible and
    // gets propagated to the caller.
    let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
    rtn?;
    Ok(())
}
/// Expands the metadata files into one prospective [`Test`] per (metadata file, case, mode)
/// combination and returns them as a stream.
///
/// Tests whose per-platform compiler cannot be instantiated, or whose compatibility checks
/// reject the current configuration, are filtered out of the stream (the latter with an
/// ignored-test report).
async fn tests_stream<'a>(
    args: &TestExecutionContext,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
    nodes: &'a [(&dyn Platform, NodePool)],
    reporter: Reporter,
) -> impl Stream<Item = Test<'a>> {
    let tests = metadata_files
        .into_iter()
        // Expand each metadata file into its individual (file, case index, case) triples.
        .flat_map(|metadata_file| {
            metadata_file
                .cases
                .iter()
                .enumerate()
                .map(move |(case_idx, case)| (metadata_file, case_idx, case))
        })
        // Flatten over the modes, prefer the case modes over the metadata file modes.
        .flat_map(|(metadata_file, case_idx, case)| {
            let reporter = reporter.clone();
            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
            let modes = match modes {
                Some(modes) => EitherIter::A(
                    ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                ),
                // Neither the case nor the file declares modes: fall back to all modes.
                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
            };
            modes.into_iter().map(move |mode| {
                (
                    metadata_file,
                    case_idx,
                    case,
                    mode.clone(),
                    // Reporter scoped to this exact (mode, file, case) combination.
                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
                        solc_mode: mode.as_ref().clone(),
                        metadata_file_path: metadata_file.metadata_file_path.clone(),
                        case_idx: CaseIdx::new(case_idx),
                    })),
                )
            })
        })
        .collect::<Vec<_>>();
    // Note: before we do any kind of filtering or process the iterator in any way, we need to
    // inform the report aggregator of all of the cases that were found as it keeps a state of the
    // test cases for its internal use.
    for (_, _, _, _, reporter) in tests.iter() {
        reporter
            .report_test_case_discovery_event()
            .expect("Can't fail")
    }
    stream::iter(tests.into_iter())
        // Assign a node and instantiate a compiler for every platform; any failure drops the
        // test from the stream (the error is logged, not propagated).
        .filter_map(
            move |(metadata_file, case_idx, case, mode, reporter)| async move {
                let mut platforms = Vec::new();
                for (platform, node_pool) in nodes.iter() {
                    let node = node_pool.round_robbin();
                    let compiler = platform
                        .new_compiler(
                            Context::ExecuteTests(Box::new(args.clone())),
                            mode.version.clone().map(Into::into),
                        )
                        .await
                        .inspect_err(|err| {
                            error!(
                                ?err,
                                platform_identifier = %platform.platform_identifier(),
                                "Failed to instantiate the compiler"
                            )
                        })
                        .ok()?;
                    let reporter = reporter
                        .execution_specific_reporter(node.id(), platform.platform_identifier());
                    platforms.push((*platform, node, compiler, reporter));
                }
                Some(Test {
                    metadata: metadata_file,
                    metadata_file_path: metadata_file.metadata_file_path.as_path(),
                    mode: mode.clone(),
                    case_idx: CaseIdx::new(case_idx),
                    case,
                    platforms,
                    reporter,
                })
            },
        )
        // Drop (and report as ignored) tests that are incompatible with the configuration.
        .filter_map(move |test| async move {
            match test.check_compatibility() {
                Ok(()) => Some(test),
                Err((reason, additional_information)) => {
                    debug!(
                        metadata_file_path = %test.metadata.metadata_file_path.display(),
                        case_idx = %test.case_idx,
                        mode = %test.mode,
                        reason,
                        additional_information =
                            serde_json::to_string(&additional_information).unwrap(),
                        "Ignoring Test Case"
                    );
                    test.reporter
                        .report_test_ignored_event(
                            reason.to_string(),
                            additional_information
                                .into_iter()
                                .map(|(k, v)| (k.into(), v))
                                .collect::<IndexMap<_, _>>(),
                        )
                        .expect("Can't fail");
                    None
                }
            }
        })
}
/// Initializes the shared compilation cache and returns a future that drives every test in
/// the stream to completion, bounded by the configured concurrency limit.
///
/// Returns an error only if the cached compiler cannot be initialized; individual test
/// successes/failures are delivered through the test's reporter rather than the return value.
async fn start_driver_task<'a>(
    context: &TestExecutionContext,
    tests: impl Stream<Item = Test<'a>>,
) -> anyhow::Result<impl Future<Output = ()>> {
    info!("Starting driver task");
    // Shared across all tests so compilation results can be served from the on-disk cache.
    let cached_compiler = Arc::new(
        CachedCompiler::new(
            context
                .working_directory
                .as_path()
                .join("compilation_cache"),
            context
                .compilation_configuration
                .invalidate_compilation_cache,
        )
        .await
        .context("Failed to initialize cached compiler")?,
    );
    Ok(tests.for_each_concurrent(
        context.concurrency_configuration.concurrency_limit(),
        move |test| {
            let cached_compiler = cached_compiler.clone();
            async move {
                // Inform the reporter which node each platform's execution was assigned to.
                for (platform, node, _, _) in test.platforms.iter() {
                    test.reporter
                        .report_node_assigned_event(
                            node.id(),
                            platform.platform_identifier(),
                            node.connection_string(),
                        )
                        .expect("Can't fail");
                }
                // A fresh private-key allocator is created per test.
                let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                    context.wallet_configuration.highest_private_key_exclusive(),
                )));
                let reporter = test.reporter.clone();
                let result =
                    handle_case_driver(&test, cached_compiler, private_key_allocator).await;
                // Translate the driver's result into a success/failure report event.
                match result {
                    Ok(steps_executed) => reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => reporter
                        .report_test_failed_event(format!("{error:#}"))
                        .expect("Can't fail"),
                }
            }
        },
    ))
}
/// Consumes aggregator events and prints a colored per-case summary of each completed
/// (metadata file, solc mode) combination to stderr, followed by a final tally of successes
/// and failures once the reporter's event channel closes.
#[allow(irrefutable_let_patterns)]
async fn start_cli_reporting_task(reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    // Drop our reporter handle so the event channel can close once all senders are finished.
    drop(reporter);
    let start = Instant::now();
    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const GREY: &str = "\x1B[90m";
    const COLOR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";
    let mut number_of_successes = 0;
    let mut number_of_failures = 0;
    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        // Only completed file/mode combinations are rendered; all other events are skipped.
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };
        let _ = writeln!(buf, "{mode} - {}", metadata_file_path.display());
        for (case_idx, case_status) in case_status.into_iter() {
            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
            // Write errors are deliberately ignored: reporting must never abort the run.
            let _ = match case_status {
                TestCaseStatus::Succeeded { steps_executed } => {
                    number_of_successes += 1;
                    writeln!(
                        buf,
                        "{GREEN}{BOLD}Case Succeeded{BOLD_RESET} - Steps Executed: {steps_executed}{COLOR_RESET}"
                    )
                }
                TestCaseStatus::Failed { reason } => {
                    number_of_failures += 1;
                    let reason = reason.trim();
                    writeln!(
                        buf,
                        "{RED}{BOLD}Case Failed{BOLD_RESET} - Reason: {reason}{COLOR_RESET}"
                    )
                }
                TestCaseStatus::Ignored { reason, .. } => {
                    let reason = reason.trim();
                    writeln!(
                        buf,
                        "{GREY}{BOLD}Case Ignored{BOLD_RESET} - Reason: {reason}{COLOR_RESET}"
                    )
                }
            };
        }
        let _ = writeln!(buf);
    }
    // Summary at the end.
    let number_of_cases = number_of_successes + number_of_failures;
    let elapsed_secs = start.elapsed().as_secs();
    let _ = writeln!(
        buf,
        "{number_of_cases} cases: {GREEN}{number_of_successes}{COLOR_RESET} cases succeeded, {RED}{number_of_failures}{COLOR_RESET} cases failed in {elapsed_secs} seconds"
    );
}
#[allow(clippy::too_many_arguments)]
#[instrument(
level = "info",
name = "Handling Case"
skip_all,
fields(
metadata_file_path = %test.metadata.relative_path().display(),
mode = %test.mode,
case_idx = %test.case_idx,
case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
)
)]
async fn handle_case_driver<'a>(
test: &Test<'a>,
cached_compiler: Arc<CachedCompiler<'a>>,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> anyhow::Result<usize> {
let platform_state = stream::iter(test.platforms.iter())
// Compiling the pre-link contracts.
.filter_map(|(platform, node, compiler, reporter)| {
let cached_compiler = cached_compiler.clone();
async move {
let compiler_output = cached_compiler
.compile_contracts(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
None,
compiler.as_ref(),
*platform,
reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.ok()?;
Some((test, platform, node, compiler, reporter, compiler_output))
}
})
// Deploying the libraries for the platform.
.filter_map(
|(test, platform, node, compiler, reporter, compiler_output)| async move {
let mut deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = test
.metadata
.contract_sources()
.inspect_err(|err| {
error!(
?err,
platform_identifier = %platform.platform_identifier(),
"Failed to retrieve contract sources from metadata"
)
})
.ok()?;
for library_instance in test
.metadata
.libraries
.iter()
.flatten()
.flat_map(|(_, map)| map.values())
{
debug!(%library_instance, "Deploying Library Instance");
let ContractPathAndIdent {
contract_source_path: library_source_path,
contract_ident: library_ident,
} = contract_sources.remove(library_instance)?;
let (code, abi) = compiler_output
.contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))?;
let code = alloy::hex::decode(code).ok()?;
// Getting the deployer address from the cases themselves. This is to ensure
// that we're doing the deployments from different accounts and therefore we're
// not slowed down by the nonce.
let deployer_address = test
.case
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => input.caller.as_address().copied(),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
})
.next()
.unwrap_or(FunctionCallStep::default_caller_address());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = node
.execute_transaction(tx)
.await
.inspect_err(|err| {
error!(
?err,
%library_instance,
platform_identifier = %platform.platform_identifier(),
"Failed to deploy the library"
)
})
.ok()?;
debug!(
?library_instance,
platform_identifier = %platform.platform_identifier(),
"Deployed library"
);
let library_address = receipt.contract_address?;
deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(library_ident.clone(), library_address, abi.clone()),
);
}
Some((
test,
platform,
node,
compiler,
reporter,
compiler_output,
deployed_libraries,
))
},
)
// Compiling the post-link contracts.
.filter_map(
|(test, platform, node, compiler, reporter, _, deployed_libraries)| {
let cached_compiler = cached_compiler.clone();
let private_key_allocator = private_key_allocator.clone();
async move {
let compiler_output = cached_compiler
.compile_contracts(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
deployed_libraries.as_ref(),
compiler.as_ref(),
*platform,
reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.ok()?;
let case_state = CaseState::new(
compiler.version().clone(),
compiler_output.contracts,
deployed_libraries.unwrap_or_default(),
reporter.clone(),
private_key_allocator,
);
Some((*node, platform.platform_identifier(), case_state))
}
},
)
// Collect
.collect::<Vec<_>>()
.await;
let mut driver = CaseDriver::new(test.metadata, test.case, platform_state);
driver
.execute()
.await
.inspect(|steps_executed| info!(steps_executed, "Case succeeded"))
}
/// Executes the given metadata files against every platform requested by the context.
///
/// The context's platform identifiers are deduplicated through an ordered set before being
/// resolved into platform trait objects and handed to [`run_driver`].
async fn execute_corpus(
    context: TestExecutionContext,
    tests: &[MetadataFile],
    reporter: Reporter,
    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
    let unique_platform_identifiers = context
        .platforms
        .iter()
        .copied()
        .collect::<BTreeSet<_>>();
    let platforms: Vec<&dyn Platform> = unique_platform_identifiers
        .into_iter()
        .map(Into::into)
        .collect();
    run_driver(context, tests, reporter, report_aggregator_task, platforms).await
}
/// this represents a single "test"; a mode, path and collection of cases.
#[allow(clippy::type_complexity)]
struct Test<'a> {
    /// The metadata file this test was expanded from.
    metadata: &'a MetadataFile,
    /// The on-disk path of the metadata file (borrowed from `metadata`).
    metadata_file_path: &'a Path,
    /// The compiler mode this test runs under.
    mode: Cow<'a, Mode>,
    /// The index of the case within the metadata file's list of cases.
    case_idx: CaseIdx,
    /// The case to execute.
    case: &'a Case,
    /// Per-platform execution resources: the platform, its assigned node, a compiler
    /// instance, and an execution-specific reporter.
    platforms: Vec<(
        &'a dyn Platform,
        &'a dyn EthereumNode,
        Box<dyn SolidityCompiler>,
        ExecutionSpecificReporter,
    )>,
    /// Reporter scoped to this specific (mode, metadata file, case) combination.
    reporter: TestSpecificReporter,
}
impl<'a> Test<'a> {
    /// Checks if this test can be ran with the current configuration.
    ///
    /// Runs every individual compatibility check in order and short-circuits on the first
    /// one that rejects the test.
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (platform, ..) in self.platforms.iter() {
            // A platform is allowed when the test declares no targets at all, or when any
            // declared target matches the platform's VM identifier.
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(targets) => targets
                    .iter()
                    .any(|target| &platform.vm_identifier() == target),
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms do not support the targets allowed by the test.",
                error_map,
            ))
        }
    }

    // Checks for the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (platform, node, ..) in self.platforms.iter() {
            let is_allowed_for_platform = evm_version_requirement.matches(&node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible for the platforms specified",
                error_map,
            ))
        }
    }

    /// Checks if the platforms compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        // NOTE(review): this key/value pair looks copy-pasted from the EVM-version check
        // above — the map reports the required EVM version even though this check is about
        // compiler mode support. Confirm whether a mode-related entry was intended here.
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (platform, _, compiler, ..) in self.platforms.iter() {
            let is_allowed_for_platform =
                compiler.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
        if is_allowed {
            Ok(())
        } else {
            Err((
                "Compilers do not support this mode either for the provided platforms.",
                error_map,
            ))
        }
    }
}
type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
-2
View File
@@ -14,8 +14,6 @@ revive-dt-common = { workspace = true }
revive-common = { workspace = true }
alloy = { workspace = true }
alloy-primitives = { workspace = true }
alloy-sol-types = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
+3 -6
View File
@@ -3,10 +3,7 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use crate::{
mode::ParsedMode,
steps::{Expected, RepeatStep, Step},
};
use crate::{mode::ParsedMode, steps::*};
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
@@ -48,8 +45,8 @@ pub struct Case {
#[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>,
/// An optional boolean which defines if the case as a whole should be ignored. If null then the
/// case will not be ignored.
/// An optional boolean which defines if the case as a whole should be ignored. If null then
/// the case will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>,
}
+13 -11
View File
@@ -31,8 +31,8 @@ pub struct MetadataFile {
/// The path of the metadata file. This will either be a JSON or solidity file.
pub metadata_file_path: PathBuf,
/// This is the path contained within the corpus file. This could either be the path of some dir
/// or could be the actual metadata file path.
/// This is the path contained within the corpus file. This could either be the path of some
/// dir or could be the actual metadata file path.
pub corpus_file_path: PathBuf,
/// The metadata contained within the file.
@@ -69,13 +69,13 @@ impl Deref for MetadataFile {
/// of steps and assertions that should be performed as part of the test case.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub struct Metadata {
/// This is an optional comment on the metadata file which has no impact on the execution in any
/// way.
/// This is an optional comment on the metadata file which has no impact on the execution in
/// any way.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// An optional boolean which defines if the metadata file as a whole should be ignored. If null
/// then the metadata file will not be ignored.
/// An optional boolean which defines if the metadata file as a whole should be ignored. If
/// null then the metadata file will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>,
@@ -94,8 +94,8 @@ pub struct Metadata {
/// This is a map where the key is the name of the contract instance and the value is the
/// contract's path and ident in the file.
///
/// If any contract is to be used by the test then it must be included in here first so that the
/// framework is aware of its path, compiles it, and prepares it.
/// If any contract is to be used by the test then it must be included in here first so that
/// the framework is aware of its path, compiles it, and prepares it.
#[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
@@ -123,8 +123,9 @@ pub struct Metadata {
pub required_evm_version: Option<EvmVersionRequirement>,
/// A set of compilation directives that will be passed to the compiler whenever the contracts
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]
/// is just a filter for when a test can run whereas this is an instruction to the compiler.
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
/// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
/// compiler.
#[serde(skip_serializing_if = "Option::is_none")]
pub compiler_directives: Option<CompilationDirectives>,
}
@@ -326,7 +327,8 @@ define_wrapper_type!(
)]
#[serde(try_from = "String", into = "String")]
pub struct ContractPathAndIdent {
/// The path of the contract source code relative to the directory containing the metadata file.
/// The path of the contract source code relative to the directory containing the metadata
/// file.
pub contract_source_path: PathBuf,
/// The identifier of the contract.
+5 -6
View File
@@ -1,13 +1,12 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use revive_dt_common::{
iterators::EitherIter,
types::{Mode, ModeOptimizerSetting, ModePipeline},
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};
/// This represents a mode that has been parsed from test metadata.
///
+34 -29
View File
@@ -4,10 +4,9 @@ use alloy::{
eips::BlockNumberOrTag,
json_abi::Function,
network::TransactionBuilder,
primitives::{Address, Bytes, U256},
primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units},
rpc::types::TransactionRequest,
};
use alloy_primitives::{FixedBytes, utils::parse_units};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
use schemars::JsonSchema;
@@ -17,8 +16,10 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::macros::define_wrapper_type;
use tracing::{Instrument, info_span, instrument};
use crate::traits::ResolverApi;
use crate::{metadata::ContractInstance, traits::ResolutionContext};
use crate::{
metadata::ContractInstance,
traits::{ResolutionContext, ResolverApi},
};
/// A test step.
///
@@ -147,8 +148,8 @@ pub struct FunctionCallStep {
#[schemars(skip)]
pub storage: Option<HashMap<String, Calldata>>,
/// Variable assignment to perform in the framework allowing us to reference them again later on
/// during the execution.
/// Variable assignment to perform in the framework allowing us to reference them again later
/// on during the execution.
#[serde(skip_serializing_if = "Option::is_none")]
pub variable_assignments: Option<VariableAssignments>,
}
@@ -488,21 +489,20 @@ impl FunctionCallStep {
anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
};
// We follow the same logic that's implemented in the matter-labs-tester where they resolve
// the function name into a function selector and they assume that he function doesn't have
// any existing overloads.
// We follow the same logic that's implemented in the matter-labs-tester where they
// resolve the function name into a function selector and they assume that he
// function doesn't have any existing overloads.
// Overloads are handled by providing the full function signature in the "function
// name".
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
let selector =
if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
let selector = if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
.context(
"Failed to parse the provided function name into a function signature",
)?
.selector()
} else {
abi.functions()
} else {
abi.functions()
.find(|function| function.signature().starts_with(function_name))
.ok_or_else(|| {
anyhow::anyhow!(
@@ -511,19 +511,21 @@ impl FunctionCallStep {
&self.instance
)
})
.with_context(|| format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
))?
.with_context(|| {
format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
)
})?
.selector()
};
};
// Allocating a vector that we will be using for the calldata. The vector size will be:
// 4 bytes for the function selector.
// Allocating a vector that we will be using for the calldata. The vector size will
// be: 4 bytes for the function selector.
// function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
//
// We're using indices in the following code in order to avoid the need for us to allocate
// a new buffer for each one of the resolved arguments.
// We're using indices in the following code in order to avoid the need for us to
// allocate a new buffer for each one of the resolved arguments.
let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement());
calldata.extend(selector.0);
self.calldata
@@ -537,7 +539,7 @@ impl FunctionCallStep {
}
/// Parse this input into a legacy transaction.
pub async fn legacy_transaction(
pub async fn as_transaction(
&self,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
@@ -959,9 +961,12 @@ impl<'de> Deserialize<'de> for EtherValue {
#[cfg(test)]
mod tests {
use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
use alloy_primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
use alloy_sol_types::SolValue;
use alloy::{
eips::BlockNumberOrTag,
json_abi::JsonAbi,
primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address},
sol_types::SolValue,
};
use std::{collections::HashMap, pin::Pin};
use super::*;
@@ -1115,7 +1120,7 @@ mod tests {
let encoded = input.encoded_input(&resolver, context).await.unwrap();
assert!(encoded.0.starts_with(&selector));
type T = (alloy_primitives::Address,);
type T = (alloy::primitives::Address,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!(
decoded.0,
@@ -1162,7 +1167,7 @@ mod tests {
let encoded = input.encoded_input(&resolver, context).await.unwrap();
assert!(encoded.0.starts_with(&selector));
type T = (alloy_primitives::Address,);
type T = (alloy::primitives::Address,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!(
decoded.0,
+6 -6
View File
@@ -1,10 +1,10 @@
use std::collections::HashMap;
use std::pin::Pin;
use std::{collections::HashMap, pin::Pin};
use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use alloy_primitives::TxHash;
use alloy::{
eips::BlockNumberOrTag,
json_abi::JsonAbi,
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
};
use anyhow::Result;
use crate::metadata::{ContractIdent, ContractInstance};
+34
View File
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[[bin]]
name = "ml-test-runner"
path = "src/main.rs"
[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
[lints]
workspace = true
+74
View File
@@ -0,0 +1,74 @@
# ML Test Runner
A test runner for executing Revive differential tests file-by-file with cargo-test-style output.
This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)
## Features
- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms
## Usage
```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth
# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink
# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth
# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail
# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth --start-platform
# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```
## Arguments
- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (e.g. `geth-evm-solc`, `kitchensink-polkavm-resolc`; default: `geth-evm-solc`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)
## Output Format
The runner produces cargo-test-style output:
```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached
failures:
---- path/to/test2.sol ----
Error: ...
test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```
## Building
```bash
cargo build --release -p ml-test-runner
```
The binary will be available at `target/release/ml-test-runner`.
+541
View File
@@ -0,0 +1,541 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
iterators::FilesWithExtensionIterator,
types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
CachedCompiler, Platform,
helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
case::CaseIdx,
corpus::Corpus,
metadata::{Metadata, MetadataFile},
};
use std::{
borrow::Cow,
collections::{BTreeMap, HashSet},
fs::File,
io::{BufRead, BufReader, BufWriter, Write},
path::{Path, PathBuf},
sync::Arc,
time::Instant,
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files
#[arg(value_name = "PATH")]
path: PathBuf,
/// File to cache tests that have already passed
#[arg(long = "cached-passed")]
cached_passed: Option<PathBuf>,
/// Stop after the first file failure
#[arg(long = "bail")]
bail: bool,
/// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
#[arg(long = "platform", default_value = "geth-evm-solc")]
platform: PlatformIdentifier,
/// Start the platform and wait for RPC readiness
#[arg(long = "start-platform", default_value = "false")]
start_platform: bool,
/// Private key to use for wallet initialization (hex string with or without 0x prefix)
#[arg(
long = "private-key",
default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
)]
private_key: String,
/// RPC port to connect to when using existing node
#[arg(long = "rpc-port", default_value = "8545")]
rpc_port: u16,
}
/// Entry point: installs a stderr-backed tracing subscriber, parses the CLI arguments, logs
/// the chosen configuration, and drives the async `run` routine on a freshly built
/// multi-threaded Tokio runtime.
fn main() -> anyhow::Result<()> {
    let stderr_subscriber = FmtSubscriber::builder()
        .with_env_filter(EnvFilter::from_default_env())
        .with_writer(std::io::stderr)
        .finish();
    tracing::subscriber::set_global_default(stderr_subscriber)
        .expect("Failed to set tracing subscriber");

    let args = MlTestRunnerArgs::parse();
    info!("ML test runner starting");
    info!("Platform: {:?}", args.platform);
    info!("Start platform: {}", args.start_platform);

    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("Failed building the Runtime");
    runtime.block_on(run(args))
}
/// Discover test files, execute each one sequentially, maintain the
/// passed-test cache, and print a cargo-test-style summary to stdout.
///
/// Individual test failures are reported on stdout and turn into a non-zero
/// process exit status; an `Err` return is reserved for infrastructure
/// failures (discovery or cache I/O).
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
    let start_time = Instant::now();
    info!("Discovering test files from: {}", args.path.display());
    let test_files = discover_test_files(&args.path)?;
    info!("Found {} test file(s)", test_files.len());

    // Seed the "already passed" set from the cache file, when one was given.
    let cached_passed = if let Some(cache_file) = &args.cached_passed {
        let cached = load_cached_passed(cache_file)?;
        info!("Loaded {} cached passed test(s)", cached.len());
        cached
    } else {
        HashSet::new()
    };
    // Wrapped in Arc<Mutex<_>> so the set can be shared across await points.
    let cached_passed = Arc::new(Mutex::new(cached_passed));

    let mut passed_files = 0;
    let mut failed_files = 0;
    let mut skipped_files = 0;
    // (file display name, error description) pairs for the summary section.
    let mut failures = Vec::new();

    // ANSI escape codes used for the cargo-test-like stdout output.
    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const YELLOW: &str = "\x1B[33m";
    const COLOUR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    for test_file in test_files {
        let file_display = test_file.display().to_string();
        // Check if already passed
        {
            let cache = cached_passed.lock().await;
            if cache.contains(&file_display) {
                println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display);
                skipped_files += 1;
                continue;
            }
        }
        info!("Loading metadata from: {}", test_file.display());
        let metadata_file = match load_metadata_file(&test_file) {
            Ok(mf) => {
                info!("Loaded metadata with {} case(s)", mf.cases.len());
                mf
            }
            // A metadata load failure counts as a file failure and honours
            // `--bail` just like an execution failure.
            Err(e) => {
                println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
                println!(" Error loading metadata: {}", e);
                failed_files += 1;
                failures.push((
                    file_display.clone(),
                    format!("Error loading metadata: {}", e),
                ));
                if args.bail {
                    break;
                }
                continue;
            }
        };
        info!("Executing test file: {}", file_display);
        match execute_test_file(&args, &metadata_file).await {
            Ok(_) => {
                println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display);
                info!("Test file passed: {}", file_display);
                passed_files += 1;
                // Record the pass so subsequent runs skip this file.
                {
                    let mut cache = cached_passed.lock().await;
                    cache.insert(file_display);
                }
            }
            Err(e) => {
                println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
                failed_files += 1;
                failures.push((file_display, format!("{:?}", e)));
                if args.bail {
                    info!("Bailing after first failure");
                    break;
                }
            }
        }
    }

    // Persist the updated cache (including any newly passed files) before
    // printing the summary, so a later failure cannot lose the progress.
    if let Some(cache_file) = &args.cached_passed {
        let cache = cached_passed.lock().await;
        info!("Saving {} cached passed test(s)", cache.len());
        save_cached_passed(cache_file, &cache)?;
    }

    // Print summary
    println!();
    if !failures.is_empty() {
        println!("{BOLD}failures:{BOLD_RESET}");
        println!();
        for (file, error) in &failures {
            println!("---- {} ----", file);
            println!("{}", error);
            println!();
        }
    }
    let elapsed = start_time.elapsed();
    println!(
        "test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
        if failed_files == 0 {
            format!("{GREEN}ok{COLOUR_RESET}")
        } else {
            format!("{RED}FAILED{COLOUR_RESET}")
        },
        passed_files,
        failed_files,
        skipped_files,
        elapsed.as_secs_f64()
    );
    // Mirror `cargo test`: non-zero exit status when anything failed.
    if failed_files > 0 {
        std::process::exit(1);
    }
    Ok(())
}
/// Discover test files from the given path
///
/// Accepts a single `.sol` test file, a `.json` corpus file (which is expanded
/// into the metadata files it references), or a directory that is searched
/// recursively for `.sol` files.
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
    if !path.exists() {
        anyhow::bail!("Path does not exist: {}", path.display());
    }

    if path.is_dir() {
        // Recursively collect every `.sol` file below this directory.
        let sol_files = FilesWithExtensionIterator::new(path)
            .with_allowed_extension("sol")
            .with_use_cached_fs(true)
            .collect();
        return Ok(sol_files);
    }

    if !path.is_file() {
        anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
    }

    let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");
    match extension {
        // A single Solidity test file stands on its own.
        "sol" => Ok(vec![path.to_path_buf()]),
        // A corpus file: expand it into the metadata files it enumerates.
        "json" => {
            let corpus = Corpus::try_from_path(path)?;
            Ok(corpus
                .enumerate_tests()
                .into_iter()
                .map(|metadata| metadata.metadata_file_path)
                .collect())
        }
        _ => anyhow::bail!(
            "Unsupported file extension: {}. Expected .sol or .json",
            extension
        ),
    }
}
/// Load metadata from a test file
///
/// Both the metadata and corpus paths of the returned [`MetadataFile`] point
/// at `path`, since a standalone test file serves as its own corpus here.
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
    match Metadata::try_from_file(path) {
        Some(content) => Ok(MetadataFile {
            metadata_file_path: path.to_path_buf(),
            corpus_file_path: path.to_path_buf(),
            content,
        }),
        None => Err(anyhow::anyhow!(
            "Failed to load metadata from {}",
            path.display()
        )),
    }
}
/// Execute all test cases in a metadata file
///
/// Resolves the requested platform, obtains a node (spawning one when
/// `--start-platform` is set, otherwise connecting to an existing one on the
/// configured RPC port), builds a test definition per compatible case, and
/// then drives each definition to completion sequentially. Fails fast on the
/// first case error.
async fn execute_test_file(
    args: &MlTestRunnerArgs,
    metadata_file: &MetadataFile,
) -> anyhow::Result<()> {
    if metadata_file.cases.is_empty() {
        anyhow::bail!("No test cases found in file");
    }
    info!("Processing {} test case(s)", metadata_file.cases.len());

    // Get the platform based on CLI args
    let platform: &dyn Platform = match args.platform {
        PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
        PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
        PlatformIdentifier::KitchensinkPolkavmResolc => {
            &revive_dt_core::KitchensinkPolkavmResolcPlatform
        }
        PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
        PlatformIdentifier::ReviveDevNodePolkavmResolc => {
            &revive_dt_core::ReviveDevNodePolkavmResolcPlatform
        }
        PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
        PlatformIdentifier::ZombienetPolkavmResolc => {
            &revive_dt_core::ZombienetPolkavmResolcPlatform
        }
        PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
    };

    // Holds the compilation cache; cleaned up automatically when dropped at
    // the end of this function.
    let temp_dir = TempDir::new()?;
    info!("Created temporary directory: {}", temp_dir.path().display());
    let test_context = TestExecutionContext::default();
    let context = revive_dt_config::Context::Test(Box::new(test_context));

    // The node is intentionally leaked (`Box::leak`) to obtain the `'static`
    // reference the test definitions require; this binary is short-lived, so
    // the leak is bounded by the process lifetime.
    let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
        info!("Starting blockchain node...");
        let node_handle = platform
            .new_node(context.clone())
            .context("Failed to spawn node thread")?;
        info!("Waiting for node to start...");
        // Two failure layers: the spawning thread may panic, or node startup
        // itself may return an error.
        let node = node_handle
            .join()
            .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
            .context("Failed to start node")?;
        info!(
            "Node started with ID: {}, connection: {}",
            node.id(),
            node.connection_string()
        );
        let node = Box::leak(node);
        info!("Running pre-transactions...");
        node.pre_transactions()
            .await
            .context("Failed to run pre-transactions")?;
        info!("Pre-transactions completed");
        node
    } else {
        info!("Using existing node");
        // Map the platform family to the matching node client: geth-based
        // platforms talk to a Geth node, everything else to a Substrate node.
        let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
            PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc => {
                Box::new(
                    revive_dt_node::node_implementations::geth::GethNode::new_existing(
                        &args.private_key,
                        args.rpc_port,
                    )
                    .await?,
                )
            }
            PlatformIdentifier::KitchensinkPolkavmResolc
            | PlatformIdentifier::KitchensinkRevmSolc
            | PlatformIdentifier::ReviveDevNodePolkavmResolc
            | PlatformIdentifier::ReviveDevNodeRevmSolc
            | PlatformIdentifier::ZombienetPolkavmResolc
            | PlatformIdentifier::ZombienetRevmSolc => Box::new(
                revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
                    &args.private_key,
                    args.rpc_port,
                )
                .await?,
            ),
        };
        Box::leak(existing_node)
    };

    info!("Initializing cached compiler");
    let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
        .await
        .map(Arc::new)
        .context("Failed to create cached compiler")?;
    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        alloy::primitives::U256::from(100),
    )));
    let (reporter, report_task) =
        revive_dt_report::ReportAggregator::new(context.clone()).into_task();
    // The aggregator task runs detached for the remainder of the process.
    tokio::spawn(report_task);

    info!(
        "Building test definitions for {} case(s)",
        metadata_file.cases.len()
    );
    let mut test_definitions = Vec::new();
    for (case_idx, case) in metadata_file.cases.iter().enumerate() {
        info!("Building test definition for case {}", case_idx);
        let test_def = build_test_definition(
            metadata_file,
            case,
            case_idx,
            platform,
            node,
            &context,
            &reporter,
        )
        .await?;
        // `None` means the case is incompatible with this platform/mode and
        // was skipped rather than failed.
        if let Some(test_def) = test_def {
            info!("Test definition for case {} created successfully", case_idx);
            test_definitions.push(test_def);
        }
    }

    info!("Executing {} test definition(s)", test_definitions.len());
    for (idx, test_definition) in test_definitions.iter().enumerate() {
        info!("─────────────────────────────────────────────────────────────────");
        info!(
            "Executing case {}/{}: case_idx={}, mode={}, steps={}",
            idx + 1,
            test_definitions.len(),
            test_definition.case_idx,
            test_definition.mode,
            test_definition.case.steps.len()
        );
        info!("Creating driver for case {}", test_definition.case_idx);
        let driver = revive_dt_core::differential_tests::Driver::new_root(
            test_definition,
            private_key_allocator.clone(),
            &cached_compiler,
        )
        .await
        .context("Failed to create driver")?;
        info!(
            "Running {} step(s) for case {}",
            test_definition.case.steps.len(),
            test_definition.case_idx
        );
        let steps_executed = driver.execute_all().await.context(format!(
            "Failed to execute case {}",
            test_definition.case_idx
        ))?;
        info!(
            "✓ Case {} completed successfully, executed {} step(s)",
            test_definition.case_idx, steps_executed
        );
    }
    info!("─────────────────────────────────────────────────────────────────");
    info!(
        "All {} test case(s) executed successfully",
        test_definitions.len()
    );
    Ok(())
}
/// Build a test definition for a single test case
///
/// Selects a compiler mode, creates the platform-specific compiler and
/// reporters, and assembles the [`TestDefinition`]. Returns `Ok(None)` when
/// the case is incompatible with the selected platform/mode and should be
/// skipped rather than treated as a failure.
async fn build_test_definition<'a>(
    metadata_file: &'a MetadataFile,
    case: &'a revive_dt_format::case::Case,
    case_idx: usize,
    platform: &'a dyn Platform,
    node: &'a dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
    reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
    // Mode preference order: the case's own modes, then the file-level modes,
    // then the first of all known compiler modes as a fallback.
    // NOTE(review): the trailing `unwrap` assumes `Mode::all()` yields at
    // least one mode — confirm that invariant holds.
    let mode = case
        .modes
        .as_ref()
        .or(metadata_file.modes.as_ref())
        .and_then(|modes| modes.first())
        .and_then(|parsed_mode| parsed_mode.to_modes().next())
        .map(Cow::Owned)
        .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
        .unwrap();
    let compiler = platform
        .new_compiler(context.clone(), mode.version.clone().map(Into::into))
        .await
        .context("Failed to create compiler")?;
    // Two reporter layers: one scoped to the test (file + case + mode), and
    // one further scoped to this particular node/platform execution.
    let test_reporter =
        reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
            solc_mode: mode.as_ref().clone(),
            metadata_file_path: metadata_file.metadata_file_path.clone(),
            case_idx: CaseIdx::new(case_idx),
        }));
    let execution_reporter =
        test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
    // This runner drives a single platform, so the map holds exactly one entry.
    let mut platforms = BTreeMap::new();
    platforms.insert(
        platform.platform_identifier(),
        TestPlatformInformation {
            platform,
            node,
            compiler,
            reporter: execution_reporter,
        },
    );
    let test_definition = TestDefinition {
        metadata: metadata_file,
        metadata_file_path: &metadata_file.metadata_file_path,
        mode,
        case_idx: CaseIdx::new(case_idx),
        case,
        platforms,
        reporter: test_reporter,
    };
    if let Err((reason, _)) = test_definition.check_compatibility() {
        println!(" Skipping case {}: {}", case_idx, reason);
        return Ok(None);
    }
    Ok(Some(test_definition))
}
/// Load cached passed tests from file
///
/// The cache file holds one test identifier per line; blank lines are
/// ignored and surrounding whitespace is trimmed. A missing file is not an
/// error — it simply yields an empty set.
///
/// # Errors
///
/// Returns an error when the file exists but cannot be opened or read.
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
    if !path.exists() {
        return Ok(HashSet::new());
    }
    let file = File::open(path).context("Failed to open cached-passed file")?;
    let mut reader = BufReader::new(file);
    let mut cache = HashSet::new();
    // Reuse one line buffer across iterations instead of allocating a fresh
    // String per line (as `lines()` does); `read_line` returns 0 at EOF.
    let mut line = String::new();
    while reader.read_line(&mut line)? != 0 {
        let trimmed = line.trim();
        if !trimmed.is_empty() {
            cache.insert(trimmed.to_string());
        }
        line.clear();
    }
    Ok(cache)
}
/// Save cached passed tests to file
///
/// Writes one entry per line in sorted order, so the cache file is stable
/// across runs and diffs cleanly under version control.
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    // Sort before writing: HashSet iteration order is unspecified.
    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();

    let file = File::create(path).context("Failed to create cached-passed file")?;
    let mut writer = BufWriter::new(file);
    entries
        .iter()
        .try_for_each(|entry| writeln!(writer, "{}", entry))?;
    // Flush explicitly: Drop would flush too, but silently swallows errors.
    writer.flush()?;
    Ok(())
}
+1
View File
@@ -15,6 +15,7 @@ revive-dt-format = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
[lints]
workspace = true
+54 -5
View File
@@ -1,24 +1,41 @@
//! This crate implements all node interactions.
use std::pin::Pin;
use std::sync::Arc;
use std::{pin::Pin, sync::Arc};
use alloy::primitives::{Address, StorageKey, TxHash, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use alloy::{
primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
},
};
use anyhow::Result;
use futures::Stream;
use revive_common::EVMVersion;
use revive_dt_format::traits::ResolverApi;
/// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)]
pub trait EthereumNode {
/// A function to run post spawning the nodes and before any transactions are run on the node.
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;
fn id(&self) -> usize;
/// Returns the nodes connection string.
fn connection_string(&self) -> &str;
fn submit_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;
fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
/// Execute the [TransactionRequest] and return a [TransactionReceipt].
fn execute_transaction(
&self,
@@ -50,4 +67,36 @@ pub trait EthereumNode {
/// Returns the EVM version of the node.
fn evm_version(&self) -> EVMVersion;
/// Returns a stream of the blocks that were mined by the node.
fn subscribe_to_full_blocks_information(
&self,
) -> Pin<
Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_,
>,
>;
/// Checks if the provided address is in the wallet. If it is, returns the address.
/// Otherwise, returns the default signer's address.
fn resolve_signer_or_default(&self, address: Address) -> Address;
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MinedBlockInformation {
/// The block number.
pub block_number: BlockNumber,
/// The block timestamp.
pub block_timestamp: BlockTimestamp,
/// The amount of gas mined in the block.
pub mined_gas: u128,
/// The gas limit of the block.
pub block_gas_limit: u128,
/// The hashes of the transactions that were mined as part of the block.
pub transaction_hashes: Vec<TxHash>,
}
+3
View File
@@ -11,7 +11,9 @@ rust-version.workspace = true
[dependencies]
anyhow = { workspace = true }
alloy = { workspace = true }
futures = { workspace = true }
tracing = { workspace = true }
tower = { workspace = true }
tokio = { workspace = true }
revive-common = { workspace = true }
@@ -27,6 +29,7 @@ serde_yaml_ng = { workspace = true }
sp-core = { workspace = true }
sp-runtime = { workspace = true }
zombienet-sdk = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
+5
View File
@@ -1,5 +1,10 @@
use alloy::primitives::ChainId;
/// This constant defines how much Wei accounts are pre-seeded with in genesis.
///
/// Note: After changing this number, check that the tests for substrate work as we encountered
/// some issues with different values of the initial balance on substrate.
pub const INITIAL_BALANCE: u128 = 10u128.pow(37);
/// The chain id used for all of the chains spawned by the framework.
pub const CHAIN_ID: ChainId = 420420420;
+3
View File
@@ -0,0 +1,3 @@
mod process;
pub use process::*;
@@ -110,8 +110,11 @@ impl Process {
}
let check_result =
check_function(stdout_line.as_deref(), stderr_line.as_deref())
.context("Failed to wait for the process to be ready")?;
check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
format!(
"Failed to wait for the process to be ready - {stdout} - {stderr}"
),
)?;
if check_result {
break;
@@ -127,10 +130,10 @@ impl Process {
ProcessReadinessWaitBehavior::WaitForCommandToExit => {
if !child
.wait()
.context("Failed waiting for kurtosis run process to finish")?
.context("Failed waiting for process to finish")?
.success()
{
anyhow::bail!("Failed to initialize kurtosis network",);
anyhow::bail!("Failed to spawn command");
}
}
}
+3 -5
View File
@@ -3,12 +3,10 @@
use alloy::genesis::Genesis;
use revive_dt_node_interaction::EthereumNode;
pub mod common;
pub mod constants;
pub mod geth;
pub mod lighthouse_geth;
pub mod process;
pub mod substrate;
pub mod helpers;
pub mod node_implementations;
pub mod provider_utils;
/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
@@ -18,20 +18,26 @@ use alloy::{
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
primitives::{
Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256,
},
providers::{
Provider, ProviderBuilder,
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use revive_common::EVMVersion;
use tracing::{Instrument, instrument};
use tokio::sync::OnceCell;
use tracing::{Instrument, error, instrument};
use revive_dt_common::{
fs::clear_directory,
@@ -39,13 +45,13 @@ use revive_dt_common::{
};
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
constants::{CHAIN_ID, INITIAL_BALANCE},
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -70,7 +76,8 @@ pub struct GethNode {
start_timeout: Duration,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
chain_id: ChainId,
}
impl GethNode {
@@ -119,11 +126,122 @@ impl GethNode {
handle: None,
start_timeout: geth_configuration.start_timeout_ms,
wallet: wallet.clone(),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
provider: Default::default(),
chain_id: CHAIN_ID,
}
}
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key
.trim()
.strip_prefix("0x")
.unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let address = signer.address();
let wallet = Arc::new(EthereumWallet::new(signer));
let connection_string = format!("http://localhost:{}", rpc_port);
let chain_id = ProviderBuilder::new()
.connect_http(connection_string.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
let node = Self {
connection_string: format!("http://localhost:{}", rpc_port),
base_directory: PathBuf::new(),
data_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
geth: PathBuf::new(),
id: 0,
chain_id,
handle: None,
start_timeout: Duration::from_secs(0),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
};
// Check balance and fund if needed
node.ensure_funded(address).await?;
Ok(node)
}
/// Ensure that the given address has at least 1000 ETH, funding it from the node's managed
/// account if necessary.
async fn ensure_funded(&self, address: Address) -> anyhow::Result<()> {
use alloy::{
primitives::utils::{format_ether, parse_ether},
providers::{Provider, ProviderBuilder},
};
let provider = ProviderBuilder::new().connect_http(self.connection_string.parse()?);
let balance = provider.get_balance(address).await?;
let min_balance = parse_ether("1000")?;
if balance >= min_balance {
tracing::info!(
"Wallet {} already has sufficient balance: {} ETH",
address,
format_ether(balance)
);
return Ok(());
}
tracing::info!(
"Funding wallet {} (current: {} ETH, target: 1000 ETH)",
address,
format_ether(balance)
);
// Get the node's managed account
let accounts = provider.get_accounts().await?;
if accounts.is_empty() {
anyhow::bail!("No managed accounts available on the node to fund wallet");
}
let from_account = accounts[0];
let funding_amount = min_balance - balance;
let tx = TransactionRequest::default()
.from(from_account)
.to(address)
.value(funding_amount);
provider
.send_transaction(tx)
.await?
.get_receipt()
.await
.context("Failed to get receipt for funding transaction")?;
tracing::info!("Successfully funded wallet {}", address);
Ok(())
}
/// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
@@ -235,7 +353,7 @@ impl GethNode {
match process {
Ok(process) => self.handle = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start geth, shutting down gracefully");
error!(?err, "Failed to start geth, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(err);
@@ -245,27 +363,29 @@ impl GethNode {
Ok(self)
}
async fn provider(
&self,
) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
{
ProviderBuilder::new()
.disable_recommended_fillers()
.filler(FallbackGasFiller::new(
25_000_000,
1_000_000_000,
1_000_000_000,
))
.filler(self.chain_id_filler.clone())
.filler(NonceFiller::new(self.nonce_manager.clone()))
.wallet(self.wallet.clone())
.connect(&self.connection_string)
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::default(),
ChainIdFiller::new(Some(self.chain_id)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to construct the provider")
})
.await
.map_err(Into::into)
.cloned()
}
}
impl EthereumNode for GethNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}
fn id(&self) -> usize {
self.id as _
}
@@ -274,6 +394,50 @@ impl EthereumNode for GethNode {
&self.connection_string
}
#[instrument(
level = "info",
skip_all,
fields(geth_node_id = self.id, connection_string = self.connection_string),
err,
)]
fn submit_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.context("Failed to submit the transaction through the provider")?;
Ok(*pending_transaction.tx_hash())
})
}
#[instrument(
level = "info",
skip_all,
fields(geth_node_id = self.id, connection_string = self.connection_string),
err,
)]
fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for getting the receipt")?
.get_transaction_receipt(tx_hash)
.await
.context("Failed to get the receipt of the transaction")?
.context("Failed to get the receipt of the transaction")
})
}
#[instrument(
level = "info",
skip_all,
@@ -283,8 +447,7 @@ impl EthereumNode for GethNode {
fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::TransactionReceipt>> + '_>>
{
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
@@ -292,32 +455,33 @@ impl EthereumNode for GethNode {
.context("Failed to create provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
.send_transaction(transaction)
.await
.inspect_err(
|err| error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used
// to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// The following is a fix for the "transaction indexing is in progress" error that we
// used to get. You can find more information on this in the following GH issue in
// geth https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
// _might_ return the above error if the tx has not yet been indexed yet. So, we need to
// implement a retry mechanism for the receipt to keep retrying to get it until it
// eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
// it has been indexed. When we call alloy's `get_receipt` it checks if the
// transaction was confirmed. If it has been, then it will call
// `eth_getTransactionReceipt` method which _might_ return the above error if the tx
// has not yet been indexed yet. So, we need to implement a retry mechanism for the
// receipt to keep retrying to get it until it eventually works, but we only do that
// if the error we get back is the "transaction indexing is in progress" error or if
// the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially when
// a lot of transactions are being submitted to the node. Thus, while initially we only
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
// backoff each time we attempt to get the receipt and find that it's not available.
let provider = Arc::new(provider);
// Getting the transaction indexed and taking a receipt can take a long time especially
// when a lot of transactions are being submitted to the node. Thus, while initially
// we only allowed for 60 seconds of waiting with a 1 second delay in polling, we
// need to allow for a larger wait time. Therefore, in here we allow for 5 minutes of
// waiting with exponential backoff each time we attempt to get the receipt and find
// that it's not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -351,14 +515,12 @@ impl EthereumNode for GethNode {
&self,
tx_hash: TxHash,
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
{
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
let provider = Arc::new(
self.provider()
.await
.context("Failed to create provider for tracing")?,
);
let provider = self
.provider()
.await
.context("Failed to create provider for tracing")?;
poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -456,14 +618,64 @@ impl EthereumNode for GethNode {
fn evm_version(&self) -> EVMVersion {
EVMVersion::Cancun
}
fn subscribe_to_full_blocks_information(
&self,
) -> Pin<
Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_,
>,
> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for block subscription")?;
let block_subscription = provider.subscribe_full_blocks();
let block_stream = block_subscription
.into_stream()
.await
.context("Failed to create the block stream")?;
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit as _,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
pub struct GethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
pub struct GethNodeResolver {
id: u32,
provider: FillProvider<F, P, Ethereum>,
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for GethNodeResolver<F, P> {
impl ResolverApi for GethNodeResolver {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn chain_id(
&self,
@@ -648,12 +860,38 @@ mod tests {
(context, node)
}
fn shared_state() -> &'static (TestExecutionContext, GethNode) {
static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
&STATE
}
fn shared_node() -> &'static GethNode {
static NODE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
&NODE.1
&shared_state().1
}
#[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = shared_state();
let account_address = context
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default()
.to(account_address)
.value(U256::from(100_000_000_000_000u128));
// Act
let receipt = node.execute_transaction(transaction).await;
// Assert
let _ = receipt.expect("Failed to get the receipt for the transfer");
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn version_works() {
// Arrange
let node = shared_node();
@@ -670,6 +908,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_chain_id_from_node() {
// Arrange
let node = shared_node();
@@ -683,6 +922,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_gas_limit_from_node() {
// Arrange
let node = shared_node();
@@ -700,6 +940,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_coinbase_from_node() {
// Arrange
let node = shared_node();
@@ -717,6 +958,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_difficulty_from_node() {
// Arrange
let node = shared_node();
@@ -734,6 +976,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_hash_from_node() {
// Arrange
let node = shared_node();
@@ -751,6 +994,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_timestamp_from_node() {
// Arrange
let node = shared_node();
@@ -768,6 +1012,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_number_from_node() {
// Arrange
let node = shared_node();
@@ -9,7 +9,7 @@
//! that the tool has.
use std::{
collections::BTreeMap,
collections::{BTreeMap, HashSet},
fs::{File, create_dir_all},
io::Read,
ops::ControlFlow,
@@ -31,20 +31,24 @@ use alloy::{
Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address,
},
providers::{
Provider, ProviderBuilder,
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use revive_common::EVMVersion;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::serde_as;
use tracing::{Instrument, instrument};
use tokio::sync::OnceCell;
use tracing::{Instrument, info, instrument};
use revive_dt_common::{
fs::clear_directory,
@@ -52,13 +56,13 @@ use revive_dt_common::{
};
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
constants::{CHAIN_ID, INITIAL_BALANCE},
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -75,7 +79,8 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
pub struct LighthouseGethNode {
/* Node Identifier */
id: u32,
connection_string: String,
ws_connection_string: String,
http_connection_string: String,
enclave_name: String,
/* Directory Paths */
@@ -91,17 +96,21 @@ pub struct LighthouseGethNode {
/* Spawned Processes */
process: Option<Process>,
/* Prefunded Account Information */
prefunded_account_address: Address,
/* Provider Related Fields */
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
}
impl LighthouseGethNode {
const BASE_DIRECTORY: &str = "lighthouse";
const LOGS_DIRECTORY: &str = "logs";
const IPC_FILE_NAME: &str = "geth.ipc";
const CONFIG_FILE_NAME: &str = "config.yaml";
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
@@ -134,10 +143,8 @@ impl LighthouseGethNode {
Self {
/* Node Identifier */
id,
connection_string: base_directory
.join(Self::IPC_FILE_NAME)
.display()
.to_string(),
ws_connection_string: String::default(),
http_connection_string: String::default(),
enclave_name: format!(
"enclave-{}-{}",
SystemTime::now()
@@ -160,15 +167,19 @@ impl LighthouseGethNode {
/* Spawned Processes */
process: None,
/* Prefunded Account Information */
prefunded_account_address: wallet.default_signer().address(),
/* Provider Related Fields */
wallet: wallet.clone(),
nonce_manager: Default::default(),
chain_id_filler: Default::default(),
persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(),
}
}
/// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
self.init_directories()
.context("Failed to initialize the directories of the Lighthouse Geth node.")?;
@@ -198,10 +209,12 @@ impl LighthouseGethNode {
execution_layer_extra_parameters: vec![
"--nodiscover".to_string(),
"--cache=4096".to_string(),
"--txpool.globalslots=100000".to_string(),
"--txpool.globalqueue=100000".to_string(),
"--txpool.accountslots=128".to_string(),
"--txpool.accountqueue=1024".to_string(),
"--txlookuplimit=0".to_string(),
"--gcmode=archive".to_string(),
"--txpool.globalslots=500000".to_string(),
"--txpool.globalqueue=500000".to_string(),
"--txpool.accountslots=32768".to_string(),
"--txpool.accountqueue=32768".to_string(),
"--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(),
"--http.addr=0.0.0.0".to_string(),
"--ws".to_string(),
@@ -211,13 +224,14 @@ impl LighthouseGethNode {
"--ws.origins=*".to_string(),
],
consensus_layer_extra_parameters: vec![
"--disable-quic".to_string(),
"--disable-deposit-contract-sync".to_string(),
],
}],
network_parameters: NetworkParameters {
preset: NetworkPreset::Mainnet,
seconds_per_slot: 12,
network_id: 420420420,
network_id: CHAIN_ID,
deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
altair_fork_epoch: 0,
bellatrix_fork_epoch: 0,
@@ -228,14 +242,8 @@ impl LighthouseGethNode {
num_validator_keys_per_node: 64,
genesis_delay: 10,
prefunded_accounts: {
let map = NetworkWallet::<Ethereum>::signer_addresses(&self.wallet)
.map(|address| {
(
address,
GenesisAccount::default()
.with_balance(INITIAL_BALANCE.try_into().unwrap()),
)
})
let map = std::iter::once(self.prefunded_account_address)
.map(|address| (address, GenesisAccount::default().with_balance(U256::MAX)))
.collect::<BTreeMap<_, _>>();
serde_json::to_string(&map).unwrap()
},
@@ -248,7 +256,12 @@ impl LighthouseGethNode {
public_port_start: Some(32000 + self.id as u16 * 1000),
},
),
consensus_layer_port_publisher_parameters: Default::default(),
consensus_layer_port_publisher_parameters: Some(
PortPublisherSingleItemParameters {
enabled: Some(true),
public_port_start: Some(59010 + self.id as u16 * 50),
},
),
}),
};
@@ -261,7 +274,7 @@ impl LighthouseGethNode {
}
/// Spawn the go-ethereum node child process.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
let process = Process::new(
None,
@@ -292,6 +305,7 @@ impl LighthouseGethNode {
}),
},
)
.context("Failed to spawn the kurtosis enclave")
.inspect_err(|err| {
tracing::error!(?err, "Failed to spawn Kurtosis");
self.shutdown().expect("Failed to shutdown kurtosis");
@@ -316,64 +330,157 @@ impl LighthouseGethNode {
stdout
};
self.connection_string = stdout
self.http_connection_string = stdout
.split("el-1-geth-lighthouse")
.nth(1)
.and_then(|str| str.split(" rpc").nth(1))
.and_then(|str| str.split("->").nth(1))
.and_then(|str| str.split("\n").next())
.and_then(|str| str.trim().split(" ").next())
.map(|str| format!("http://{}", str.trim()))
.context("Failed to find the HTTP connection string of Kurtosis")?;
self.ws_connection_string = stdout
.split("el-1-geth-lighthouse")
.nth(1)
.and_then(|str| str.split("ws").nth(1))
.and_then(|str| str.split("->").nth(1))
.and_then(|str| str.split("\n").next())
.and_then(|str| str.trim().split(" ").next())
.map(|str| format!("ws://{}", str.trim()))
.context("Failed to find the WS connection string of Kurtosis")?;
info!(
http_connection_string = self.http_connection_string,
ws_connection_string = self.ws_connection_string,
"Discovered the connection strings for the node"
);
Ok(self)
}
async fn provider(
&self,
) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
{
ProviderBuilder::new()
.disable_recommended_fillers()
.filler(FallbackGasFiller::new(
25_000_000,
1_000_000_000,
1_000_000_000,
))
.filler(self.chain_id_filler.clone())
.filler(NonceFiller::new(self.nonce_manager.clone()))
.wallet(self.wallet.clone())
.connect(&self.connection_string)
.await
.context("Failed to create the provider for Kurtosis")
}
}
impl EthereumNode for LighthouseGethNode {
fn id(&self) -> usize {
self.id as _
}
fn connection_string(&self) -> &str {
&self.connection_string
}
#[instrument(
level = "info",
skip_all,
fields(geth_node_id = self.id, connection_string = self.connection_string),
err,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
err(Debug),
)]
fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::TransactionReceipt>> + '_>>
{
Box::pin(async move {
let provider = self
.provider()
#[allow(clippy::type_complexity)]
async fn ws_provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.persistent_ws_provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.ws_connection_string.as_str(),
FallbackGasFiller::default(),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to create provider for transaction submission")?;
.context("Failed to construct the provider")
})
.await
.cloned()
}
#[instrument(
level = "info",
skip_all,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
err(Debug),
)]
#[allow(clippy::type_complexity)]
async fn http_provider(
&self,
) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.persistent_http_provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.http_connection_string.as_str(),
FallbackGasFiller::default(),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to construct the provider")
})
.await
.cloned()
}
/// Funds all of the accounts in the Ethereum wallet from the initially funded account.
#[instrument(
level = "info",
skip_all,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
err(Debug),
)]
async fn fund_all_accounts(&self) -> anyhow::Result<()> {
let mut full_block_subscriber = self
.ws_provider()
.await
.context("Failed to create the WS provider")?
.subscribe_full_blocks()
.into_stream()
.await
.context("Full block subscriber")?;
let mut tx_hashes = futures::future::try_join_all(
NetworkWallet::<Ethereum>::signer_addresses(self.wallet.as_ref())
.enumerate()
.map(|(nonce, address)| async move {
let mut transaction = TransactionRequest::default()
.from(self.prefunded_account_address)
.to(address)
.nonce(nonce as _)
.value(INITIAL_BALANCE.try_into().unwrap());
transaction.chain_id = Some(CHAIN_ID);
self.submit_transaction(transaction).await
}),
)
.await
.context("Failed to submit all transactions")?
.into_iter()
.collect::<HashSet<_>>();
while let Some(block) = full_block_subscriber.next().await {
let Ok(block) = block else {
continue;
};
let block_number = block.number();
let block_timestamp = block.header.timestamp;
let block_transaction_count = block.transactions.len();
for hash in block.transactions.into_hashes().as_hashes().unwrap() {
tx_hashes.remove(hash);
}
info!(
block.number = block_number,
block.timestamp = block_timestamp,
block.transaction_count = block_transaction_count,
remaining_transactions = tx_hashes.len(),
"Discovered new block when funding accounts"
);
if tx_hashes.is_empty() {
break;
}
}
Ok(())
}
fn internal_execute_transaction<'a>(
transaction: TransactionRequest,
provider: FillProvider<
impl TxFiller<Ethereum> + 'a,
impl Provider<Ethereum> + Clone + 'a,
Ethereum,
>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + 'a>> {
Box::pin(async move {
let pending_transaction = provider
.send_transaction(transaction)
.await
@@ -404,10 +511,9 @@ impl EthereumNode for LighthouseGethNode {
// allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting
// with exponential backoff each time we attempt to get the receipt and find that it's
// not available.
let provider = Arc::new(provider);
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
PollingWaitBehavior::Constant(Duration::from_millis(500)),
move || {
let provider = provider.clone();
async move {
@@ -432,17 +538,92 @@ impl EthereumNode for LighthouseGethNode {
.await
})
}
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
impl EthereumNode for LighthouseGethNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { self.fund_all_accounts().await })
}
fn id(&self) -> usize {
self.id as _
}
fn connection_string(&self) -> &str {
&self.ws_connection_string
}
#[instrument(
level = "info",
skip_all,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
err,
)]
fn submit_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let provider = self
.http_provider()
.await
.context("Failed to create the provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.context("Failed to submit the transaction through the provider")?;
Ok(*pending_transaction.tx_hash())
})
}
#[instrument(
level = "info",
skip_all,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
)]
fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.ws_provider()
.await
.context("Failed to create provider for getting the receipt")?
.get_transaction_receipt(tx_hash)
.await
.context("Failed to get the receipt of the transaction")?
.context("Failed to get the receipt of the transaction")
})
}
#[instrument(
level = "info",
skip_all,
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
err,
)]
fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let provider = self
.http_provider()
.await
.context("Failed to create provider for transaction execution")?;
Self::internal_execute_transaction(transaction, provider).await
})
}
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn trace_transaction(
&self,
tx_hash: TxHash,
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
{
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
let provider = Arc::new(
self.provider()
self.http_provider()
.await
.context("Failed to create provider for tracing")?,
);
@@ -473,7 +654,7 @@ impl EthereumNode for LighthouseGethNode {
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn state_diff(
&self,
tx_hash: TxHash,
@@ -497,13 +678,13 @@ impl EthereumNode for LighthouseGethNode {
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn balance_of(
&self,
address: Address,
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
Box::pin(async move {
self.provider()
self.ws_provider()
.await
.context("Failed to get the Geth provider")?
.get_balance(address)
@@ -512,14 +693,14 @@ impl EthereumNode for LighthouseGethNode {
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn latest_state_proof(
&self,
address: Address,
keys: Vec<StorageKey>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
Box::pin(async move {
self.provider()
self.ws_provider()
.await
.context("Failed to get the Geth provider")?
.get_proof(address, keys)
@@ -529,13 +710,13 @@ impl EthereumNode for LighthouseGethNode {
})
}
// #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn resolver(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
Box::pin(async move {
let id = self.id;
let provider = self.provider().await?;
let provider = self.ws_provider().await?;
Ok(Arc::new(LighthouseGethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
})
}
@@ -543,6 +724,53 @@ impl EthereumNode for LighthouseGethNode {
fn evm_version(&self) -> EVMVersion {
EVMVersion::Cancun
}
fn subscribe_to_full_blocks_information(
&self,
) -> Pin<
Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_,
>,
> {
Box::pin(async move {
let provider = self.ws_provider().await?;
let block_subscription = provider.subscribe_full_blocks().channel_size(1024);
let block_stream = block_subscription
.into_stream()
.await
.context("Failed to create the block stream")?;
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit as _,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -553,14 +781,14 @@ pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereu
impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
for LighthouseGethNodeResolver<F, P>
{
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn chain_id(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn transaction_gas_price(
&self,
tx_hash: TxHash,
@@ -574,7 +802,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_gas_limit(
&self,
number: BlockNumberOrTag,
@@ -589,7 +817,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_coinbase(
&self,
number: BlockNumberOrTag,
@@ -604,7 +832,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_difficulty(
&self,
number: BlockNumberOrTag,
@@ -619,7 +847,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_base_fee(
&self,
number: BlockNumberOrTag,
@@ -639,7 +867,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_hash(
&self,
number: BlockNumberOrTag,
@@ -654,7 +882,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn block_timestamp(
&self,
number: BlockNumberOrTag,
@@ -669,29 +897,55 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
})
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
}
}
impl Node for LighthouseGethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn shutdown(&mut self) -> anyhow::Result<()> {
if !Command::new(self.kurtosis_binary_path.as_path())
let mut child = Command::new(self.kurtosis_binary_path.as_path())
.arg("enclave")
.arg("rm")
.arg("-f")
.arg(self.enclave_name.as_str())
.stdout(Stdio::null())
.stderr(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.expect("Failed to spawn the enclave kill command")
.expect("Failed to spawn the enclave kill command");
if !child
.wait()
.expect("Failed to wait for the enclave kill command")
.success()
{
panic!("Failed to shut down the enclave {}", self.enclave_name)
let stdout = {
let mut stdout = String::default();
child
.stdout
.take()
.expect("Should be piped")
.read_to_string(&mut stdout)
.context("Failed to read stdout of kurtosis inspect to string")?;
stdout
};
let stderr = {
let mut stderr = String::default();
child
.stderr
.take()
.expect("Should be piped")
.read_to_string(&mut stderr)
.context("Failed to read stderr of kurtosis inspect to string")?;
stderr
};
panic!(
"Failed to shut down the enclave {} - stdout: {stdout}, stderr: {stderr}",
self.enclave_name
)
}
drop(self.process.take());
@@ -699,13 +953,13 @@ impl Node for LighthouseGethNode {
Ok(())
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?;
Ok(())
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.kurtosis_binary_path)
.arg("version")
@@ -722,7 +976,7 @@ impl Node for LighthouseGethNode {
}
impl Drop for LighthouseGethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
fn drop(&mut self) {
self.shutdown().expect("Failed to shutdown")
}
@@ -853,7 +1107,9 @@ mod tests {
use super::*;
fn test_config() -> TestExecutionContext {
TestExecutionContext::default()
let mut config = TestExecutionContext::default();
config.wallet_configuration.additional_keys = 100;
config
}
fn new_node() -> (TestExecutionContext, LighthouseGethNode) {
@@ -888,6 +1144,7 @@ mod tests {
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = new_node();
node.fund_all_accounts().await.expect("Failed");
let account_address = context
.wallet_configuration
@@ -906,6 +1163,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn version_works() {
// Arrange
let (_context, node) = new_node();
@@ -922,6 +1180,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_chain_id_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -935,6 +1194,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_gas_limit_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -952,6 +1212,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_coinbase_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -969,6 +1230,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_difficulty_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -986,6 +1248,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_hash_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -1003,6 +1266,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_timestamp_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -1020,6 +1284,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_number_from_node() {
// Arrange
let (_context, node) = new_node();
@@ -0,0 +1,4 @@
pub mod geth;
pub mod lighthouse_geth;
pub mod substrate;
pub mod zombienet;
@@ -23,17 +23,20 @@ use alloy::{
TxHash, U256,
},
providers::{
Provider, ProviderBuilder,
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt,
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
eth::{Block, Header, Transaction},
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
@@ -43,14 +46,18 @@ use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use tokio::sync::OnceCell;
use tracing::instrument;
use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
constants::{CHAIN_ID, INITIAL_BALANCE},
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -59,6 +66,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
/// or the revive-dev-node which is done by changing the path and some of the other arguments passed
/// to the command.
#[derive(Debug)]
pub struct SubstrateNode {
id: u32,
node_binary: PathBuf,
@@ -71,11 +79,12 @@ pub struct SubstrateNode {
eth_proxy_process: Option<Process>,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
chain_id: alloy::primitives::ChainId,
}
impl SubstrateNode {
const BASE_DIRECTORY: &str = "Substrate";
const BASE_DIRECTORY: &str = "substrate";
const LOGS_DIRECTORY: &str = "logs";
const DATA_DIRECTORY: &str = "chains";
@@ -121,11 +130,66 @@ impl SubstrateNode {
substrate_process: None,
eth_proxy_process: None,
wallet: wallet.clone(),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
provider: Default::default(),
chain_id: CHAIN_ID,
}
}
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key
.trim()
.strip_prefix("0x")
.unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let wallet = Arc::new(EthereumWallet::new(signer));
let rpc_url = format!("http://localhost:{}", rpc_port);
// Query the chain ID from the RPC
let chain_id = ProviderBuilder::new()
.connect_http(rpc_url.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
Ok(Self {
id: 0,
node_binary: PathBuf::new(),
eth_proxy_binary: PathBuf::new(),
export_chainspec_command: String::new(),
rpc_url,
base_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
substrate_process: None,
eth_proxy_process: None,
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
chain_id,
})
}
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = remove_dir_all(self.base_directory.as_path());
let _ = clear_directory(&self.base_directory);
@@ -144,6 +208,7 @@ impl SubstrateNode {
.arg(self.export_chainspec_command.as_str())
.arg("--chain")
.arg("dev")
.env_remove("RUST_LOG")
.output()
.context("Failed to export the chain-spec")?;
@@ -335,27 +400,29 @@ impl SubstrateNode {
async fn provider(
&self,
) -> anyhow::Result<
FillProvider<impl TxFiller<ReviveNetwork>, impl Provider<ReviveNetwork>, ReviveNetwork>,
> {
ProviderBuilder::new()
.disable_recommended_fillers()
.network::<ReviveNetwork>()
.filler(FallbackGasFiller::new(
25_000_000,
1_000_000_000,
1_000_000_000,
))
.filler(self.chain_id_filler.clone())
.filler(NonceFiller::new(self.nonce_manager.clone()))
.wallet(self.wallet.clone())
.connect(&self.rpc_url)
) -> anyhow::Result<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<ReviveNetwork, _>(
self.rpc_url.as_str(),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::new(Some(self.chain_id)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to construct the provider")
})
.await
.map_err(Into::into)
.cloned()
}
}
impl EthereumNode for SubstrateNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}
fn id(&self) -> usize {
self.id as _
}
@@ -364,22 +431,48 @@ impl EthereumNode for SubstrateNode {
&self.rpc_url
}
fn execute_transaction(
fn submit_transaction(
&self,
transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let receipt = self
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?
.context("Failed to create the provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.context("Failed to submit transaction to substrate proxy")?
.get_receipt()
.context("Failed to submit the transaction through the provider")?;
Ok(*pending_transaction.tx_hash())
})
}
fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to fetch transaction receipt from substrate proxy")?;
Ok(receipt)
.context("Failed to create provider for getting the receipt")?
.get_transaction_receipt(tx_hash)
.await
.context("Failed to get the receipt of the transaction")?
.context("Failed to get the receipt of the transaction")
})
}
/// Executes `transaction` through this node by delegating to the shared
/// `execute_transaction` helper once the provider is available.
fn execute_transaction(
    &self,
    transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
    Box::pin(async move {
        // Obtain (or lazily initialize) the provider, then run the shared
        // submission-and-receipt flow.
        execute_transaction(
            self.provider()
                .await
                .context("Failed to create the provider")?,
            transaction,
        )
        .await
    })
}
@@ -387,8 +480,7 @@ impl EthereumNode for SubstrateNode {
&self,
tx_hash: TxHash,
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
{
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
self.provider()
.await
@@ -463,16 +555,66 @@ impl EthereumNode for SubstrateNode {
/// Reports the EVM version this node is treated as compatible with.
///
/// NOTE(review): hard-coded to Cancun — confirm this matches the actual
/// runtime configuration of the underlying substrate node.
fn evm_version(&self) -> EVMVersion {
    EVMVersion::Cancun
}
/// Subscribes to full blocks from the node and yields a stream of
/// `MinedBlockInformation`, one item per successfully fetched block.
fn subscribe_to_full_blocks_information(
    &self,
) -> Pin<
    Box<
        dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
            + '_,
    >,
> {
    Box::pin(async move {
        let provider = self
            .provider()
            .await
            .context("Failed to create the provider for block subscription")?;
        let mut block_subscription = provider
            .watch_full_blocks()
            .await
            .context("Failed to create the blocks stream")?;
        // Large channel size so a slow consumer does not drop block
        // notifications; poll the node for new blocks once per second.
        block_subscription.set_channel_size(0xFFFF);
        block_subscription.set_poll_interval(Duration::from_secs(1));
        let block_stream = block_subscription.into_stream();
        // Blocks that failed to fetch (`Err`) are silently skipped by the
        // `filter_map`; successful ones are summarized.
        let mined_block_information_stream = block_stream.filter_map(|block| async {
            let block = block.ok()?;
            Some(MinedBlockInformation {
                block_number: block.number(),
                block_timestamp: block.header.timestamp,
                // Inferred-type cast of `gas_used`; assumed lossless for the
                // target field type — TODO confirm.
                mined_gas: block.header.gas_used as _,
                block_gas_limit: block.header.gas_limit,
                // Full blocks are converted down to transaction hashes only;
                // `expect` is safe right after `into_hashes()`.
                transaction_hashes: block
                    .transactions
                    .into_hashes()
                    .as_hashes()
                    .expect("Must be hashes")
                    .to_vec(),
            })
        });
        // Pin and box so the stream can be returned as a trait object.
        Ok(Box::pin(mined_block_information_stream)
            as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
    })
}
/// Resolves `address` to a usable signer address.
///
/// Returns `address` unchanged when it belongs to one of the wallet's
/// registered signers; otherwise falls back to the wallet's default signer
/// address.
fn resolve_signer_or_default(&self, address: Address) -> Address {
    // Scan the signer addresses directly with `any` instead of collecting
    // them into an intermediate `Vec` and calling `contains` (avoids a
    // needless allocation per call).
    let is_known_signer =
        <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
            .any(|signer_address| signer_address == address);
    if is_known_signer {
        address
    } else {
        self.wallet.default_signer().address()
    }
}
}
pub struct SubstrateNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
pub struct SubstrateNodeResolver {
id: u32,
provider: FillProvider<F, P, ReviveNetwork>,
provider: ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>,
}
impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
for SubstrateNodeResolver<F, P>
{
impl ResolverApi for SubstrateNodeResolver {
#[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))]
fn chain_id(
&self,
@@ -1068,9 +1210,7 @@ mod tests {
use crate::Node;
fn test_config() -> TestExecutionContext {
let mut context = TestExecutionContext::default();
context.kitchensink_configuration.use_kitchensink = true;
context
TestExecutionContext::default()
}
fn new_node() -> (TestExecutionContext, SubstrateNode) {
@@ -1142,6 +1282,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn test_init_generates_chainspec_with_balances() {
let genesis_content = r#"
{
@@ -1195,6 +1336,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
@@ -1237,6 +1379,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn print_eth_to_substrate_mappings() {
let eth_addresses = vec![
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
@@ -1252,6 +1395,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn test_eth_to_substrate_address() {
let cases = vec![
(
@@ -1282,6 +1426,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn version_works() {
let node = shared_node();
@@ -1294,6 +1439,7 @@ mod tests {
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn eth_rpc_version_works() {
let node = shared_node();
@@ -1306,6 +1452,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_chain_id_from_node() {
// Arrange
let node = shared_node();
@@ -1319,6 +1466,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_gas_limit_from_node() {
// Arrange
let node = shared_node();
@@ -1336,6 +1484,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_coinbase_from_node() {
// Arrange
let node = shared_node();
@@ -1353,6 +1502,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_difficulty_from_node() {
// Arrange
let node = shared_node();
@@ -1370,6 +1520,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_hash_from_node() {
// Arrange
let node = shared_node();
@@ -1387,6 +1538,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_timestamp_from_node() {
// Arrange
let node = shared_node();
@@ -1404,6 +1556,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn can_get_block_number_from_node() {
// Arrange
let node = shared_node();
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,69 @@
use std::sync::Arc;
use alloy::transports::BoxFuture;
use tokio::sync::Semaphore;
use tower::{Layer, Service};
/// A [`Layer`] that caps how many requests may be in flight at once.
///
/// Every service produced by one layer instance (and every clone of the
/// layer) shares the same semaphore, so the limit applies globally across
/// all of them.
#[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer {
    // Shared permit pool; one permit is held per in-flight request.
    semaphore: Arc<Semaphore>,
}

impl ConcurrencyLimiterLayer {
    /// Creates a layer that allows at most `permit_count` concurrent requests.
    pub fn new(permit_count: usize) -> Self {
        Self {
            semaphore: Semaphore::new(permit_count).into(),
        }
    }
}
impl<S> Layer<S> for ConcurrencyLimiterLayer {
type Service = ConcurrencyLimiterService<S>;
fn layer(&self, inner: S) -> Self::Service {
ConcurrencyLimiterService {
service: inner,
semaphore: self.semaphore.clone(),
}
}
}
/// A [`Service`] wrapper that acquires a semaphore permit before letting each
/// request's future run to completion.
#[derive(Clone)]
pub struct ConcurrencyLimiterService<S> {
    // The wrapped service that actually handles requests.
    service: S,
    // Permit pool shared with the `ConcurrencyLimiterLayer` that built this
    // service (and with all of its clones).
    semaphore: Arc<Semaphore>,
}
impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where
S: Service<Request> + Send,
S::Future: Send + 'static,
{
type Response = S::Response;
type Error = S::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(
&mut self,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
fn call(&mut self, req: Request) -> Self::Future {
let semaphore = self.semaphore.clone();
let future = self.service.call(req);
Box::pin(async move {
let _permit = semaphore
.acquire()
.await
.expect("Semaphore has been closed");
tracing::debug!(
available_permits = semaphore.available_permits(),
"Acquired Semaphore Permit"
);
future.await
})
}
}
@@ -30,6 +30,12 @@ impl FallbackGasFiller {
}
}
impl Default for FallbackGasFiller {
fn default() -> Self {
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
}
}
impl<N> TxFiller<N> for FallbackGasFiller
where
N: Network,
+7
View File
@@ -0,0 +1,7 @@
// Provider utilities: a global RPC concurrency limiter, a fallback gas
// filler, and the concrete provider construction helpers. Everything is
// re-exported flat so callers use `provider_utils::X` directly.
mod concurrency_limiter;
mod fallback_gas_provider;
mod provider;

pub use concurrency_limiter::*;
pub use fallback_gas_provider::*;
pub use provider::*;
+128
View File
@@ -0,0 +1,128 @@
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
use alloy::{
network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
providers::{
Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
},
rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::debug;
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
/// The concrete provider type produced by
/// [`construct_concurrency_limited_provider`]: a [`RootProvider`] wrapped
/// with the fallback-gas, chain-id, nonce, and wallet fillers, in that order.
pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
        JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
        WalletFiller<W>,
    >,
    RootProvider<N>,
    N,
>;
/// Builds a [`ConcreteProvider`] whose RPC client is throttled by a global
/// concurrency limiter shared across every provider created through this
/// function.
///
/// # Errors
///
/// Returns an error if the underlying RPC client cannot be constructed
/// (e.g. `rpc_url` is malformed or unreachable).
pub async fn construct_concurrency_limited_provider<N, W>(
    rpc_url: &str,
    fallback_gas_filler: FallbackGasFiller,
    chain_id_filler: ChainIdFiller,
    nonce_filler: NonceFiller,
    wallet: W,
) -> Result<ConcreteProvider<N, W>>
where
    N: Network<TransactionRequest: TransactionBuilder4844>,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // This is a global limit on the RPC concurrency that applies to all of the providers created
    // by the framework. With this limit, it means that we can have a maximum of N concurrent
    // requests at any point of time and no more than that. This is done in an effort to stabilize
    // the framework from some of the intermittent issues that we've been seeing related to RPC
    // calls.
    //
    // Note: a `static` declared inside a function is a single item shared by
    // all monomorphizations of the function, so this limiter really is
    // global and not per-instantiation.
    static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
        LazyLock::new(|| ConcurrencyLimiterLayer::new(10));

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;

    // Recommended fillers are disabled so the resulting filler stack matches
    // the `ConcreteProvider` type alias exactly.
    let provider = ProviderBuilder::new()
        .disable_recommended_fillers()
        .network::<N>()
        .filler(fallback_gas_filler)
        .filler(chain_id_filler)
        .filler(nonce_filler)
        .wallet(wallet)
        .connect_client(client);
    Ok(provider)
}
/// Fills, signs, and submits `transaction`, then waits for its receipt.
///
/// Submission is resilient to the node reporting the transaction as already
/// imported (the pending transaction is rebuilt from the precomputed hash),
/// inclusion is watched with a 120 s timeout, and the receipt is then polled
/// for up to 60 s.
///
/// # Errors
///
/// Returns an error if filling, envelope conversion, submission, inclusion
/// watching, or receipt polling fails or times out.
pub async fn execute_transaction<N, W>(
    provider: ConcreteProvider<N, W>,
    transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
    N: Network<
        TransactionRequest: TransactionBuilder4844,
        TxEnvelope = <Ethereum as Network>::TxEnvelope,
    >,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    let sendable_transaction = provider
        .fill(transaction)
        .await
        .context("Failed to fill transaction")?;
    let transaction_envelope = sendable_transaction
        .try_into_envelope()
        .context("Failed to convert transaction into an envelope")?;
    // The hash is computed before submission so it can be used both for
    // logging and for recovering when the node already knows the transaction.
    let tx_hash = *transaction_envelope.tx_hash();

    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
        Ok(pending_transaction) => pending_transaction,
        Err(error) => {
            let error_string = error.to_string();
            if error_string.contains("Transaction Already Imported") {
                // The node has already seen this transaction; watch it by
                // hash instead of treating the resubmission as a failure.
                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
            } else {
                // `with_context` defers the `format!` allocation to the
                // error path (anyhow best practice; was an eager `context`).
                return Err(error)
                    .with_context(|| format!("Failed to submit transaction {tx_hash}"));
            }
        }
    };
    debug!(%tx_hash, "Submitted Transaction");

    pending_transaction.set_timeout(Some(Duration::from_secs(120)));
    let tx_hash = pending_transaction
        .watch()
        .await
        .with_context(|| format!("Transaction inclusion watching timeout for {tx_hash}"))?;

    // The receipt can lag inclusion, so poll for up to a minute at a fixed
    // 3-second interval.
    poll(
        Duration::from_secs(60),
        PollingWaitBehavior::Constant(Duration::from_secs(3)),
        || {
            let provider = provider.clone();
            async move {
                match provider.get_transaction_receipt(tx_hash).await {
                    Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                    _ => Ok(ControlFlow::Continue(())),
                }
            }
        },
    )
    .await
    .with_context(|| format!("Polling for receipt failed for {tx_hash}"))
}
+1 -1
View File
@@ -13,7 +13,7 @@ revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-compiler = { workspace = true }
alloy-primitives = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
+11 -2
View File
@@ -8,7 +8,7 @@ use std::{
time::{SystemTime, UNIX_EPOCH},
};
use alloy_primitives::Address;
use alloy::primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
@@ -106,6 +106,10 @@ impl ReportAggregator {
RunnerEvent::ContractDeployed(event) => {
self.handle_contract_deployed_event(*event);
}
RunnerEvent::Completion(event) => {
self.handle_completion(*event);
break;
}
}
}
debug!("Report aggregation completed");
@@ -382,6 +386,10 @@ impl ReportAggregator {
.insert(event.contract_instance, event.address);
}
/// Handles the run-completion event by closing the runner event channel;
/// the aggregation loop observes the break and terminates.
fn handle_completion(&mut self, _: CompletionEvent) {
    self.runner_rx.close();
}
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
self.report
.test_case_information
@@ -434,7 +442,8 @@ impl Report {
#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
/// Information on the status of the test case and whether it succeeded, failed, or was
/// ignored.
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<TestCaseStatus>,
/// Information related to the execution on one of the platforms.
+7 -3
View File
@@ -3,13 +3,15 @@
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use alloy_primitives::Address;
use alloy::primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use revive_dt_format::{
corpus::Corpus,
metadata::{ContractInstance, Metadata},
};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
@@ -613,6 +615,8 @@ define_event! {
/// The address of the contract.
address: Address
},
/// Reports the completion of the run.
Completion {}
}
}
+10 -5
View File
@@ -75,7 +75,9 @@ ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "$ABSOLUTE_PATH"
"paths": [
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
]
}
EOF
@@ -89,12 +91,15 @@ echo "This may take a while..."
echo ""
# Run the tool
RUST_LOG="info" cargo run --release -- execute-tests \
--platform geth-evm-solc \
--platform revive-dev-node-polkavm-resolc \
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
--platform revive-dev-node-revm-solc \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 5 \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 5 \
--concurrency.number-of-concurrent-tasks 1000 \
--wallet.additional-keys 100000 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \