Compare commits

...

12 Commits

Author SHA1 Message Date
Omar Abdulla 0a28a90168 Remove unneeded dependency 2025-10-14 16:34:03 +03:00
Omar Abdulla ee55eea6b2 Report the ref time and proof size for substrate chains in block information 2025-10-13 10:54:52 +03:00
Omar Abdulla 1026426616 Add a provider method to the EthereumNode 2025-10-13 06:18:49 +03:00
Omar Abdulla 2827f32478 Remove the revive network 2025-10-13 05:52:08 +03:00
Omar 3c86cbb7ef Make output format deserializable (#187)
* Make output format deserializable

* Flush the buffer after writing the entire file output
2025-10-09 15:41:26 +00:00
Omar fde07b7c0d Allow for succeeding tests to be ignored (#186) 2025-10-09 14:35:09 +00:00
Omar ebc24a588b Add different output formats (#185)
* Add different output formats

* Add the mode to the output
2025-10-09 14:24:14 +00:00
Omar 21e25f09e6 Zombienet & Benchmarks Cleanups (#184)
* Minor zombienet cleanups

* Remove unnecessary trace call from the benchmark driver

* Improve the benchmarks driver

* Ignore the lighthouse tests

* Allow for the consensus to be specified for the revive dev node

* Ignore the zombienet tests for the time being
2025-10-09 11:41:01 +00:00
Marios 8c412dc924 Use absolute path for zombienet to avoid conflicts (#178) 2025-10-07 16:03:18 +00:00
Omar 6da3172581 Fix/fallback gas for substrate (#177)
* Update compiler semaphore

* Fix the fallback gas for substrate chains

* Fix the concurrency & the substrate gas limit fallback value
2025-10-07 02:25:37 +00:00
Marios c6eb04b04e Support zombienet (#172)
* Basic zombie node definition

* [WIP] - Impl EthereumNode for zombie node

* Remove unused imports

* [WIP] Support substrate node in zombienet network

* Impl zombie node resolver && node for zombie node

* Spawn eth-rpc on top of collator node

* Implement ZombienetPlatform and integrate zombie node with eth rpc

* Add workaround to run tests

* Add a few comments

* fmt

* Replace default transaction request in test

* Merge - Fix conflicts with main

* fmt

* Clippy fix

* Add polkadot and parachain node to ci

* CI - Fetch polkadot binaries from releases

* Fix unit test assertion

* Minor doc improvements

* Change names from ZombieNet to Zombienet and switch from Command to Process

* Refactor ZombieNode to cache provider using OnceCell

* CI: Cache polkadot binaries and use them if available

* Fix conflicts with main

* Refactor shared_node to return static reference and add shared_state for context access

* fmt

* Rename ZombienetConfiguration to PolkadotParachainConfiguration and update related usage
2025-10-07 02:24:20 +00:00
Omar e5114d31dc Update compiler semaphore (#176) 2025-10-05 22:28:57 +00:00
35 changed files with 4787 additions and 1123 deletions
+62
@@ -15,6 +15,7 @@ concurrency:
env:
CARGO_TERM_COLOR: always
POLKADOT_VERSION: polkadot-stable2506-2
jobs:
cache-polkadot:
@@ -65,6 +66,37 @@ jobs:
run: |
cd polkadot-sdk
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
- name: Cache downloaded Polkadot binaries
id: cache-polkadot
uses: actions/cache@v3
with:
path: |
~/polkadot-cache/polkadot
~/polkadot-cache/polkadot-execute-worker
~/polkadot-cache/polkadot-prepare-worker
~/polkadot-cache/polkadot-parachain
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
- name: Download Polkadot binaries on macOS
if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
run: |
mkdir -p ~/polkadot-cache
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
chmod +x ~/polkadot-cache/*
- name: Download Polkadot binaries on Ubuntu
if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
run: |
mkdir -p ~/polkadot-cache
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
chmod +x ~/polkadot-cache/*
ci:
name: CI on ${{ matrix.os }}
@@ -86,6 +118,24 @@ jobs:
~/.cargo/bin/eth-rpc
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
- name: Restore downloaded Polkadot binaries from cache
uses: actions/cache@v3
with:
path: |
~/polkadot-cache/polkadot
~/polkadot-cache/polkadot-execute-worker
~/polkadot-cache/polkadot-prepare-worker
~/polkadot-cache/polkadot-parachain
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
- name: Install Polkadot binaries
run: |
sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
sudo chmod +x /usr/local/bin/polkadot*
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
@@ -170,6 +220,18 @@ jobs:
- name: Check resolc version
run: resolc --version
- name: Check polkadot version
run: polkadot --version
- name: Check polkadot-parachain version
run: polkadot-parachain --version
- name: Check polkadot-execute-worker version
run: polkadot-execute-worker --version
- name: Check polkadot-prepare-worker version
run: polkadot-prepare-worker --version
- name: Test Formatting
run: make format
Generated
+2322 -58
File diff suppressed because it is too large
+4
@@ -22,6 +22,7 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
ansi_term = "0.12.1"
anyhow = "1.0"
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
@@ -49,6 +50,7 @@ sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
subxt = { version = "0.44.0" }
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
thiserror = "2"
@@ -73,6 +75,8 @@ revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
[workspace.dependencies.alloy]
version = "1.0.37"
default-features = false
Binary file not shown.
+1
@@ -14,6 +14,7 @@ anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
once_cell = { workspace = true }
regex = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
+6
@@ -39,6 +39,10 @@ pub enum PlatformIdentifier {
ReviveDevNodePolkavmResolc,
/// The revive dev node with the REVM backend with the solc compiler.
ReviveDevNodeRevmSolc,
/// A zombienet-based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
ZombienetPolkavmResolc,
/// A zombienet-based Substrate/Polkadot node with the REVM backend with the solc compiler.
ZombienetRevmSolc,
}
/// An enum of the platform identifiers of all of the platforms supported by this framework.
@@ -95,6 +99,8 @@ pub enum NodeIdentifier {
Kitchensink,
/// The revive dev node implementation.
ReviveDevNode,
/// A zombienet-spawned node.
Zombienet,
}
/// An enum representing the identifiers of the supported VMs.
+265
@@ -1,6 +1,11 @@
use crate::iterators::EitherIter;
use crate::types::VersionOrRequirement;
use anyhow::{Context as _, bail};
use regex::Regex;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
@@ -33,6 +38,19 @@ impl Display for Mode {
}
}
impl FromStr for Mode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parsed_mode = ParsedMode::from_str(s)?;
let mut iter = parsed_mode.to_modes();
let (Some(mode), None) = (iter.next(), iter.next()) else {
bail!("Failed to parse the mode")
};
Ok(mode)
}
}
impl Mode {
/// Return all of the available mode combinations.
pub fn all() -> impl Iterator<Item = &'static Mode> {
@@ -171,3 +189,250 @@ impl ModeOptimizerSetting {
!matches!(self, ModeOptimizerSetting::M0)
}
}
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
pub pipeline: Option<ModePipeline>,
pub optimize_flag: Option<bool>,
pub optimize_setting: Option<ModeOptimizerSetting>,
pub version: Option<semver::VersionReq>,
}
impl FromStr for ParsedMode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?x)
^
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
\s*
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
\s*
(?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
$
").unwrap()
});
let Some(caps) = REGEX.captures(s) else {
anyhow::bail!("Cannot parse mode '{s}' from string");
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
Ok(ParsedMode {
pipeline,
optimize_flag,
optimize_setting,
version,
})
}
}
impl Display for ParsedMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut has_written = false;
if let Some(pipeline) = self.pipeline {
pipeline.fmt(f)?;
if let Some(optimize_flag) = self.optimize_flag {
f.write_str(if optimize_flag { "+" } else { "-" })?;
}
has_written = true;
}
if let Some(optimize_setting) = self.optimize_setting {
if has_written {
f.write_str(" ")?;
}
optimize_setting.fmt(f)?;
has_written = true;
}
if let Some(version) = &self.version {
if has_written {
f.write_str(" ")?;
}
version.fmt(f)?;
}
Ok(())
}
}
impl From<ParsedMode> for String {
fn from(parsed_mode: ParsedMode) -> Self {
parsed_mode.to_string()
}
}
impl TryFrom<String> for ParsedMode {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
ParsedMode::from_str(&value)
}
}
impl ParsedMode {
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|| EitherIter::A(ModePipeline::test_cases()),
|p| EitherIter::B(std::iter::once(*p)),
);
let optimize_flag_setting = self.optimize_flag.map(|flag| {
if flag {
ModeOptimizerSetting::M3
} else {
ModeOptimizerSetting::M0
}
});
let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)),
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
};
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|| EitherIter::A(optimize_flag_iter),
|s| EitherIter::B(std::iter::once(*s)),
);
pipeline_iter.flat_map(move |pipeline| {
optimize_settings_iter
.clone()
.map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: self.version.clone(),
})
})
}
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
/// This avoids any duplicate entries.
pub fn many_to_modes<'a>(
parsed: impl Iterator<Item = &'a ParsedMode>,
) -> impl Iterator<Item = Mode> {
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
modes.into_iter()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parsed_mode_from_str() {
let strings = vec![
("Mz", "Mz"),
("Y", "Y"),
("Y+", "Y+"),
("Y-", "Y-"),
("E", "E"),
("E+", "E+"),
("E-", "E-"),
("Y M0", "Y M0"),
("Y M1", "Y M1"),
("Y M2", "Y M2"),
("Y M3", "Y M3"),
("Y Ms", "Y Ms"),
("Y Mz", "Y Mz"),
("E M0", "E M0"),
("E M1", "E M1"),
("E M2", "E M2"),
("E M3", "E M3"),
("E Ms", "E Ms"),
("E Mz", "E Mz"),
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
("Y 0.8.0", "Y ^0.8.0"),
("E+ 0.8.0", "E+ ^0.8.0"),
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
("E Mz <0.7.0", "E Mz <0.7.0"),
// We can parse +- _and_ M1/M2 but the latter takes priority.
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
("E- M2 0.7.0", "E- M2 ^0.7.0"),
// We don't see this in the wild but it is parsed.
("<=0.8", "<=0.8"),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
assert_eq!(
expected,
parsed.to_string(),
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
);
}
}
#[test]
fn test_parsed_mode_to_test_modes() {
let strings = vec![
("Mz", vec!["Y Mz", "E Mz"]),
("Y", vec!["Y M0", "Y M3"]),
("E", vec!["E M0", "E M3"]),
("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
(
"<=0.8",
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
assert_eq!(
expected_set, actual_set,
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
);
}
}
}
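For reference, a minimal usage sketch of the parser above (assuming `ParsedMode` is imported from `revive_dt_common::types`, as in the test-definitions change further down; the assertions mirror the unit tests):

use std::str::FromStr;

use revive_dt_common::types::ParsedMode;

fn main() -> anyhow::Result<()> {
    // "Y+ 0.8.0": pipeline Y, optimize flag "+", version requirement ^0.8.0.
    let parsed = ParsedMode::from_str("Y+ 0.8.0")?;
    assert_eq!(parsed.to_string(), "Y+ ^0.8.0");

    // The "+" flag pins the optimizer setting to M3, so the mode expansion
    // yields exactly one concrete Mode.
    let modes: Vec<String> = parsed.to_modes().map(|m| m.to_string()).collect();
    assert_eq!(modes, vec!["Y M3 ^0.8.0"]);

    // Unspecified parts fan out over the test-case combinations, e.g. a bare
    // optimizer setting runs against both pipelines ("Y Mz" and "E Mz").
    assert_eq!(ParsedMode::from_str("Mz")?.to_modes().count(), 2);
    Ok(())
}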
@@ -7,7 +7,10 @@ pragma solidity >=0.6.9;
import "./callable.sol";
contract Main {
function main(uint[1] calldata p1, Callable callable) public returns(uint) {
function main(
uint[1] calldata p1,
Callable callable
) public pure returns (uint) {
return callable.f(p1);
}
}
+169 -46
@@ -12,19 +12,18 @@ use std::{
use alloy::{
genesis::Genesis,
hex::ToHexExt,
network::EthereumWallet,
primitives::{FixedBytes, U256},
primitives::{B256, FixedBytes, U256},
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use revive_dt_common::types::PlatformIdentifier;
use semver::Version;
use serde::{Serialize, Serializer};
use serde::{Deserialize, Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir;
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
#[command(name = "retester")]
pub enum Context {
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
@@ -107,6 +106,16 @@ impl AsRef<KurtosisConfiguration> for Context {
}
}
impl AsRef<PolkadotParachainConfiguration> for Context {
fn as_ref(&self) -> &PolkadotParachainConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
}
impl AsRef<KitchensinkConfiguration> for Context {
fn as_ref(&self) -> &KitchensinkConfiguration {
match self {
@@ -190,8 +199,30 @@ impl AsRef<ReportConfiguration> for Context {
}
}
#[derive(Clone, Debug, Parser, Serialize)]
impl AsRef<IgnoreSuccessConfiguration> for Context {
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(..) => unreachable!(),
Self::ExportJsonSchema => unreachable!(),
}
}
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct TestExecutionContext {
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
long = "platform",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)]
pub platforms: Vec<PlatformIdentifier>,
/// The output format to use for the tool's output.
#[arg(short, long, default_value_t = OutputFormat::CargoTestLike)]
pub output_format: OutputFormat,
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
///
@@ -205,14 +236,6 @@ pub struct TestExecutionContext {
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
long = "platform",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)]
pub platforms: Vec<PlatformIdentifier>,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
@@ -225,6 +248,10 @@ pub struct TestExecutionContext {
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the Polkadot Parachain.
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
@@ -264,9 +291,13 @@ pub struct TestExecutionContext {
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
/// Configuration parameters for ignoring certain test cases based on the report
#[clap(flatten, next_help_heading = "Ignore Success Configuration")]
pub ignore_success_configuration: IgnoreSuccessConfiguration,
}
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct BenchmarkingContext {
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
@@ -318,6 +349,10 @@ pub struct BenchmarkingContext {
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Polkadot Parachain.
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
@@ -379,6 +414,12 @@ impl AsRef<GethConfiguration> for TestExecutionContext {
}
}
impl AsRef<PolkadotParachainConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &PolkadotParachainConfiguration {
&self.polkadot_parachain_configuration
}
}
impl AsRef<KurtosisConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &KurtosisConfiguration {
&self.lighthouse_configuration
@@ -433,6 +474,12 @@ impl AsRef<ReportConfiguration> for TestExecutionContext {
}
}
impl AsRef<IgnoreSuccessConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
&self.ignore_success_configuration
}
}
impl Default for BenchmarkingContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
@@ -475,6 +522,12 @@ impl AsRef<KurtosisConfiguration> for BenchmarkingContext {
}
}
impl AsRef<PolkadotParachainConfiguration> for BenchmarkingContext {
fn as_ref(&self) -> &PolkadotParachainConfiguration {
&self.polkadot_parachain_configuration
}
}
impl AsRef<KitchensinkConfiguration> for BenchmarkingContext {
fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration
@@ -518,7 +571,7 @@ impl AsRef<ReportConfiguration> for BenchmarkingContext {
}
/// A set of configuration parameters for the corpus files to use for the execution.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CorpusConfiguration {
/// A list of test corpus JSON files to be tested.
#[arg(short = 'c', long = "corpus")]
@@ -526,7 +579,7 @@ pub struct CorpusConfiguration {
}
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct SolcConfiguration {
/// Specifies the default version of the Solc compiler that should be used if there is no
/// override specified by one of the test cases.
@@ -535,7 +588,7 @@ pub struct SolcConfiguration {
}
/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ResolcConfiguration {
/// Specifies the path of the resolc compiler to be used by the tool.
///
@@ -545,8 +598,32 @@ pub struct ResolcConfiguration {
pub path: PathBuf,
}
/// A set of configuration parameters for Polkadot Parachain.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct PolkadotParachainConfiguration {
/// Specifies the path of the polkadot-parachain node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary
/// that's provided in the user's $PATH.
#[clap(
id = "polkadot-parachain.path",
long = "polkadot-parachain.path",
default_value = "polkadot-parachain"
)]
pub path: PathBuf,
/// The amount of time to wait on startup before considering the node to have timed out.
#[clap(
id = "polkadot-parachain.start-timeout-ms",
long = "polkadot-parachain.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
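// Illustrative CLI surface for the options above (hypothetical invocation; the
// flag names come from the clap attributes, the `test` subcommand name is
// assumed from the `Context::Test` variant, and `start-timeout-ms` is parsed
// as milliseconds via `parse_duration` further down):
//
//     retester test \
//         --polkadot-parachain.path /usr/local/bin/polkadot-parachain \
//         --polkadot-parachain.start-timeout-ms 10000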
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct GethConfiguration {
/// Specifies the path of the geth node to be used by the tool.
///
@@ -566,7 +643,7 @@ pub struct GethConfiguration {
}
/// A set of configuration parameters for kurtosis.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KurtosisConfiguration {
/// Specifies the path of the kurtosis node to be used by the tool.
///
@@ -581,7 +658,7 @@ pub struct KurtosisConfiguration {
}
/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KitchensinkConfiguration {
/// Specifies the path of the kitchensink node to be used by the tool.
///
@@ -605,7 +682,7 @@ pub struct KitchensinkConfiguration {
}
/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
@@ -626,10 +703,18 @@ pub struct ReviveDevNodeConfiguration {
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// The consensus to use for the spawned revive-dev-node.
#[clap(
id = "revive-dev-node.consensus",
long = "revive-dev-node.consensus",
default_value = "instant-seal"
)]
pub consensus: String,
}
/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct EthRpcConfiguration {
/// Specifies the path of the ETH RPC to be used by the tool.
///
@@ -649,7 +734,7 @@ pub struct EthRpcConfiguration {
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Default, Parser, Serialize)]
#[derive(Clone, Debug, Default, Parser, Serialize, Deserialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
@@ -687,15 +772,14 @@ impl GenesisConfiguration {
}
/// A set of configuration parameters for the wallet.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct WalletConfiguration {
/// The private key of the default signer.
#[clap(
long = "wallet.default-private-key",
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
)]
#[serde(serialize_with = "serialize_private_key")]
default_key: PrivateKeySigner,
default_key: B256,
/// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
@@ -713,7 +797,8 @@ impl WalletConfiguration {
pub fn wallet(&self) -> Arc<EthereumWallet> {
self.wallet
.get_or_init(|| {
let mut wallet = EthereumWallet::new(self.default_key.clone());
let mut wallet =
EthereumWallet::new(PrivateKeySigner::from_bytes(&self.default_key).unwrap());
for signer in (1..=self.additional_keys)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
@@ -731,15 +816,8 @@ impl WalletConfiguration {
}
}
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
value.to_bytes().encode_hex().serialize(serializer)
}
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ConcurrencyConfiguration {
/// Determines the number of nodes that will be spawned for each chain.
#[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
@@ -777,14 +855,14 @@ impl ConcurrencyConfiguration {
}
}
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CompilationConfiguration {
/// Controls if the compilation cache should be invalidated or not.
#[arg(long = "compilation.invalidate-cache")]
pub invalidate_compilation_cache: bool,
}
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReportConfiguration {
/// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")]
@@ -795,6 +873,13 @@ pub struct ReportConfiguration {
pub include_compiler_output: bool,
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct IgnoreSuccessConfiguration {
/// The path of a report generated by a previous run, used to ignore the test cases that already succeeded.
#[clap(long = "ignore-success.report-path")]
pub path: Option<PathBuf>,
}
/// Represents the working directory that the program uses.
#[derive(Debug, Clone)]
pub enum WorkingDirectoryConfiguration {
@@ -804,6 +889,24 @@ pub enum WorkingDirectoryConfiguration {
Path(PathBuf),
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.as_path().serialize(serializer)
}
}
impl<'a> Deserialize<'a> for WorkingDirectoryConfiguration {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
PathBuf::deserialize(deserializer).map(Self::Path)
}
}
impl WorkingDirectoryConfiguration {
pub fn as_path(&self) -> &Path {
self.as_ref()
@@ -853,15 +956,6 @@ impl Display for WorkingDirectoryConfiguration {
}
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_path().serialize(serializer)
}
}
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s)
.map(Duration::from_millis)
@@ -893,4 +987,33 @@ pub enum TestingPlatform {
Geth,
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink,
/// A Polkadot/Substrate-based network.
Zombienet,
}
/// The output format to use for the test execution output.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum OutputFormat {
/// The legacy output format.
Legacy,
/// An output format that closely resembles the output of `cargo test`.
CargoTestLike,
}
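A small sketch of how the strum derives above behave (assuming `OutputFormat` is imported from `revive_dt_config`, as elsewhere in this changeset):

use std::str::FromStr;

use revive_dt_config::OutputFormat;

fn main() {
    // EnumString with kebab-case: "cargo-test-like" parses to CargoTestLike,
    // which is what the clap argument parsing relies on.
    let format = OutputFormat::from_str("cargo-test-like").unwrap();
    assert_eq!(format, OutputFormat::CargoTestLike);

    // Display round-trips the variant back to its kebab-case form, which is
    // what `default_value_t = OutputFormat::CargoTestLike` renders.
    assert_eq!(format.to_string(), "cargo-test-like");
}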
+1
@@ -21,6 +21,7 @@ revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
ansi_term = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }
@@ -22,6 +22,7 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
use futures::TryFutureExt;
use indexmap::IndexMap;
use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
@@ -35,7 +36,7 @@ use revive_dt_format::{
},
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, mpsc::UnboundedSender};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{
@@ -123,13 +124,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
.context("Failed to produce the pre-linking compiled contracts")?;
let mut deployed_libraries = None::<HashMap<_, _>>;
@@ -137,13 +132,7 @@ where
.test_definition
.metadata
.contract_sources()
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to retrieve contract sources from metadata"
)
})
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
.context("Failed to get the contract instances from the metadata file")?;
for library_instance in self
.test_definition
@@ -191,20 +180,19 @@ where
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = self.execute_transaction(tx).await.inspect_err(|err| {
error!(
?err,
%library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to deploy the library"
)
})?;
let receipt = self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
error!(
?err,
%library_instance,
"Failed to deploy the library"
)
})?;
debug!(
?library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Deployed library"
);
debug!(?library_instance, "Deployed library");
let library_address = receipt
.contract_address
@@ -227,13 +215,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Post-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
.context("Failed to compile the post-link contracts")?;
self.execution_state = ExecutionState::new(
@@ -269,7 +251,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%step_path,
),
err(Debug),
@@ -305,15 +286,11 @@ where
.handle_function_call_contract_deployment(step)
.await
.context("Failed to deploy contracts for the function call step")?;
let execution_receipt = self
let transaction_hash = self
.handle_function_call_execution(step, deployment_receipts)
.await
.context("Failed to handle the function call execution")?;
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await
.context("Failed to handle the function call call frame tracing")?;
self.handle_function_call_variable_assignment(step, &tracing_result)
self.handle_function_call_variable_assignment(step, transaction_hash)
.await
.context("Failed to handle function call variable assignment")?;
Ok(1)
@@ -367,18 +344,19 @@ where
&mut self,
step: &FunctionCallStep,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
) -> Result<TransactionReceipt> {
) -> Result<TxHash> {
match step.method {
// This step was already executed when `handle_step` was called. We just need to
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&step.instance)
.context("Failed to find deployment receipt for constructor call"),
.context("Failed to find deployment receipt for constructor call")
.map(|receipt| receipt.transaction_hash),
Method::Fallback | Method::FunctionName(_) => {
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
self.execute_transaction(tx).await
Ok(self.execute_transaction(tx).await?.0)
}
}
}
@@ -417,15 +395,19 @@ where
async fn handle_function_call_variable_assignment(
&mut self,
step: &FunctionCallStep,
tracing_result: &CallFrame,
tx_hash: TxHash,
) -> Result<()> {
let Some(ref assignments) = step.variable_assignments else {
return Ok(());
};
// Handling the return data variable assignments.
let callframe = OnceCell::new();
for (variable_name, output_word) in assignments.return_data.iter().zip(
tracing_result
callframe
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
.await
.context("Failed to get the callframe trace for transaction")?
.output
.as_ref()
.unwrap_or_default()
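// The OnceCell above makes the call-frame trace lazy: the first return-data
// assignment that needs the trace pays for the tracing call, and later
// iterations reuse the cached result. The pattern in isolation (a sketch, with
// `fetch_trace` as a hypothetical stand-in for the tracing call):
//
//     let cell = tokio::sync::OnceCell::new();
//     let trace = cell.get_or_try_init(|| fetch_trace(tx_hash)).await?;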
@@ -547,7 +529,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -590,7 +571,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -660,7 +640,11 @@ where
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
};
let receipt = match self.execute_transaction(tx).await {
let receipt = match self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
Ok(receipt) => receipt,
Err(error) => {
tracing::error!(?error, "Contract deployment transaction failed.");
@@ -734,7 +718,7 @@ where
async fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> anyhow::Result<TransactionReceipt> {
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
let transaction_hash = node
.submit_transaction(transaction)
@@ -747,24 +731,28 @@ where
.send(WatcherEvent::SubmittedTransaction { transaction_hash })
.context("Failed to send the transaction hash to the watcher")?;
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.await
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
}
// endregion:Transaction Execution
}
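The reworked `execute_transaction` above returns the transaction hash immediately, alongside a future that resolves to the receipt, so callers can submit several transactions before awaiting any receipts. A caller-side sketch (hypothetical `tx_a`/`tx_b` requests, error handling elided):

let (hash_a, receipt_fut_a) = self.execute_transaction(tx_a).await?;
let (hash_b, receipt_fut_b) = self.execute_transaction(tx_b).await?;
// Both transactions are already submitted; await their receipts concurrently.
let (receipt_a, receipt_b) = futures::future::try_join(receipt_fut_a, receipt_fut_b).await?;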
@@ -8,7 +8,7 @@ use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::steps::{Step, StepIdx, StepPath};
use tokio::sync::Mutex;
use tracing::{error, info, info_span, instrument, warn};
use tracing::{Instrument, error, info, info_span, instrument, warn};
use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;
@@ -86,6 +86,7 @@ pub async fn handle_differential_benchmarks(
&full_context,
metadata_files.iter(),
&platforms_and_nodes,
None,
reporter.clone(),
)
.await
@@ -159,12 +160,15 @@ pub async fn handle_differential_benchmarks(
futures::future::try_join(
watcher.run(),
driver.execute_all().inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
driver
.execute_all()
.instrument(info_span!("Executing Benchmarks", %platform_identifier))
.inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
)
.await
.context("Failed to run the driver and executor")
@@ -104,6 +104,12 @@ impl Watcher {
async move {
let mut mined_blocks_information = Vec::new();
// region:TEMPORARY
eprintln!("Watcher information for {}", self.platform_identifier);
eprintln!(
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count,ref_time,max_ref_time,proof_size,max_proof_size"
);
// endregion:TEMPORARY
while let Some(block) = blocks_information_stream.next().await {
// If the block number is equal to or less than the last block before the
// repetition then we ignore it and continue on to the next block.
@@ -118,8 +124,9 @@ impl Watcher {
}
info!(
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
block_number = block.block_number,
block_tx_count = block.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);
@@ -131,6 +138,24 @@ impl Watcher {
watch_for_transaction_hashes.remove(tx_hash);
}
// region:TEMPORARY
// TODO: The following code is TEMPORARY and will be removed once we have
// proper reporting in place. It serves as a way of doing some very simple
// reporting for the time being.
eprintln!(
"\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len(),
block.ref_time,
block.max_ref_time,
block.proof_size,
block.max_proof_size,
);
// endregion:TEMPORARY
mined_blocks_information.push(block);
}
@@ -139,41 +164,10 @@ impl Watcher {
}
};
let (_, mined_blocks_information) =
let (_, _) =
futures::future::join(watcher_event_watching_task, block_information_watching_task)
.await;
// region:TEMPORARY
{
// TODO: The following code is TEMPORARY and will be removed once we have
// proper reporting in place. It serves as a way of doing some very simple
// reporting for the time being.
use std::io::Write;
let mut stderr = std::io::stderr().lock();
writeln!(
stderr,
"Watcher information for {}",
self.platform_identifier
)?;
writeln!(
stderr,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
)?;
for block in mined_blocks_information {
writeln!(
stderr,
"{},{},{},{},{}",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
)?
}
}
// endregion:TEMPORARY
Ok(())
}
}
+9 -25
@@ -31,7 +31,7 @@ use revive_dt_format::{
traits::ResolutionContext,
};
use tokio::sync::Mutex;
use tracing::{debug, error, info, instrument};
use tracing::{error, info, instrument};
use crate::{
differential_tests::ExecutionState,
@@ -109,7 +109,6 @@ impl<'a> Driver<'a, StepsIterator> {
// endregion:Constructors
// region:Execution
#[instrument(level = "info", skip_all)]
pub async fn execute_all(mut self) -> Result<usize> {
let platform_drivers = std::mem::take(&mut self.platform_drivers);
let results = futures::future::try_join_all(
@@ -218,8 +217,6 @@ where
.flatten()
.flat_map(|(_, map)| map.values())
{
debug!(%library_instance, "Deploying Library Instance");
let ContractPathAndIdent {
contract_source_path: library_source_path,
contract_ident: library_ident,
@@ -268,12 +265,6 @@ where
)
})?;
debug!(
?library_instance,
platform_identifier = %platform_information.platform.platform_identifier(),
"Deployed library"
);
let library_address = receipt
.contract_address
.expect("Failed to deploy the library");
@@ -312,7 +303,6 @@ where
// endregion:Constructors & Initialization
// region:Step Handling
#[instrument(level = "info", skip_all)]
pub async fn execute_all(mut self) -> Result<usize> {
while let Some(result) = self.execute_next_step().await {
result?
@@ -320,14 +310,6 @@ where
Ok(self.steps_executed)
}
#[instrument(
level = "info",
skip_all,
fields(
platform_identifier = %self.platform_information.platform.platform_identifier(),
node_id = self.platform_information.node.id(),
),
)]
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
let (step_path, step) = self.steps_iterator.next()?;
info!(%step_path, "Executing Step");
@@ -344,6 +326,7 @@ where
skip_all,
fields(
platform_identifier = %self.platform_information.platform.platform_identifier(),
node_id = self.platform_information.node.id(),
%step_path,
),
err(Debug),
@@ -402,6 +385,7 @@ where
Ok(1)
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_contract_deployment(
&mut self,
step: &FunctionCallStep,
@@ -447,6 +431,7 @@ where
Ok(receipts)
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_execution(
&mut self,
step: &FunctionCallStep,
@@ -470,14 +455,12 @@ where
}
};
match self.platform_information.node.execute_transaction(tx).await {
Ok(receipt) => Ok(receipt),
Err(err) => Err(err),
}
self.platform_information.node.execute_transaction(tx).await
}
}
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_call_frame_tracing(
&mut self,
tx_hash: TxHash,
@@ -509,6 +492,7 @@ where
})
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_variable_assignment(
&mut self,
step: &FunctionCallStep,
@@ -541,6 +525,7 @@ where
Ok(())
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_assertions(
&mut self,
step: &FunctionCallStep,
@@ -583,6 +568,7 @@ where
.await
}
#[instrument(level = "debug", skip_all)]
async fn handle_function_call_assertion_item(
&self,
receipt: &TransactionReceipt,
@@ -865,7 +851,6 @@ where
level = "info",
skip_all,
fields(
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -907,7 +892,6 @@ where
level = "info",
skip_all,
fields(
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
+210 -92
@@ -1,20 +1,21 @@
//! The main entry point into differential testing.
use std::{
collections::BTreeMap,
collections::{BTreeMap, BTreeSet},
io::{BufWriter, Write, stderr},
sync::Arc,
time::Instant,
time::{Duration, Instant},
};
use ansi_term::{ANSIStrings, Color};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
use revive_dt_core::Platform;
use tokio::sync::Mutex;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};
use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
use crate::{
@@ -71,11 +72,20 @@ pub async fn handle_differential_tests(
info!("Spawned the platform nodes");
// Preparing test definitions.
let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
Some(path) => {
let report = read_to_string(path)
.context("Failed to read the report file to ignore the succeeding test cases")?;
Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
}
None => None,
};
let full_context = Context::Test(Box::new(context.clone()));
let test_definitions = create_test_definitions_stream(
&full_context,
metadata_files.iter(),
&platforms_and_nodes,
only_execute_failed_tests.as_ref(),
reporter.clone(),
)
.await
@@ -101,20 +111,40 @@ pub async fn handle_differential_tests(
)));
// Creating the driver and executing all of the steps.
let driver_task = futures::future::join_all(test_definitions.iter().map(|test_definition| {
let private_key_allocator = private_key_allocator.clone();
let cached_compiler = cached_compiler.clone();
let mode = test_definition.mode.clone();
let span = info_span!(
"Executing Test Case",
metadata_file_path = %test_definition.metadata_file_path.display(),
case_idx = %test_definition.case_idx,
mode = %mode
);
async move {
let driver =
match Driver::new_root(test_definition, private_key_allocator, &cached_compiler)
.await
let semaphore = context
.concurrency_configuration
.concurrency_limit()
.map(Semaphore::new)
.map(Arc::new);
let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
|(test_id, test_definition)| {
let running_task_list = running_task_list.clone();
let semaphore = semaphore.clone();
let private_key_allocator = private_key_allocator.clone();
let cached_compiler = cached_compiler.clone();
let mode = test_definition.mode.clone();
let span = info_span!(
"Executing Test Case",
test_id,
metadata_file_path = %test_definition.metadata_file_path.display(),
case_idx = %test_definition.case_idx,
mode = %mode,
);
async move {
let permit = match semaphore.as_ref() {
Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
None => None,
};
running_task_list.write().await.insert(test_id);
let driver = match Driver::new_root(
test_definition,
private_key_allocator,
&cached_compiler,
)
.await
{
Ok(driver) => driver,
Err(error) => {
@@ -123,35 +153,52 @@ pub async fn handle_differential_tests(
.report_test_failed_event(format!("{error:#}"))
.expect("Can't fail");
error!("Test Case Failed");
drop(permit);
running_task_list.write().await.remove(&test_id);
return;
}
};
info!("Created the driver for the test case");
info!("Created the driver for the test case");
match driver.execute_all().await {
Ok(steps_executed) => test_definition
.reporter
.report_test_succeeded_event(steps_executed)
.expect("Can't fail"),
Err(error) => {
test_definition
match driver.execute_all().await {
Ok(steps_executed) => test_definition
.reporter
.report_test_failed_event(format!("{error:#}"))
.expect("Can't fail");
error!("Test Case Failed");
}
};
info!("Finished the execution of the test case")
}
.instrument(span)
}))
.report_test_succeeded_event(steps_executed)
.expect("Can't fail"),
Err(error) => {
test_definition
.reporter
.report_test_failed_event(format!("{error:#}"))
.expect("Can't fail");
error!("Test Case Failed");
}
};
info!("Finished the execution of the test case");
drop(permit);
running_task_list.write().await.remove(&test_id);
}
.instrument(span)
},
))
.inspect(|_| {
info!("Finished executing all test cases");
reporter_clone
.report_completion_event()
.expect("Can't fail")
});
let cli_reporting_task = start_cli_reporting_task(reporter);
let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
tokio::task::spawn(async move {
loop {
let remaining_tasks = running_task_list.read().await;
info!(
count = remaining_tasks.len(),
?remaining_tasks,
"Remaining Tests"
);
tokio::time::sleep(Duration::from_secs(10)).await
}
});
futures::future::join(driver_task, cli_reporting_task).await;
@@ -159,21 +206,15 @@ pub async fn handle_differential_tests(
}
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter);
let start = Instant::now();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const GREY: &str = "\x1B[90m";
const COLOR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0;
let mut number_of_failures = 0;
let mut global_success_count = 0;
let mut global_failure_count = 0;
let mut global_ignore_count = 0;
let mut buf = BufWriter::new(stderr());
while let Ok(event) = aggregator_events_rx.recv().await {
@@ -186,55 +227,132 @@ async fn start_cli_reporting_task(reporter: Reporter) {
continue;
};
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
number_of_successes += 1;
writeln!(
buf,
"{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
)
match output_format {
OutputFormat::Legacy => {
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
global_success_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Green.bold().paint("Case Succeeded"),
Color::Green
.paint(format!(" - Steps Executed: {steps_executed}")),
])
)
}
TestCaseStatus::Failed { reason } => {
global_failure_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Red.bold().paint("Case Failed"),
Color::Red.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
TestCaseStatus::Ignored { reason, .. } => {
global_ignore_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Yellow.bold().paint("Case Ignored"),
Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
};
}
TestCaseStatus::Failed { reason } => {
number_of_failures += 1;
writeln!(
buf,
"{}{}Case Failed{} - Reason: {}{}",
RED,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
)
}
TestCaseStatus::Ignored { reason, .. } => writeln!(
let _ = writeln!(buf);
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"{}{}Case Ignored{} - Reason: {}{}",
GREY,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
),
};
"\t{} {} - {}\n",
Color::Green.paint("Running"),
metadata_file_path.display(),
mode
)
.unwrap();
let mut success_count = 0;
let mut failure_count = 0;
let mut ignored_count = 0;
writeln!(buf, "running {} tests", case_status.len()).unwrap();
for (case_idx, case_result) in case_status.iter() {
let status = match case_result {
TestCaseStatus::Succeeded { .. } => {
success_count += 1;
global_success_count += 1;
Color::Green.paint("ok")
}
TestCaseStatus::Failed { reason } => {
failure_count += 1;
global_failure_count += 1;
Color::Red.paint(format!("FAILED, {reason}"))
}
TestCaseStatus::Ignored { reason, .. } => {
ignored_count += 1;
global_ignore_count += 1;
Color::Yellow.paint(format!("ignored, {reason:?}"))
}
};
writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
}
writeln!(buf).unwrap();
let status = if failure_count > 0 {
Color::Red.paint("FAILED")
} else {
Color::Green.paint("ok")
};
writeln!(
buf,
"test result: {}. {} passed; {} failed; {} ignored",
status, success_count, failure_count, ignored_count,
)
.unwrap();
writeln!(buf).unwrap();
buf = tokio::task::spawn_blocking(move || {
buf.flush().unwrap();
buf
})
.await
.unwrap();
}
}
let _ = writeln!(buf);
}
// Summary at the end.
let _ = writeln!(
buf,
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
number_of_successes + number_of_failures,
GREEN,
number_of_successes,
COLOR_RESET,
RED,
number_of_failures,
COLOR_RESET,
start.elapsed().as_secs()
);
match output_format {
OutputFormat::Legacy => {
writeln!(
buf,
"{} cases: {} cases succeeded, {} cases failed in {} seconds",
global_success_count + global_failure_count + global_ignore_count,
Color::Green.paint(global_success_count.to_string()),
Color::Red.paint(global_failure_count.to_string()),
start.elapsed().as_secs()
)
.unwrap();
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"run finished. {} passed; {} failed; {} ignored; finished in {}s",
global_success_count,
global_failure_count,
global_ignore_count,
start.elapsed().as_secs()
)
.unwrap();
}
}
}
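The concurrency cap used in the driver task above reduces to an optional `tokio::sync::Semaphore`: `None` leaves the test drivers unbounded, while `Some(n)` keeps at most `n` in flight. The pattern in isolation (a minimal sketch; the task body is a stand-in):

use std::sync::Arc;

use tokio::sync::Semaphore;

async fn run_all(limit: Option<usize>) {
    let semaphore = limit.map(Semaphore::new).map(Arc::new);
    let tasks = (0..16).map(|task_id| {
        let semaphore = semaphore.clone();
        async move {
            // Acquire a permit only when a limit is configured; the permit is
            // held until this future completes, freeing the slot afterwards.
            let _permit = match semaphore.as_ref() {
                Some(semaphore) => {
                    Some(semaphore.acquire().await.expect("semaphore is never closed"))
                }
                None => None,
            };
            println!("running test case {task_id}");
        }
    });
    futures::future::join_all(tasks).await;
}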
+1 -1
@@ -201,7 +201,7 @@ async fn compile_contracts(
// Puts a limit on how many compilations we can perform at any given time, which helps with
// some of the errors we have been seeing under high concurrency on macOS (we have not tried
// it on Linux, so we do not know whether these issues occur there as well).
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(100));
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
let _permit = SPAWN_GATE.acquire().await?;
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
+39 -5
@@ -5,9 +5,8 @@ use std::{borrow::Cow, path::Path};
use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_common::types::{ParsedMode, PlatformIdentifier};
use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json};
use revive_dt_compiler::Mode;
@@ -17,7 +16,7 @@ use revive_dt_format::{
metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter};
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};
@@ -30,6 +29,7 @@ pub async fn create_test_definitions_stream<'a>(
context: &Context,
metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
only_execute_failed_tests: Option<&Report>,
reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
stream::iter(
@@ -140,7 +140,7 @@ pub async fn create_test_definitions_stream<'a>(
)
// Filter out the test cases which are incompatible or that can't run in the current setup.
.filter_map(move |test| async move {
match test.check_compatibility() {
match test.check_compatibility(only_execute_failed_tests) {
Ok(()) => Some(test),
Err((reason, additional_information)) => {
debug!(
@@ -200,12 +200,16 @@ pub struct TestDefinition<'a> {
impl<'a> TestDefinition<'a> {
/// Checks if this test can be run with the current configuration.
pub fn check_compatibility(&self) -> TestCheckFunctionResult {
pub fn check_compatibility(
&self,
only_execute_failed_tests: Option<&Report>,
) -> TestCheckFunctionResult {
self.check_metadata_file_ignored()?;
self.check_case_file_ignored()?;
self.check_target_compatibility()?;
self.check_evm_version_compatibility()?;
self.check_compiler_compatibility()?;
self.check_ignore_succeeded(only_execute_failed_tests)?;
Ok(())
}
@@ -313,6 +317,36 @@ impl<'a> TestDefinition<'a> {
))
}
}
/// Checks whether the test case should be executed, based on the passed report and on whether
/// the user has instructed the tool to ignore test cases that already succeeded.
fn check_ignore_succeeded(
&self,
only_execute_failed_tests: Option<&Report>,
) -> TestCheckFunctionResult {
let Some(report) = only_execute_failed_tests else {
return Ok(());
};
let test_case_status = report
.test_case_information
.get(&(self.metadata_file_path.to_path_buf().into()))
.and_then(|obj| obj.get(&self.mode))
.and_then(|obj| obj.get(&self.case_idx))
.and_then(|obj| obj.status.as_ref());
match test_case_status {
Some(TestCaseStatus::Failed { .. }) => Ok(()),
Some(TestCaseStatus::Ignored { .. }) => Err((
"Ignored since it was ignored in a previous run",
indexmap! {},
)),
Some(TestCaseStatus::Succeeded { .. }) => {
Err(("Ignored since it succeeded in a prior run", indexmap! {}))
}
None => Ok(()),
}
}
}
pub struct TestPlatformInformation<'a> {
+119 -7
@@ -16,7 +16,7 @@ use revive_dt_config::*;
use revive_dt_node::{
Node, node_implementations::geth::GethNode,
node_implementations::lighthouse_geth::LighthouseGethNode,
node_implementations::substrate::SubstrateNode,
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;
@@ -184,6 +184,7 @@ impl Platform for KitchensinkPolkavmResolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
);
let node = spawn_node(node, genesis)?;
@@ -236,6 +237,7 @@ impl Platform for KitchensinkRevmSolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
);
let node = spawn_node(node, genesis)?;
@@ -280,14 +282,17 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
);
let node = spawn_node(node, genesis)?;
@@ -332,14 +337,17 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
);
let node = spawn_node(node, genesis)?;
@@ -359,6 +367,102 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetPolkavmResolcPlatform;
impl Platform for ZombienetPolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetPolkavmResolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetRevmSolcPlatform;
impl Platform for ZombienetRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetRevmSolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
}
impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self {
match value {
@@ -378,6 +482,10 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
PlatformIdentifier::ReviveDevNodeRevmSolc => {
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetPolkavmResolc => {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
}
}
}
@@ -401,6 +509,10 @@ impl From<PlatformIdentifier> for &dyn Platform {
PlatformIdentifier::ReviveDevNodeRevmSolc => {
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetPolkavmResolc => {
&ZombienetPolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
}
}
}
-1
@@ -16,7 +16,6 @@ revive-common = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
tracing = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true }
+5 -2
@@ -1,9 +1,12 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use revive_dt_common::{
macros::define_wrapper_type,
types::{Mode, ParsedMode},
};
use crate::{mode::ParsedMode, steps::*};
use crate::steps::*;
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
-1
@@ -3,6 +3,5 @@
pub mod case;
pub mod corpus;
pub mod metadata;
pub mod mode;
pub mod steps;
pub mod traits;
+2 -2
@@ -16,11 +16,11 @@ use revive_dt_common::{
cached_fs::read_to_string,
iterators::FilesWithExtensionIterator,
macros::define_wrapper_type,
types::{Mode, VmIdentifier},
types::{Mode, ParsedMode, VmIdentifier},
};
use tracing::error;
use crate::{case::Case, mode::ParsedMode};
use crate::case::Case;
pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
-257
@@ -1,257 +0,0 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
pub pipeline: Option<ModePipeline>,
pub optimize_flag: Option<bool>,
pub optimize_setting: Option<ModeOptimizerSetting>,
pub version: Option<semver::VersionReq>,
}
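For illustration, a hedged sketch of how a mode string decomposes under this grammar; the exact enum variants (e.g. ModePipeline::Y, ModeOptimizerSetting::M3) are assumptions based on the tests further down:

use std::str::FromStr;

fn parse_example() -> anyhow::Result<()> {
    let parsed = ParsedMode::from_str("Y+ M3 >=0.8.0")?;
    assert_eq!(parsed.optimize_flag, Some(true)); // `+` selects the optimized variant
    assert_eq!(parsed.version, Some(semver::VersionReq::parse(">=0.8.0")?));
    // `parsed.pipeline` holds the Yul pipeline and `parsed.optimize_setting`
    // holds the M3 setting (variant names assumed as noted above).
    Ok(())
}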
impl FromStr for ParsedMode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?x)
^
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use, e.g. Y, E+, E-
\s*
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimizer setting, e.g. M0, Ms, Mz
\s*
(?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver requirement, e.g. >=0.8.0, 0.7, <0.8
$
").unwrap()
});
let Some(caps) = REGEX.captures(s) else {
anyhow::bail!("Cannot parse mode '{s}' from string");
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
Ok(ParsedMode {
pipeline,
optimize_flag,
optimize_setting,
version,
})
}
}
impl Display for ParsedMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut has_written = false;
if let Some(pipeline) = self.pipeline {
pipeline.fmt(f)?;
if let Some(optimize_flag) = self.optimize_flag {
f.write_str(if optimize_flag { "+" } else { "-" })?;
}
has_written = true;
}
if let Some(optimize_setting) = self.optimize_setting {
if has_written {
f.write_str(" ")?;
}
optimize_setting.fmt(f)?;
has_written = true;
}
if let Some(version) = &self.version {
if has_written {
f.write_str(" ")?;
}
version.fmt(f)?;
}
Ok(())
}
}
impl From<ParsedMode> for String {
fn from(parsed_mode: ParsedMode) -> Self {
parsed_mode.to_string()
}
}
impl TryFrom<String> for ParsedMode {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
ParsedMode::from_str(&value)
}
}
impl ParsedMode {
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|| EitherIter::A(ModePipeline::test_cases()),
|p| EitherIter::B(std::iter::once(*p)),
);
let optimize_flag_setting = self.optimize_flag.map(|flag| {
if flag {
ModeOptimizerSetting::M3
} else {
ModeOptimizerSetting::M0
}
});
let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)),
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
};
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|| EitherIter::A(optimize_flag_iter),
|s| EitherIter::B(std::iter::once(*s)),
);
pipeline_iter.flat_map(move |pipeline| {
optimize_settings_iter
.clone()
.map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: self.version.clone(),
})
})
}
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
/// This avoids any duplicate entries.
pub fn many_to_modes<'a>(
parsed: impl Iterator<Item = &'a ParsedMode>,
) -> impl Iterator<Item = Mode> {
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
modes.into_iter()
}
}
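A usage sketch of the deduplication, relying on the expansions demonstrated in the tests below: "Y+" expands to "Y M3" only, "Y" expands to "Y M0" and "Y M3", so the union holds two modes rather than three:

use std::str::FromStr;

fn dedup_example() -> anyhow::Result<()> {
    let a = ParsedMode::from_str("Y+")?; // expands to "Y M3"
    let b = ParsedMode::from_str("Y")?;  // expands to "Y M0" and "Y M3"
    let modes: Vec<_> = ParsedMode::many_to_modes([&a, &b].into_iter()).collect();
    assert_eq!(modes.len(), 2); // "Y M3" is produced by both inputs but appears once
    Ok(())
}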
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parsed_mode_from_str() {
let strings = vec![
("Mz", "Mz"),
("Y", "Y"),
("Y+", "Y+"),
("Y-", "Y-"),
("E", "E"),
("E+", "E+"),
("E-", "E-"),
("Y M0", "Y M0"),
("Y M1", "Y M1"),
("Y M2", "Y M2"),
("Y M3", "Y M3"),
("Y Ms", "Y Ms"),
("Y Mz", "Y Mz"),
("E M0", "E M0"),
("E M1", "E M1"),
("E M2", "E M2"),
("E M3", "E M3"),
("E Ms", "E Ms"),
("E Mz", "E Mz"),
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
("Y 0.8.0", "Y ^0.8.0"),
("E+ 0.8.0", "E+ ^0.8.0"),
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
("E Mz <0.7.0", "E Mz <0.7.0"),
// We can parse +- _and_ M1/M2 but the latter takes priority.
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
("E- M2 0.7.0", "E- M2 ^0.7.0"),
// We don't see this in the wild but it is parsed.
("<=0.8", "<=0.8"),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
assert_eq!(
expected,
parsed.to_string(),
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
);
}
}
#[test]
fn test_parsed_mode_to_test_modes() {
let strings = vec![
("Mz", vec!["Y Mz", "E Mz"]),
("Y", vec!["Y M0", "Y M3"]),
("E", vec!["E M0", "E M3"]),
("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
(
"<=0.8",
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
assert_eq!(
expected_set, actual_set,
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
);
}
}
}
+17
@@ -3,7 +3,9 @@
use std::pin::Pin;
use std::sync::Arc;
use alloy::network::Ethereum;
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
use alloy::providers::DynProvider;
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;
@@ -74,6 +76,9 @@ pub trait EthereumNode {
+ '_,
>,
>;
fn provider(&self)
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
}
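A hedged usage sketch of the new trait method: a caller holding a dyn EthereumNode can obtain a type-erased provider without knowing the node's concrete filler stack (the helper below is hypothetical, not part of the diff):

use alloy::providers::Provider as _;

async fn latest_block_number(node: &(dyn EthereumNode + Send + Sync)) -> anyhow::Result<u64> {
    let provider = node.provider().await?; // DynProvider<Ethereum>
    Ok(provider.get_block_number().await?) // an ordinary alloy Provider call on the erased provider
}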
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -92,4 +97,16 @@ pub struct MinedBlockInformation {
/// The hashes of the transactions that were mined as part of the block.
pub transaction_hashes: Vec<TxHash>,
/// The ref time for substrate based chains.
pub ref_time: u128,
/// The max ref time for substrate based chains.
pub max_ref_time: u64,
/// The proof size for substrate based chains.
pub proof_size: u128,
/// The max proof size for substrate based chains.
pub max_proof_size: u64,
}
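Because these are plain integers on the struct, consumers can compute utilization directly; a hypothetical helper (not part of the diff), guarding against the EVM-node case where the maxima are reported as zero:

fn weight_utilization(info: &MinedBlockInformation) -> (f64, f64) {
    // Fraction of the block's ref-time and proof-size budgets actually used;
    // `max(1)` avoids division by zero for EVM nodes that report zeros.
    let ref_time = info.ref_time as f64 / info.max_ref_time.max(1) as f64;
    let proof_size = info.proof_size as f64 / info.max_proof_size.max(1) as f64;
    (ref_time, proof_size)
}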
+2
@@ -29,6 +29,8 @@ serde_yaml_ng = { workspace = true }
sp-core = { workspace = true }
sp-runtime = { workspace = true }
subxt = { workspace = true }
zombienet-sdk = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
+15 -1
@@ -32,7 +32,7 @@ use alloy::{
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use tokio::sync::OnceCell;
use tracing::{Instrument, error, instrument};
@@ -535,6 +535,10 @@ impl EthereumNode for GethNode {
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: 0,
max_ref_time: 0,
proof_size: 0,
max_proof_size: 0,
})
});
@@ -542,6 +546,16 @@ impl EthereumNode for GethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct GethNodeResolver {
@@ -17,7 +17,7 @@ use std::{
pin::Pin,
process::{Command, Stdio},
sync::{
Arc, LazyLock,
Arc,
atomic::{AtomicU32, Ordering},
},
time::{Duration, SystemTime, UNIX_EPOCH},
@@ -43,11 +43,11 @@ use alloy::{
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::serde_as;
use tokio::sync::{OnceCell, Semaphore};
use tokio::sync::OnceCell;
use tracing::{Instrument, info, instrument};
use revive_dt_common::{
@@ -105,7 +105,6 @@ pub struct LighthouseGethNode {
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
http_provider_requests_semaphore: LazyLock<Semaphore>,
}
impl LighthouseGethNode {
@@ -176,7 +175,6 @@ impl LighthouseGethNode {
nonce_manager: Default::default(),
persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(),
http_provider_requests_semaphore: LazyLock::new(|| Semaphore::const_new(500)),
}
}
@@ -224,6 +222,7 @@ impl LighthouseGethNode {
"--ws.port=8546".to_string(),
"--ws.api=eth,net,web3,txpool,engine".to_string(),
"--ws.origins=*".to_string(),
"--miner.gaslimit=30000000".to_string(),
],
consensus_layer_extra_parameters: vec![
"--disable-quic".to_string(),
@@ -249,6 +248,8 @@ impl LighthouseGethNode {
.collect::<BTreeMap<_, _>>();
serde_json::to_string(&map).unwrap()
},
gas_limit: 30_000_000,
genesis_gaslimit: 30_000_000,
},
wait_for_finalization: false,
port_publisher: Some(PortPublisherParameters {
@@ -566,8 +567,6 @@ impl EthereumNode for LighthouseGethNode {
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let _permit = self.http_provider_requests_semaphore.acquire().await;
let provider = self
.http_provider()
.await
@@ -758,6 +757,10 @@ impl EthereumNode for LighthouseGethNode {
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: 0,
max_ref_time: 0,
proof_size: 0,
max_proof_size: 0,
})
});
@@ -765,6 +768,16 @@ impl EthereumNode for LighthouseGethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.http_provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -1039,6 +1052,8 @@ struct NetworkParameters {
pub num_validator_keys_per_node: u64,
pub genesis_delay: u64,
pub genesis_gaslimit: u64,
pub gas_limit: u64,
pub prefunded_accounts: String,
}
@@ -1135,6 +1150,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = new_node();
@@ -1,3 +1,4 @@
pub mod geth;
pub mod lighthouse_geth;
pub mod substrate;
pub mod zombienet;
+110 -486
@@ -11,17 +11,10 @@ use std::{
};
use alloy::{
consensus::{BlockHeader, TxEnvelope},
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{
Ethereum, EthereumWallet, Network, NetworkWallet, TransactionBuilder,
TransactionBuilderError, UnbuiltTransactionError,
},
primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
TxHash, U256,
},
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
@@ -29,24 +22,23 @@ use alloy::{
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
eth::{Block, Header, Transaction},
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde::{Deserialize, Serialize};
use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::instrument;
@@ -54,7 +46,10 @@ use crate::{
Node,
constants::{CHAIN_ID, INITIAL_BALANCE},
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -76,11 +71,12 @@ pub struct SubstrateNode {
eth_proxy_process: Option<Process>,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
consensus: Option<String>,
}
impl SubstrateNode {
const BASE_DIRECTORY: &str = "Substrate";
const BASE_DIRECTORY: &str = "substrate";
const LOGS_DIRECTORY: &str = "logs";
const DATA_DIRECTORY: &str = "chains";
@@ -99,6 +95,7 @@ impl SubstrateNode {
pub fn new(
node_path: PathBuf,
export_chainspec_command: &str,
consensus: Option<String>,
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>,
@@ -128,6 +125,7 @@ impl SubstrateNode {
wallet: wallet.clone(),
nonce_manager: Default::default(),
provider: Default::default(),
consensus,
}
}
@@ -225,7 +223,7 @@ impl SubstrateNode {
self.logs_directory.as_path(),
self.node_binary.as_path(),
|command, stdout_file, stderr_file| {
command
let cmd = command
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
@@ -242,9 +240,16 @@ impl SubstrateNode {
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--pool-limit")
.arg(u32::MAX.to_string())
.arg("--pool-kbytes")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
if let Some(consensus) = self.consensus.as_ref() {
cmd.arg("--consensus").arg(consensus.clone());
}
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
@@ -339,14 +344,12 @@ impl SubstrateNode {
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
async fn provider(
&self,
) -> anyhow::Result<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>> {
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<ReviveNetwork, _>(
construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(),
FallbackGasFiller::new(250_000_000, 5_000_000_000, 1_000_000_000),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -408,23 +411,12 @@ impl EthereumNode for SubstrateNode {
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
static SEMAPHORE: std::sync::LazyLock<tokio::sync::Semaphore> =
std::sync::LazyLock::new(|| tokio::sync::Semaphore::new(500));
Box::pin(async move {
let _permit = SEMAPHORE.acquire().await?;
let receipt = self
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Failed to submit transaction to substrate proxy")?
.get_receipt()
.await
.context("Failed to fetch transaction receipt from substrate proxy")?;
Ok(receipt)
.context("Failed to create the provider")?;
execute_transaction(provider, transaction).await
})
}
@@ -516,44 +508,97 @@ impl EthereumNode for SubstrateNode {
+ '_,
>,
> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for block subscription")?;
let mut block_subscription = provider
.watch_full_blocks()
.await
.context("Failed to create the blocks stream")?;
block_subscription.set_channel_size(0xFFFF);
block_subscription.set_poll_interval(Duration::from_secs(1));
let block_stream = block_subscription.into_stream();
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
Box::pin(async move {
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let substrate_rpc_url = format!("ws://127.0.0.1:{substrate_rpc_port}");
let api = OnlineClient::<SubstrateConfig>::from_url(substrate_rpc_url)
.await
.context("Failed to create subxt rpc client")?;
let provider = self.provider().await.context("Failed to create provider")?;
let block_stream = api
.blocks()
.subscribe_all()
.await
.context("Failed to subscribe to blocks")?;
let mined_block_information_stream = block_stream.filter_map(move |block| {
let api = api.clone();
let provider = provider.clone();
async move {
let substrate_block = block.ok()?;
let revive_block = provider
.get_block_by_number(
BlockNumberOrTag::Number(substrate_block.number() as _),
)
.await
.expect("TODO: Remove")
.expect("TODO: Remove");
let used = api
.storage()
.at(substrate_block.reference())
.fetch_or_default(&revive::storage().system().block_weight())
.await
.expect("TODO: Remove");
let block_ref_time = (used.normal.ref_time as u128)
+ (used.operational.ref_time as u128)
+ (used.mandatory.ref_time as u128);
let block_proof_size = (used.normal.proof_size as u128)
+ (used.operational.proof_size as u128)
+ (used.mandatory.proof_size as u128);
let limits = api
.constants()
.at(&revive::constants().system().block_weights())
.expect("TODO: Remove");
let max_ref_time = limits.max_block.ref_time;
let max_proof_size = limits.max_block.proof_size;
Some(MinedBlockInformation {
block_number: substrate_block.number() as _,
block_timestamp: revive_block.header.timestamp,
mined_gas: revive_block.header.gas_used as _,
block_gas_limit: revive_block.header.gas_limit as _,
transaction_hashes: revive_block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: block_ref_time,
max_ref_time,
proof_size: block_proof_size,
max_proof_size,
})
}
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct SubstrateNodeResolver {
id: u32,
provider: ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>,
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
impl ResolverApi for SubstrateNodeResolver {
@@ -717,430 +762,6 @@ impl Drop for SubstrateNode {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ReviveNetwork;
impl Network for ReviveNetwork {
type TxType = <Ethereum as Network>::TxType;
type TxEnvelope = <Ethereum as Network>::TxEnvelope;
type UnsignedTx = <Ethereum as Network>::UnsignedTx;
type ReceiptEnvelope = <Ethereum as Network>::ReceiptEnvelope;
type Header = ReviveHeader;
type TransactionRequest = <Ethereum as Network>::TransactionRequest;
type TransactionResponse = <Ethereum as Network>::TransactionResponse;
type ReceiptResponse = <Ethereum as Network>::ReceiptResponse;
type HeaderResponse = Header<ReviveHeader>;
type BlockResponse = Block<Transaction<TxEnvelope>, Header<ReviveHeader>>;
}
impl TransactionBuilder<ReviveNetwork> for <Ethereum as Network>::TransactionRequest {
fn chain_id(&self) -> Option<alloy::primitives::ChainId> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::chain_id(self)
}
fn set_chain_id(&mut self, chain_id: alloy::primitives::ChainId) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_chain_id(
self, chain_id,
)
}
fn nonce(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::nonce(self)
}
fn set_nonce(&mut self, nonce: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_nonce(
self, nonce,
)
}
fn take_nonce(&mut self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::take_nonce(
self,
)
}
fn input(&self) -> Option<&alloy::primitives::Bytes> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::input(self)
}
fn set_input<T: Into<alloy::primitives::Bytes>>(&mut self, input: T) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_input(
self, input,
)
}
fn from(&self) -> Option<Address> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::from(self)
}
fn set_from(&mut self, from: Address) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_from(
self, from,
)
}
fn kind(&self) -> Option<alloy::primitives::TxKind> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::kind(self)
}
fn clear_kind(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::clear_kind(
self,
)
}
fn set_kind(&mut self, kind: alloy::primitives::TxKind) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_kind(
self, kind,
)
}
fn value(&self) -> Option<alloy::primitives::U256> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::value(self)
}
fn set_value(&mut self, value: alloy::primitives::U256) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_value(
self, value,
)
}
fn gas_price(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_price(self)
}
fn set_gas_price(&mut self, gas_price: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_price(
self, gas_price,
)
}
fn max_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_fee_per_gas(
self,
)
}
fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_fee_per_gas(
self, max_fee_per_gas
)
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_priority_fee_per_gas(
self,
)
}
fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_priority_fee_per_gas(
self, max_priority_fee_per_gas
)
}
fn gas_limit(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_limit(self)
}
fn set_gas_limit(&mut self, gas_limit: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_limit(
self, gas_limit,
)
}
fn access_list(&self) -> Option<&alloy::rpc::types::AccessList> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::access_list(
self,
)
}
fn set_access_list(&mut self, access_list: alloy::rpc::types::AccessList) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_access_list(
self,
access_list,
)
}
fn complete_type(
&self,
ty: <ReviveNetwork as Network>::TxType,
) -> Result<(), Vec<&'static str>> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::complete_type(
self, ty,
)
}
fn can_submit(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_submit(
self,
)
}
fn can_build(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_build(self)
}
fn output_tx_type(&self) -> <ReviveNetwork as Network>::TxType {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type(
self,
)
}
fn output_tx_type_checked(&self) -> Option<<ReviveNetwork as Network>::TxType> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type_checked(
self,
)
}
fn prep_for_submission(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::prep_for_submission(
self,
)
}
fn build_unsigned(
self,
) -> alloy::network::BuildResult<<ReviveNetwork as Network>::UnsignedTx, ReviveNetwork> {
let result = <<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::build_unsigned(
self,
);
match result {
Ok(unsigned_tx) => Ok(unsigned_tx),
Err(UnbuiltTransactionError { request, error }) => {
Err(UnbuiltTransactionError::<ReviveNetwork> {
request,
error: match error {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items)
}
TransactionBuilderError::UnsupportedSignatureType => {
TransactionBuilderError::UnsupportedSignatureType
}
TransactionBuilderError::Signer(error) => {
TransactionBuilderError::Signer(error)
}
TransactionBuilderError::Custom(error) => {
TransactionBuilderError::Custom(error)
}
},
})
}
}
}
async fn build<W: alloy::network::NetworkWallet<ReviveNetwork>>(
self,
wallet: &W,
) -> Result<<ReviveNetwork as Network>::TxEnvelope, TransactionBuilderError<ReviveNetwork>>
{
Ok(wallet.sign_request(self).await?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReviveHeader {
/// The Keccak 256-bit hash of the parent
/// block's header, in its entirety; formally Hp.
pub parent_hash: B256,
/// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho.
#[serde(rename = "sha3Uncles", alias = "ommersHash")]
pub ommers_hash: B256,
/// The 160-bit address to which all fees collected from the successful mining of this block
/// will be transferred; formally Hc.
#[serde(rename = "miner", alias = "beneficiary")]
pub beneficiary: Address,
/// The Keccak 256-bit hash of the root node of the state trie, after all transactions are
/// executed and finalisations applied; formally Hr.
pub state_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with each
/// transaction in the transactions list portion of the block; formally Ht.
pub transactions_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts
/// of each transaction in the transactions list portion of the block; formally He.
pub receipts_root: B256,
/// The Bloom filter composed from indexable information (logger address and log topics)
/// contained in each log entry from the receipt of each transaction in the transactions list;
/// formally Hb.
pub logs_bloom: Bloom,
/// A scalar value corresponding to the difficulty level of this block. This can be calculated
/// from the previous block's difficulty level and the timestamp; formally Hd.
pub difficulty: U256,
/// A scalar value equal to the number of ancestor blocks. The genesis block has a number of
/// zero; formally Hi.
#[serde(with = "alloy::serde::quantity")]
pub number: BlockNumber,
/// A scalar value equal to the current limit of gas expenditure per block; formally Hl.
// This is the main difference over the Ethereum network implementation. We use u128 here and
// not u64.
#[serde(with = "alloy::serde::quantity")]
pub gas_limit: u128,
/// A scalar value equal to the total gas used in transactions in this block; formally Hg.
#[serde(with = "alloy::serde::quantity")]
pub gas_used: u64,
/// A scalar value equal to the reasonable output of Unix's time() at this block's inception;
/// formally Hs.
#[serde(with = "alloy::serde::quantity")]
pub timestamp: u64,
/// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
/// fewer; formally Hx.
pub extra_data: Bytes,
/// A 256-bit hash which, combined with the
/// nonce, proves that a sufficient amount of computation has been carried out on this block;
/// formally Hm.
pub mix_hash: B256,
/// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of
/// computation has been carried out on this block; formally Hn.
pub nonce: B64,
/// A scalar representing EIP1559 base fee which can move up or down each block according
/// to a formula which is a function of gas used in parent block and gas target
/// (block gas limit divided by elasticity multiplier) of parent block.
/// The algorithm results in the base fee per gas increasing when blocks are
/// above the gas target, and decreasing when blocks are below the gas target. The base fee per
/// gas is burned.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub base_fee_per_gas: Option<u64>,
/// The Keccak 256-bit hash of the withdrawals list portion of this block.
/// <https://eips.ethereum.org/EIPS/eip-4895>
#[serde(default, skip_serializing_if = "Option::is_none")]
pub withdrawals_root: Option<B256>,
/// The total amount of blob gas consumed by the transactions within the block, added in
/// EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub blob_gas_used: Option<u64>,
/// A running total of blob gas consumed in excess of the target, prior to the block. Blocks
/// with above-target blob gas consumption increase this value, blocks with below-target blob
/// gas consumption decrease it (bounded at 0). This was added in EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub excess_blob_gas: Option<u64>,
/// The hash of the parent beacon block's root is included in execution blocks, as proposed by
/// EIP-4788.
///
/// This enables trust-minimized access to consensus state, supporting staking pools, bridges,
/// and more.
///
/// The beacon roots contract handles root storage, enhancing Ethereum's functionalities.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub parent_beacon_block_root: Option<B256>,
/// The Keccak 256-bit hash of an RLP encoded list with each
/// [EIP-7685] request in the block body.
///
/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685
#[serde(default, skip_serializing_if = "Option::is_none")]
pub requests_hash: Option<B256>,
}
impl BlockHeader for ReviveHeader {
fn parent_hash(&self) -> B256 {
self.parent_hash
}
fn ommers_hash(&self) -> B256 {
self.ommers_hash
}
fn beneficiary(&self) -> Address {
self.beneficiary
}
fn state_root(&self) -> B256 {
self.state_root
}
fn transactions_root(&self) -> B256 {
self.transactions_root
}
fn receipts_root(&self) -> B256 {
self.receipts_root
}
fn withdrawals_root(&self) -> Option<B256> {
self.withdrawals_root
}
fn logs_bloom(&self) -> Bloom {
self.logs_bloom
}
fn difficulty(&self) -> U256 {
self.difficulty
}
fn number(&self) -> BlockNumber {
self.number
}
// There's sadly nothing that we can do about this. We're required to implement this trait on
// any type that represents a header and the gas limit type used here is a u64.
fn gas_limit(&self) -> u64 {
self.gas_limit.try_into().unwrap_or(u64::MAX)
}
fn gas_used(&self) -> u64 {
self.gas_used
}
fn timestamp(&self) -> u64 {
self.timestamp
}
fn mix_hash(&self) -> Option<B256> {
Some(self.mix_hash)
}
fn nonce(&self) -> Option<B64> {
Some(self.nonce)
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.base_fee_per_gas
}
fn blob_gas_used(&self) -> Option<u64> {
self.blob_gas_used
}
fn excess_blob_gas(&self) -> Option<u64> {
self.excess_blob_gas
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.parent_beacon_block_root
}
fn requests_hash(&self) -> Option<B256> {
self.requests_hash
}
fn extra_data(&self) -> &Bytes {
&self.extra_data
}
}
#[cfg(test)]
mod tests {
use alloy::rpc::types::TransactionRequest;
@@ -1178,6 +799,7 @@ mod tests {
let mut node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
&context,
);
node.init(context.genesis_configuration.genesis().unwrap().clone())
@@ -1243,6 +865,7 @@ mod tests {
let mut dummy_node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
&context,
);
@@ -1295,6 +918,7 @@ mod tests {
let node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
&context,
);
File diff suppressed because it is too large
@@ -7,6 +7,10 @@ use alloy::{
transports::TransportResult,
};
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
inner: GasFiller,
@@ -56,8 +60,6 @@ where
provider: &P,
tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> {
// Try to fetch the GasFiller's "fillable" values (gas_price, base_fee, estimate_gas, …).
// If it errors (i.e. the tx would revert under eth_estimateGas), swallow the error.
match self.inner.prepare(provider, tx).await {
Ok(fill) => Ok(Some(fill)),
Err(_) => Ok(None),
@@ -70,8 +72,17 @@ where
mut tx: alloy::providers::SendableTx<N>,
) -> TransportResult<SendableTx<N>> {
if let Some(fill) = fillable {
// our inner GasFiller succeeded — use it
self.inner.fill(fill, tx).await
let mut tx = self.inner.fill(fill, tx).await?;
if let Some(builder) = tx.as_mut_builder() {
if let Some(estimated) = builder.gas_limit() {
let padded = estimated
.checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
.and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
.unwrap_or(u64::MAX);
builder.set_gas_limit(padded);
}
}
Ok(tx)
} else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
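A worked restatement of the padding arithmetic above (sketch; saturating mirrors the unwrap_or in the diff):

fn pad_gas(estimated: u64) -> u64 {
    estimated
        .checked_mul(120) // GAS_ESTIMATE_PADDING_NUMERATOR
        .and_then(|v| v.checked_div(100)) // GAS_ESTIMATE_PADDING_DENOMINATOR
        .unwrap_or(u64::MAX) // saturate instead of panicking on overflow
}
// e.g. pad_gas(1_000_000) == 1_200_000, i.e. 20% headroom.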
+2 -2
@@ -1,7 +1,7 @@
mod concurrency_limiter;
mod fallback_gas_provider;
mod fallback_gas_filler;
mod provider;
pub use concurrency_limiter::*;
pub use fallback_gas_provider::*;
pub use fallback_gas_filler::*;
pub use provider::*;
+73 -4
@@ -1,14 +1,16 @@
use std::sync::LazyLock;
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
use alloy::{
network::{Network, NetworkWallet, TransactionBuilder4844},
network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
providers::{
Identity, ProviderBuilder, RootProvider,
Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
},
rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::{Instrument, debug, info, info_span};
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
@@ -42,7 +44,7 @@ where
// requests at any point in time and no more than that. This is done in an effort to stabilize
// the framework against some of the intermittent issues that we've been seeing related to RPC calls.
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
LazyLock::new(|| ConcurrencyLimiterLayer::new(10));
LazyLock::new(|| ConcurrencyLimiterLayer::new(1000));
let client = ClientBuilder::default()
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
@@ -61,3 +63,70 @@ where
Ok(provider)
}
pub async fn execute_transaction<N, W>(
provider: ConcreteProvider<N, W>,
transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
N: Network<
TransactionRequest: TransactionBuilder4844,
TxEnvelope = <Ethereum as Network>::TxEnvelope,
>,
W: NetworkWallet<N>,
Identity: TxFiller<N>,
FallbackGasFiller: TxFiller<N>,
ChainIdFiller: TxFiller<N>,
NonceFiller: TxFiller<N>,
WalletFiller<W>: TxFiller<N>,
{
let sendable_transaction = provider
.fill(transaction)
.await
.context("Failed to fill transaction")?;
let transaction_envelope = sendable_transaction
.try_into_envelope()
.context("Failed to convert transaction into an envelope")?;
let tx_hash = *transaction_envelope.tx_hash();
let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
Ok(pending_transaction) => pending_transaction,
Err(error) => {
let error_string = error.to_string();
if error_string.contains("Transaction Already Imported") {
PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
} else {
return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
}
}
};
debug!(%tx_hash, "Submitted Transaction");
pending_transaction.set_timeout(Some(Duration::from_secs(120)));
let tx_hash = pending_transaction.watch().await.context(format!(
"Transaction inclusion watching timeout for {tx_hash}"
))?;
poll(
Duration::from_secs(60),
PollingWaitBehavior::Constant(Duration::from_secs(3)),
|| {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(tx_hash).await {
Ok(Some(receipt)) => {
info!("Found the transaction receipt");
Ok(ControlFlow::Break(receipt))
}
_ => Ok(ControlFlow::Continue(())),
}
}
},
)
.instrument(info_span!("Polling for receipt", %tx_hash))
.await
.context(format!("Polling for receipt failed for {tx_hash}"))
}
+7 -7
@@ -16,7 +16,7 @@ use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::Context;
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
broadcast::{Sender, channel},
@@ -415,7 +415,7 @@ impl ReportAggregator {
}
#[serde_as]
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Report {
/// The context that the tool was started up with.
pub context: Context,
@@ -440,7 +440,7 @@ impl Report {
}
}
#[derive(Clone, Debug, Serialize, Default)]
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -451,7 +451,7 @@ pub struct TestCaseReport {
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
/// The test case succeeded.
@@ -475,7 +475,7 @@ pub enum TestCaseStatus {
}
/// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestCaseNodeInformation {
/// The ID of the node that this case is being executed on.
pub id: usize,
@@ -486,7 +486,7 @@ pub struct TestCaseNodeInformation {
}
/// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize)]
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExecutionInformation {
/// Information related to the node assigned to this test case.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -506,7 +506,7 @@ pub struct ExecutionInformation {
}
/// Information related to compilation
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
/// The compilation was successful.
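With Deserialize now derived across the report types, a previous run's report can be read back in to drive the skip-succeeded filtering shown earlier; a hypothetical sketch (the path and JSON layout are assumptions):

use std::fs;

fn load_previous_report(path: &str) -> anyhow::Result<Report> {
    let raw = fs::read_to_string(path)?;
    Ok(serde_json::from_str(&raw)?)
}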
+6 -7
@@ -76,9 +76,7 @@ cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"paths": [
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/translated_semantic_tests")",
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/complex")",
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
"$(realpath "$TEST_REPO_DIR/fixtures/solidity")"
]
}
EOF
@@ -95,16 +93,17 @@ echo ""
# Run the tool
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
--platform geth-evm-solc \
--platform revive-dev-node-revm-solc \
--platform revive-dev-node-polkavm-resolc \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 5 \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 5 \
--concurrency.number-of-concurrent-tasks 500 \
--wallet.additional-keys 100000 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \
2> output.log
2> output.log
echo -e "${GREEN}=== Test run completed! ===${NC}"