Compare commits

..

12 Commits

Author SHA1 Message Date
James Wilson b6e0701ec9 Remove another unused dep 2025-08-27 15:29:00 +01:00
James Wilson f99b3dbc3c Improve error message 2025-08-27 13:16:55 +01:00
James Wilson 0e79594bb2 Remove semver from report crate 2025-08-27 13:15:24 +01:00
James Wilson 5c75228496 Pass solc version into compiler rather than returning from compilation. Allows for better reporting 2025-08-27 13:13:55 +01:00
James Wilson 8229675ba8 re-add context bits lost in merge conflicts 2025-08-27 10:47:35 +01:00
James Wilson dd94b12b34 Return solc_version in CompilerOutput 2025-08-27 10:45:22 +01:00
James Wilson 4af9f6695d Merge branch 'main' into jsdw-use-mode-solc-version 2025-08-27 09:48:58 +01:00
James Wilson 4d8adb553c Merge branch 'main' into jsdw-use-mode-solc-version (Note: some reporting in cached_compiler.rs is commented out and needs handling) 2025-08-26 11:34:32 +01:00
James Wilson bdd2eab194 Tidyup 2025-08-21 12:45:36 +01:00
James Wilson eb754bc9e8 remove temp_dir; tempfile has all we need 2025-08-21 12:36:23 +01:00
James Wilson 0e5e57e703 Merge branch 'main' into jsdw-use-mode-solc-version 2025-08-21 12:26:05 +01:00
James Wilson 491a9a32b2 Use the solc version required in tests rather than one on PATH 2025-08-21 12:23:54 +01:00
36 changed files with 1351 additions and 1488 deletions
Generated
+7 -14
@@ -4492,6 +4492,7 @@ dependencies = [
"semver 1.0.26",
"serde",
"serde_json",
"tempfile",
"tokio",
"tracing",
]
@@ -4501,13 +4502,10 @@ name = "revive-dt-config"
version = "0.1.0"
dependencies = [
"alloy",
"anyhow",
"clap",
"semver 1.0.26",
"serde",
"serde_json",
"strum",
"temp-dir",
"tempfile",
]
[[package]]
@@ -4521,6 +4519,7 @@ dependencies = [
"clap",
"futures",
"indexmap 2.10.0",
"once_cell",
"revive-dt-common",
"revive-dt-compiler",
"revive-dt-config",
@@ -4531,6 +4530,7 @@ dependencies = [
"semver 1.0.26",
"serde",
"serde_json",
"tempfile",
"tokio",
"tracing",
"tracing-appender",
@@ -4571,7 +4571,7 @@ dependencies = [
"serde_json",
"sp-core",
"sp-runtime",
"temp-dir",
"tempfile",
"tokio",
"tracing",
]
@@ -4596,7 +4596,6 @@ dependencies = [
"revive-dt-compiler",
"revive-dt-config",
"revive-dt-format",
"semver 1.0.26",
"serde",
"serde_json",
"serde_with",
@@ -5692,9 +5691,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "strum"
version = "0.27.2"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32"
dependencies = [
"strum_macros",
]
@@ -5818,12 +5817,6 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "temp-dir"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83176759e9416cf81ee66cb6508dbfe9c96f20b8b56265a39917551c23c70964"
[[package]]
name = "tempfile"
version = "3.20.0"
-2
@@ -48,8 +48,6 @@ serde_with = { version = "3.14.0" }
sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
thiserror = "2"
tokio = { version = "1.47.0", default-features = false, features = [
+3 -4
@@ -187,11 +187,10 @@ The above corpus file instructs the tool to look for all of the test cases conta
The simplest command to run this tool is the following:
```bash
RUST_LOG="info" cargo run --release -- execute-tests \
--follower geth \
RUST_LOG="info" cargo run --release -- \
--corpus path_to_your_corpus_file.json \
--working-directory path_to_a_temporary_directory_to_cache_things_in \
--concurrency.number-of-nodes 5 \
--workdir path_to_a_temporary_directory_to_cache_things_in \
--number-of-nodes 5 \
> logs.log \
2> output.log
```
@@ -1,21 +0,0 @@
/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
pub enum EitherIter<A, B> {
A(A),
B(B),
}
impl<A, B, T> Iterator for EitherIter<A, B>
where
A: Iterator<Item = T>,
B: Iterator<Item = T>,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
match self {
EitherIter::A(iter) => iter.next(),
EitherIter::B(iter) => iter.next(),
}
}
}
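For context, `EitherIter` existed because both arms of a conditional must return the same concrete type; a minimal sketch of the kind of use it served (the helper function below is hypothetical):

```rust
// Hypothetical helper: the two branches produce different iterator types,
// so EitherIter unifies them into a single return type.
fn small_or_large(small: bool) -> EitherIter<std::ops::Range<u32>, std::vec::IntoIter<u32>> {
    if small {
        EitherIter::A(0..3)
    } else {
        EitherIter::B(vec![10, 20, 30].into_iter())
    }
}
```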
-2
@@ -1,5 +1,3 @@
mod either_iter;
mod files_with_extension_iterator;
pub use either_iter::*;
pub use files_with_extension_iterator::*;
+8 -14
@@ -3,7 +3,6 @@ use semver::Version;
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that a given test should be run with, if possible.
///
@@ -35,19 +34,14 @@ impl Display for Mode {
impl Mode {
/// Return all of the available mode combinations.
pub fn all() -> impl Iterator<Item = &'static Mode> {
static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
ModePipeline::test_cases()
.flat_map(|pipeline| {
ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: None,
})
})
.collect::<Vec<_>>()
});
ALL_MODES.iter()
pub fn all() -> impl Iterator<Item = Mode> {
ModePipeline::test_cases().flat_map(|pipeline| {
ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: None,
})
})
}
/// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
@@ -6,6 +6,42 @@ pub enum VersionOrRequirement {
Requirement(VersionReq),
}
impl VersionOrRequirement {
/// A helper function to convert a [`semver::Version`] into a [`semver::VersionReq`].
pub fn version_to_requirement(version: &Version) -> VersionReq {
// Ignoring "build" metadata in the version, we can turn
// it into a requirement which is an exact match for the
// given version and nothing else:
VersionReq {
comparators: vec![semver::Comparator {
op: semver::Op::Exact,
major: version.major,
minor: Some(version.minor),
patch: Some(version.patch),
pre: version.pre.clone(),
}],
}
}
}
impl serde::Serialize for VersionOrRequirement {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
VersionOrRequirement::Version(v) => serializer.serialize_str(&v.to_string()),
VersionOrRequirement::Requirement(r) => serializer.serialize_str(&r.to_string()),
}
}
}
impl Default for VersionOrRequirement {
fn default() -> Self {
VersionOrRequirement::Requirement(VersionReq::STAR)
}
}
impl From<Version> for VersionOrRequirement {
fn from(value: Version) -> Self {
Self::Version(value)
@@ -18,24 +54,45 @@ impl From<VersionReq> for VersionOrRequirement {
}
}
impl From<VersionOrRequirement> for VersionReq {
fn from(value: VersionOrRequirement) -> Self {
match value {
VersionOrRequirement::Version(version) => {
VersionOrRequirement::version_to_requirement(&version)
}
VersionOrRequirement::Requirement(version_req) => version_req,
}
}
}
impl TryFrom<VersionOrRequirement> for Version {
type Error = anyhow::Error;
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
let VersionOrRequirement::Version(version) = value else {
anyhow::bail!("Version or requirement was not a version");
};
Ok(version)
}
}
impl TryFrom<VersionOrRequirement> for VersionReq {
type Error = anyhow::Error;
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
let VersionOrRequirement::Requirement(requirement) = value else {
anyhow::bail!("Version or requirement was not a requirement");
};
Ok(requirement)
match value {
VersionOrRequirement::Version(version) => Ok(version),
VersionOrRequirement::Requirement(mut version_req) => {
if version_req.comparators.len() != 1 {
anyhow::bail!(
"The version requirement in VersionOrRequirement is not a single exact version"
);
}
let c = version_req.comparators.pop().unwrap();
let (semver::Op::Exact, Some(minor), Some(patch)) = (c.op, c.minor, c.patch) else {
anyhow::bail!(
"The version requirement in VersionOrRequirement is not an exact version"
);
};
Ok(Version {
major: c.major,
minor,
patch,
pre: c.pre,
build: Default::default(),
})
}
}
}
}
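A brief sketch of the round-trip behaviour added above (version numbers are illustrative): an exact version becomes the requirement `=major.minor.patch`, and only a single-comparator exact requirement converts back into a `Version`.

```rust
use semver::Version;

fn round_trip_example() {
    // An exact version turns into the requirement "=1.2.3".
    let req = VersionOrRequirement::version_to_requirement(&Version::new(1, 2, 3));
    assert!(req.matches(&Version::new(1, 2, 3)));
    assert!(!req.matches(&Version::new(1, 2, 4)));

    // A single exact-match requirement converts back into a Version.
    let v = Version::try_from(VersionOrRequirement::from(req)).expect("exact requirement");
    assert_eq!(v, Version::new(1, 2, 3));
}
```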
+3
@@ -26,5 +26,8 @@ serde_json = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }
[lints]
workspace = true
+53 -38
@@ -3,6 +3,8 @@
//! - Polkadot revive resolc compiler
//! - Polkadot revive Wasm compiler
mod utils;
use std::{
collections::HashMap,
hash::Hash,
@@ -11,58 +13,48 @@ use std::{
use alloy::json_abi::JsonAbi;
use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use semver::Version;
use anyhow::Context;
use serde::{Deserialize, Serialize};
use revive_common::EVMVersion;
use revive_dt_common::cached_fs::read_to_string;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_config::Arguments;
// Re-export this as it's a part of the compiler interface.
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
// Expose functionality for instantiating a SolcCompiler.
pub use utils::{SolcCompiler, solc_compiler};
pub mod revive_js;
pub mod revive_resolc;
pub mod solc;
/// A common interface for all supported Solidity compilers.
pub trait SolidityCompiler: Sized {
/// Instantiates a new compiler object.
///
/// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
/// new compiler object. Certain implementations of this trait might choose to cache the
/// compiler objects and return the same ones over and over again.
fn new(
context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>,
) -> impl Future<Output = Result<Self>>;
/// Returns the version of the compiler.
fn version(&self) -> &Version;
/// Returns the path of the compiler executable.
fn path(&self) -> &Path;
pub trait SolidityCompiler {
/// Extra options specific to the compiler.
type Options: Default + PartialEq + Eq + Hash;
/// The low-level compiler interface.
fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
/// Does the compiler support the provided mode and version settings.
fn supports_mode(
fn build(
&self,
optimizer_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool;
input: CompilerInput,
additional_options: Self::Options,
) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
/// Instantiate a new compiler.
fn new(config: &Arguments) -> Self;
/// Does the compiler support the provided mode and version settings?
fn supports_mode(optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool;
}
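For illustration, a dummy backend (not part of this changeset) shows the shape an implementation of the reshaped trait now takes:

```rust
struct NoopCompiler;

impl SolidityCompiler for NoopCompiler {
    // This backend has no extra options.
    type Options = ();

    async fn build(&self, _input: CompilerInput, _opts: ()) -> anyhow::Result<CompilerOutput> {
        anyhow::bail!("noop backend: nothing to compile")
    }

    fn new(_config: &Arguments) -> Self {
        NoopCompiler
    }

    fn supports_mode(_optimize_setting: ModeOptimizerSetting, _pipeline: ModePipeline) -> bool {
        true
    }
}
```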
/// The generic compilation input configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
pub struct CompilerInput {
pub pipeline: Option<ModePipeline>,
pub optimization: Option<ModeOptimizerSetting>,
pub solc: Option<SolcCompiler>,
pub evm_version: Option<EVMVersion>,
pub allow_paths: Vec<PathBuf>,
pub base_path: Option<PathBuf>,
@@ -80,17 +72,27 @@ pub struct CompilerOutput {
}
/// A generic builder style interface for configuring the supported compiler options.
#[derive(Default)]
pub struct Compiler {
pub struct Compiler<T: SolidityCompiler> {
input: CompilerInput,
additional_options: T::Options,
}
impl Compiler {
impl Default for Compiler<solc::Solc> {
fn default() -> Self {
Self::new()
}
}
impl<T> Compiler<T>
where
T: SolidityCompiler,
{
pub fn new() -> Self {
Self {
input: CompilerInput {
pipeline: Default::default(),
optimization: Default::default(),
solc: Default::default(),
evm_version: Default::default(),
allow_paths: Default::default(),
base_path: Default::default(),
@@ -98,9 +100,15 @@ impl Compiler {
libraries: Default::default(),
revert_string_handling: Default::default(),
},
additional_options: T::Options::default(),
}
}
pub fn with_solc(mut self, value: impl Into<Option<SolcCompiler>>) -> Self {
self.input.solc = value.into();
self
}
pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
self.input.optimization = value.into();
self
@@ -126,7 +134,7 @@ impl Compiler {
self
}
pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
self.input.sources.insert(
path.as_ref().to_path_buf(),
read_to_string(path.as_ref()).context("Failed to read the contract source")?,
@@ -156,6 +164,11 @@ impl Compiler {
self
}
pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
self.additional_options = options.into();
self
}
pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
callback(self)
}
@@ -164,12 +177,14 @@ impl Compiler {
callback(self)
}
pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
compiler.build(self.input).await
pub async fn try_build(self, config: &Arguments) -> anyhow::Result<CompilerOutput> {
T::new(config)
.build(self.input, self.additional_options)
.await
}
pub fn input(&self) -> &CompilerInput {
&self.input
pub fn input(&self) -> CompilerInput {
self.input.clone()
}
}
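End to end, the reworked builder is driven roughly as follows (a sketch; the function name and source path are illustrative):

```rust
use revive_dt_compiler::{Compiler, ModePipeline, revive_resolc::Resolc, solc_compiler};
use revive_dt_config::Arguments;

async fn compile_example(args: &Arguments) -> anyhow::Result<()> {
    // Resolve a solc binary first; it is now passed into the compiler
    // explicitly via `with_solc` rather than discovered during the build.
    let solc = solc_compiler(
        args.directory(),
        &args.solc,             // fallback version
        None,                   // no test-specific requirement
        ModePipeline::ViaYulIR,
    )
    .await?;

    let _output = Compiler::<Resolc>::new()
        .with_solc(solc)
        .with_source("contracts/main.sol")? // illustrative path
        .try_build(args)
        .await?;
    Ok(())
}
```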
+25 -65
@@ -1,80 +1,31 @@
//! Implements the [SolidityCompiler] trait with `resolc` for
//! compiling contracts to PolkaVM (PVM) bytecode.
use std::{
path::PathBuf,
process::Stdio,
sync::{Arc, LazyLock},
};
use std::{path::PathBuf, process::Stdio};
use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_config::Arguments;
use revive_solc_json_interface::{
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
SolcStandardJsonOutput,
};
use crate::{
CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
};
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use alloy::json_abi::JsonAbi;
use anyhow::{Context as _, Result};
use anyhow::Context;
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Resolc(Arc<ResolcInner>);
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct ResolcInner {
/// The internal solc compiler that the resolc compiler uses as a compiler frontend.
solc: Solc,
#[derive(Debug)]
pub struct Resolc {
/// Path to the `resolc` executable
resolc_path: PathBuf,
}
impl SolidityCompiler for Resolc {
async fn new(
context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> {
/// This is a cache of all of the resolc compiler objects. Since we do not currently support
/// multiple resolc compiler versions, our cache is just keyed by the solc compiler and
/// its version.
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
let solc = Solc::new(&context, version)
.await
.context("Failed to create the solc compiler frontend for resolc")?;
Ok(COMPILERS_CACHE
.entry(solc.clone())
.or_insert_with(|| {
Self(Arc::new(ResolcInner {
solc,
resolc_path: resolc_configuration.path.clone(),
}))
})
.clone())
}
fn version(&self) -> &Version {
// We currently return the solc compiler version since we do not support multiple resolc
// compiler versions.
self.0.solc.version()
}
fn path(&self) -> &std::path::Path {
&self.0.resolc_path
}
type Options = Vec<String>;
#[tracing::instrument(level = "debug", ret)]
async fn build(
@@ -82,6 +33,7 @@ impl SolidityCompiler for Resolc {
CompilerInput {
pipeline,
optimization,
solc,
evm_version,
allow_paths,
base_path,
@@ -91,13 +43,15 @@ impl SolidityCompiler for Resolc {
// resolc. So, we need to go back to this later once it's supported.
revert_string_handling: _,
}: CompilerInput,
) -> Result<CompilerOutput> {
additional_options: Self::Options,
) -> anyhow::Result<CompilerOutput> {
if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
anyhow::bail!(
"Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
);
}
let solc = solc.ok_or_else(|| anyhow::anyhow!("solc compiler not provided to resolc."))?;
let input = SolcStandardJsonInput {
language: SolcStandardJsonInputLanguage::Solidity,
sources: sources
@@ -138,11 +92,13 @@ impl SolidityCompiler for Resolc {
},
};
let mut command = AsyncCommand::new(self.path());
let mut command = AsyncCommand::new(&self.resolc_path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--solc")
.arg(&solc.path)
.arg("--standard-json");
if let Some(ref base_path) = base_path {
@@ -159,7 +115,7 @@ impl SolidityCompiler for Resolc {
}
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
.with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?;
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
let serialized_input = serde_json::to_vec(&input)
@@ -276,11 +232,15 @@ impl SolidityCompiler for Resolc {
Ok(compiler_output)
}
fn supports_mode(
&self,
optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
fn new(config: &Arguments) -> Self {
Resolc {
resolc_path: config.resolc.clone(),
}
}
fn supports_mode(_optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool {
// We only support the Y (i.e. compile via Yul IR) mode here. We must always compile
// via Yul IR as resolc needs this to translate to LLVM IR and then RISC-V.
pipeline == ModePipeline::ViaYulIR
}
}
+23 -88
@@ -1,20 +1,9 @@
//! Implements the [SolidityCompiler] trait with solc for
//! compiling contracts to EVM bytecode.
use std::{
path::PathBuf,
process::Stdio,
sync::{Arc, LazyLock},
};
use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc;
use super::utils;
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use anyhow::{Context as _, Result};
use anyhow::Context;
use foundry_compilers_artifacts::{
output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -22,67 +11,15 @@ use foundry_compilers_artifacts::{
solc::CompilerOutput as SolcOutput,
solc::*,
};
use semver::Version;
use revive_dt_config::Arguments;
use std::process::Stdio;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Solc(Arc<SolcInner>);
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct SolcInner {
/// The path of the solidity compiler executable that this object uses.
solc_path: PathBuf,
/// The version of the solidity compiler executable that this object uses.
solc_version: Version,
}
#[derive(Debug)]
pub struct Solc {}
impl SolidityCompiler for Solc {
async fn new(
context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> {
// This is a cache for the compiler objects so that whenever the same compiler version is
// requested the same object is returned. We do this as we do not want to keep cloning the
// compiler around.
static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
LazyLock::new(Default::default);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
// We attempt to download the solc binary. Note the following: this call does the version
// resolution for us. Therefore, even if the download didn't proceed, this function will
// resolve the version requirement into a canonical version of the compiler. It's then up
// to us to either use the provided path or not.
let version = version
.into()
.unwrap_or_else(|| solc_configuration.version.clone().into());
let (version, path) =
download_solc(working_directory_configuration.as_path(), version, false)
.await
.context("Failed to download/get path to solc binary")?;
Ok(COMPILERS_CACHE
.entry((path.clone(), version.clone()))
.or_insert_with(|| {
Self(Arc::new(SolcInner {
solc_path: path,
solc_version: version,
}))
})
.clone())
}
fn version(&self) -> &Version {
&self.0.solc_version
}
fn path(&self) -> &std::path::Path {
&self.0.solc_path
}
type Options = ();
#[tracing::instrument(level = "debug", ret)]
async fn build(
@@ -90,6 +27,7 @@ impl SolidityCompiler for Solc {
CompilerInput {
pipeline,
optimization,
solc,
evm_version,
allow_paths,
base_path,
@@ -97,12 +35,17 @@ impl SolidityCompiler for Solc {
libraries,
revert_string_handling,
}: CompilerInput,
) -> Result<CompilerOutput> {
_: Self::Options,
) -> anyhow::Result<CompilerOutput> {
let solc = solc.ok_or_else(|| anyhow::anyhow!("solc compiler not provided."))?;
let compiler_supports_via_ir =
utils::solc_versions_supporting_yul_ir().matches(&solc.version);
// Be careful to entirely omit the viaIR field if the compiler does not support it,
// as it will error if you provide fields it does not know about. Because
// `supports_mode` is called prior to instantiating a compiler, we should never
// ask for something which is invalid.
let via_ir = match (pipeline, self.compiler_supports_yul()) {
let via_ir = match (pipeline, compiler_supports_via_ir) {
(pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
(_pipeline, false) => None,
};
@@ -162,7 +105,7 @@ impl SolidityCompiler for Solc {
},
};
let mut command = AsyncCommand::new(self.path());
let mut command = AsyncCommand::new(&solc.path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
@@ -183,7 +126,7 @@ impl SolidityCompiler for Solc {
}
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
.with_context(|| format!("Failed to spawn solc at {}", solc.path.display()))?;
let stdin = child.stdin.as_mut().expect("should be piped");
let serialized_input = serde_json::to_vec(&input)
@@ -263,21 +206,13 @@ impl SolidityCompiler for Solc {
Ok(compiler_output)
}
fn supports_mode(
&self,
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
fn new(_config: &Arguments) -> Self {
Self {}
}
fn supports_mode(_optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool {
// solc 0.8.13 and above support --via-ir; earlier versions do not. Mode E (no Yul IR) always
// works, and solc_compiler already picks a Yul-capable solc for mode Y, so both pipelines are
// accepted here.
pipeline == ModePipeline::ViaEVMAssembly
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
}
}
impl Solc {
fn compiler_supports_yul(&self) -> bool {
const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
pipeline == ModePipeline::ViaEVMAssembly || pipeline == ModePipeline::ViaYulIR
}
}
+159
@@ -0,0 +1,159 @@
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
process::{Command, Stdio},
sync::LazyLock,
};
use anyhow::Context;
use dashmap::DashMap;
use revive_dt_common::types::{ModePipeline, VersionOrRequirement};
use semver::{Version, VersionReq};
/// Return the path and version of a suitable `solc` compiler given the requirements provided.
///
/// This caches any compiler binaries/paths that are downloaded as a result of calling this.
pub async fn solc_compiler(
cache_directory: &Path,
fallback_version: &Version,
required_version: Option<&VersionReq>,
pipeline: ModePipeline,
) -> anyhow::Result<SolcCompiler> {
// Require Yul compatible solc, or any if we don't care about compiling via Yul.
let mut version_req = if pipeline == ModePipeline::ViaYulIR {
solc_versions_supporting_yul_ir()
} else {
VersionReq::STAR
};
// Take into account the version requirements passed in, too.
if let Some(other_version_req) = required_version {
version_req
.comparators
.extend(other_version_req.comparators.iter().cloned());
}
// If no requirements yet then fall back to the fallback version.
let version_req = if version_req == VersionReq::STAR {
VersionOrRequirement::version_to_requirement(fallback_version)
} else {
version_req
};
// Download (or pull from cache) a suitable solc compiler given this.
let solc_path =
revive_dt_solc_binaries::download_solc(cache_directory, version_req, false).await?;
let solc_version = solc_version(&solc_path).await?;
Ok(SolcCompiler {
version: solc_version,
path: solc_path,
})
}
/// A `solc` compiler, returned from [`solc_compiler`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SolcCompiler {
/// Version of the compiler.
pub version: Version,
/// Path to the compiler executable.
pub path: PathBuf,
}
/// Fetch the solc version given a path to the executable
async fn solc_version(solc_path: &Path) -> anyhow::Result<semver::Version> {
/// This is a cache mapping the path of a compiler to its version number. We choose to
/// cache the version in this way rather than through a field on the struct since
/// compiler objects are created from the path all the time and are not reused over time.
static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
match VERSION_CACHE.entry(solc_path.to_path_buf()) {
dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
dashmap::Entry::Vacant(vacant_entry) => {
// The following is the parsing code for the version from the solc version strings
// which look like the following:
// ```
// solc, the solidity compiler commandline interface
// Version: 0.8.30+commit.73712a01.Darwin.appleclang
// ```
let child = Command::new(solc_path)
.arg("--version")
.stdout(Stdio::piped())
.spawn()?;
let output = child.wait_with_output()?;
let output = String::from_utf8_lossy(&output.stdout);
let version_line = output
.split("Version: ")
.nth(1)
.context("Version parsing failed")?;
let version_string = version_line
.split("+")
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
vacant_entry.insert(version.clone());
Ok(version)
}
}
}
/// This returns the solc versions which support Yul IR.
pub fn solc_versions_supporting_yul_ir() -> VersionReq {
use semver::{Comparator, Op, Prerelease, VersionReq};
VersionReq {
comparators: vec![Comparator {
op: Op::GreaterEq,
major: 0,
minor: Some(8),
patch: Some(13),
pre: Prerelease::EMPTY,
}],
}
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn compiler_version_can_be_obtained() {
// Arrange
let temp_dir = tempfile::tempdir().expect("can create tempdir");
let solc_path =
revive_dt_solc_binaries::download_solc(temp_dir.path(), Version::new(0, 7, 6), false)
.await
.expect("can download solc");
// Act
let version = solc_version(&solc_path).await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 7, 6)
)
}
#[tokio::test]
async fn compiler_version_can_be_obtained1() {
// Arrange
let temp_dir = tempfile::tempdir().expect("can create tempdir");
let solc_path =
revive_dt_solc_binaries::download_solc(temp_dir.path(), Version::new(0, 4, 21), false)
.await
.expect("can download solc");
// Act
let version = solc_version(&solc_path).await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 4, 21)
)
}
}
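The requirement merging inside `solc_compiler` relies on the fact that comparators in a `VersionReq` are ANDed together; a small sketch (version numbers are illustrative):

```rust
use semver::{Version, VersionReq};

fn merged_requirement_example() {
    let mut req = solc_versions_supporting_yul_ir(); // ">=0.8.13"
    let caller_req: VersionReq = "<0.9".parse().unwrap();
    req.comparators.extend(caller_req.comparators.iter().cloned());

    // All comparators must match, so this now means ">=0.8.13, <0.9".
    assert!(req.matches(&Version::new(0, 8, 30)));
    assert!(!req.matches(&Version::new(0, 7, 6)));
    assert!(!req.matches(&Version::new(0, 9, 1)));
}
```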
+8 -16
@@ -1,25 +1,20 @@
use std::path::PathBuf;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::ExecutionContext;
use semver::Version;
use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::Arguments;
#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
// Arrange
let args = ExecutionContext::default();
let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await
.unwrap();
let args = Arguments::default();
// Act
let output = Compiler::new()
let output = Compiler::<Solc>::new()
.with_source("./tests/assets/array_one_element/callable.sol")
.unwrap()
.with_source("./tests/assets/array_one_element/main.sol")
.unwrap()
.try_build(&solc)
.try_build(&args)
.await;
// Assert
@@ -49,18 +44,15 @@ async fn contracts_can_be_compiled_with_solc() {
#[tokio::test]
async fn contracts_can_be_compiled_with_resolc() {
// Arrange
let args = ExecutionContext::default();
let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await
.unwrap();
let args = Arguments::default();
// Act
let output = Compiler::new()
let output = Compiler::<Resolc>::new()
.with_source("./tests/assets/array_one_element/callable.sol")
.unwrap()
.with_source("./tests/assets/array_one_element/main.sol")
.unwrap()
.try_build(&resolc)
.try_build(&args)
.await;
// Assert
+1 -4
@@ -10,13 +10,10 @@ rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
semver = { workspace = true }
temp-dir = { workspace = true }
tempfile = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
strum = { workspace = true }
[lints]
workspace = true
+138 -480
@@ -2,561 +2,219 @@
use std::{
fmt::Display,
fs::read_to_string,
ops::Deref,
path::{Path, PathBuf},
str::FromStr,
sync::{Arc, LazyLock, OnceLock},
time::Duration,
sync::LazyLock,
};
use alloy::{
genesis::Genesis,
hex::ToHexExt,
network::EthereumWallet,
primitives::{FixedBytes, U256},
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
use clap::{Parser, ValueEnum};
use semver::Version;
use serde::{Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir;
use serde::{Deserialize, Serialize};
use tempfile::TempDir;
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
#[command(name = "retester")]
pub enum Context {
/// Executes tests in the MatterLabs format differentially against a leader and a follower.
ExecuteTests(ExecutionContext),
}
pub struct Arguments {
/// The `solc` version to use if the test didn't specify it explicitly.
#[arg(long = "solc", short, default_value = "0.8.29")]
pub solc: Version,
impl Context {
pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
self.as_ref()
}
/// Use the Wasm compiler versions.
#[arg(long = "wasm")]
pub wasm: bool,
pub fn report_configuration(&self) -> &ReportConfiguration {
self.as_ref()
}
}
impl AsRef<WorkingDirectoryConfiguration> for Context {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.working_directory,
}
}
}
impl AsRef<ReportConfiguration> for Context {
fn as_ref(&self) -> &ReportConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
}
}
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
/// The path to the `resolc` executable to be tested.
///
/// If not specified, then a temporary directory will be created and used by the program for all
/// temporary artifacts.
#[clap(
short,
long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The differential testing leader node implementation.
#[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
pub leader: TestingPlatform,
/// The differential testing follower node implementation.
#[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
pub follower: TestingPlatform,
/// By default it uses the `resolc` binary found in `$PATH`.
///
/// If `--wasm` is set, this should point to the resolc Wasm file.
#[arg(long = "resolc", short, default_value = "resolc")]
pub resolc: PathBuf,
/// A list of test corpus JSON files to be tested.
#[arg(long = "corpus", short)]
pub corpus: Vec<PathBuf>,
/// Configuration parameters for the solc compiler.
#[clap(flatten, next_help_heading = "Solc Configuration")]
pub solc_configuration: SolcConfiguration,
/// Configuration parameters for the resolc compiler.
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
/// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration,
/// Configuration parameters for the genesis.
#[clap(flatten, next_help_heading = "Genesis Configuration")]
pub genesis_configuration: GenesisConfiguration,
/// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration,
/// Configuration parameters for concurrency.
#[clap(flatten, next_help_heading = "Concurrency Configuration")]
pub concurrency_configuration: ConcurrencyConfiguration,
/// Configuration parameters for the compilers and compilation.
#[clap(flatten, next_help_heading = "Compilation Configuration")]
pub compilation_configuration: CompilationConfiguration,
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
}
impl Default for ExecutionContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
}
}
impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
&self.working_directory
}
}
impl AsRef<SolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &SolcConfiguration {
&self.solc_configuration
}
}
impl AsRef<ResolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ResolcConfiguration {
&self.resolc_configuration
}
}
impl AsRef<GethConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GethConfiguration {
&self.geth_configuration
}
}
impl AsRef<KitchensinkConfiguration> for ExecutionContext {
fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration
}
}
impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
&self.revive_dev_node_configuration
}
}
impl AsRef<EthRpcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &EthRpcConfiguration {
&self.eth_rpc_configuration
}
}
impl AsRef<GenesisConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GenesisConfiguration {
&self.genesis_configuration
}
}
impl AsRef<WalletConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WalletConfiguration {
&self.wallet_configuration
}
}
impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ConcurrencyConfiguration {
&self.concurrency_configuration
}
}
impl AsRef<CompilationConfiguration> for ExecutionContext {
fn as_ref(&self) -> &CompilationConfiguration {
&self.compilation_configuration
}
}
impl AsRef<ReportConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReportConfiguration {
&self.report_configuration
}
}
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct SolcConfiguration {
/// Specifies the default version of the Solc compiler that should be used if there is no
/// override specified by one of the test cases.
#[clap(long = "solc.version", default_value = "0.8.29")]
pub version: Version,
}
/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ResolcConfiguration {
/// Specifies the path of the resolc compiler to be used by the tool.
/// A place to store temporary artifacts during test execution.
///
/// If this is not specified, then the tool assumes that it should use the resolc binary that's
/// provided in the user's $PATH.
#[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
pub path: PathBuf,
}
/// Creates a temporary dir if not specified.
#[arg(long = "workdir", short)]
pub working_directory: Option<PathBuf>,
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GethConfiguration {
/// Specifies the path of the geth node to be used by the tool.
/// Add a tempdir manually if `working_directory` was not given.
///
/// If this is not specified, then the tool assumes that it should use the geth binary that's
/// provided in the user's $PATH.
#[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "geth.start-timeout-ms",
long = "geth.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KitchensinkConfiguration {
/// Specifies the path of the kitchensink node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the kitchensink binary
/// that's provided in the user's $PATH.
#[clap(
id = "kitchensink.path",
long = "kitchensink.path",
default_value = "substrate-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "kitchensink.start-timeout-ms",
long = "kitchensink.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// This configures the tool to use Kitchensink instead of using the revive-dev-node.
#[clap(long = "kitchensink.dont-use-dev-node")]
pub use_kitchensink: bool,
}
/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the revive dev node binary
/// that's provided in the user's $PATH.
#[clap(
id = "revive-dev-node.path",
long = "revive-dev-node.path",
default_value = "revive-dev-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "revive-dev-node.start-timeout-ms",
long = "revive-dev-node.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct EthRpcConfiguration {
/// Specifies the path of the ETH RPC to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the ETH RPC binary
/// that's provided in the user's $PATH.
#[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "eth-rpc.start-timeout-ms",
long = "eth-rpc.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
/// This is expected to be the path of a JSON geth genesis file.
#[clap(id = "genesis.path", long = "genesis.path")]
path: Option<PathBuf>,
/// The genesis object found at the provided path.
/// We attach it here because [TempDir] prunes itself on drop.
#[clap(skip)]
#[serde(skip)]
genesis: OnceLock<Genesis>,
}
pub temp_dir: Option<&'static TempDir>,
impl GenesisConfiguration {
pub fn genesis(&self) -> anyhow::Result<&Genesis> {
static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
let genesis = include_str!("../../../genesis.json");
serde_json::from_str(genesis).unwrap()
});
/// The path to the `geth` executable.
///
/// By default it uses `geth` binary found in `$PATH`.
#[arg(short, long = "geth", default_value = "geth")]
pub geth: PathBuf,
match self.genesis.get() {
Some(genesis) => Ok(genesis),
None => {
let genesis = match self.path.as_ref() {
Some(genesis_path) => {
let genesis_content = read_to_string(genesis_path)?;
serde_json::from_str(genesis_content.as_str())?
}
None => DEFAULT_GENESIS.clone(),
};
Ok(self.genesis.get_or_init(|| genesis))
}
}
}
}
/// The maximum time in milliseconds to wait for geth to start.
#[arg(long = "geth-start-timeout", default_value = "5000")]
pub geth_start_timeout: u64,
/// A set of configuration parameters for the wallet.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct WalletConfiguration {
/// The private key of the default signer.
#[clap(
long = "wallet.default-private-key",
/// Configure nodes according to this genesis.json file.
#[arg(long = "genesis", default_value = "genesis.json")]
pub genesis_file: PathBuf,
/// The signing account private key.
#[arg(
short,
long = "account",
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
)]
#[serde(serialize_with = "serialize_private_key")]
default_key: PrivateKeySigner,
pub account: String,
/// This argument controls which private keys the nodes have access to as wallet signers.
/// With a value of N, private keys (0, N] will be added to the signer set
/// of the node.
#[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
additional_keys: usize,
#[arg(long = "private-keys-count", default_value_t = 100_000)]
pub private_keys_to_add: usize,
/// The wallet object that will be used.
#[clap(skip)]
#[serde(skip)]
wallet: OnceLock<Arc<EthereumWallet>>,
}
/// The differential testing leader node implementation.
#[arg(short, long = "leader", default_value = "geth")]
pub leader: TestingPlatform,
impl WalletConfiguration {
pub fn wallet(&self) -> Arc<EthereumWallet> {
self.wallet
.get_or_init(|| {
let mut wallet = EthereumWallet::new(self.default_key.clone());
for signer in (1..=self.additional_keys)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Arc::new(wallet)
})
.clone()
}
}
/// The differential testing follower node implementation.
#[arg(short, long = "follower", default_value = "kitchensink")]
pub follower: TestingPlatform,
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
value.to_bytes().encode_hex().serialize(serializer)
}
/// Only compile against this testing platform (doesn't execute the tests).
#[arg(long = "compile-only")]
pub compile_only: Option<TestingPlatform>,
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ConcurrencyConfiguration {
/// Determines the number of nodes that will be spawned for each chain.
#[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
#[arg(long, default_value = "1")]
pub number_of_nodes: usize,
/// Determines the number of tokio worker threads that will be used.
#[arg(
long = "concurrency.number-of-threads",
long,
default_value_t = std::thread::available_parallelism()
.map(|n| n.get())
.unwrap_or(1)
)]
pub number_of_threads: usize,
/// Determines the amount of concurrent tasks that will be spawned to run tests.
/// Determines the number of concurrent tasks that will be spawned to run tests. Defaults to 20 x the number of nodes.
#[arg(long)]
pub number_concurrent_tasks: Option<usize>,
/// Extract problems back to the test corpus.
#[arg(short, long = "extract-problems")]
pub extract_problems: bool,
/// The path to the `kitchensink` executable.
///
/// Defaults to 20 x the number of nodes.
#[arg(long = "concurrency.number-of-concurrent-tasks")]
number_concurrent_tasks: Option<usize>,
/// By default it uses `substrate-node` binary found in `$PATH`.
#[arg(short, long = "kitchensink", default_value = "substrate-node")]
pub kitchensink: PathBuf,
/// Determines if the concurrency limit should be ignored or not.
#[arg(long = "concurrency.ignore-concurrency-limit")]
ignore_concurrency_limit: bool,
}
/// The path to the `revive-dev-node` executable.
///
/// By default it uses `revive-dev-node` binary found in `$PATH`.
#[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
pub revive_dev_node: PathBuf,
impl ConcurrencyConfiguration {
pub fn concurrency_limit(&self) -> Option<usize> {
match self.ignore_concurrency_limit {
true => None,
false => Some(
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes),
),
}
}
}
/// By default the tool uses the revive-dev-node when it's running differential tests against
/// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to
/// configure the tool to use kitchensink rather than the dev-node.
#[arg(long)]
pub use_kitchensink_not_dev_node: bool,
/// The path to the `eth_proxy` executable.
///
/// By default it uses `eth-rpc` binary found in `$PATH`.
#[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
pub eth_proxy: PathBuf,
#[derive(Clone, Debug, Parser, Serialize)]
pub struct CompilationConfiguration {
/// Controls if the compilation cache should be invalidated or not.
#[arg(long = "compilation.invalidate-cache")]
#[arg(short, long)]
pub invalidate_compilation_cache: bool,
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReportConfiguration {
/// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")]
pub include_compiler_input: bool,
pub report_include_compiler_input: bool,
/// Controls if the compiler output is included in the final report.
#[clap(long = "report.include-compiler-output")]
pub include_compiler_output: bool,
pub report_include_compiler_output: bool,
}
/// Represents the working directory that the program uses.
#[derive(Debug, Clone)]
pub enum WorkingDirectoryConfiguration {
/// A temporary directory is used as the working directory. This will be removed when dropped.
TemporaryDirectory(Arc<TempDir>),
/// A directory with a path is used as the working directory.
Path(PathBuf),
}
impl WorkingDirectoryConfiguration {
pub fn as_path(&self) -> &Path {
self.as_ref()
}
}
impl Deref for WorkingDirectoryConfiguration {
type Target = Path;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl AsRef<Path> for WorkingDirectoryConfiguration {
fn as_ref(&self) -> &Path {
match self {
WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
WorkingDirectoryConfiguration::Path(path) => path.as_path(),
impl Arguments {
/// Return the configured working directory with the following precedence:
/// 1. `self.working_directory` if it was provided.
/// 2. `self.temp_dir` if it was provided.
/// 3. Panic.
pub fn directory(&self) -> &Path {
if let Some(path) = &self.working_directory {
return path.as_path();
}
if let Some(temp_dir) = &self.temp_dir {
return temp_dir.path();
}
panic!("should have a workdir configured")
}
/// Return the number of concurrent tasks to run. This is provided via the
/// `--number-concurrent-tasks` argument, and otherwise defaults to --number-of-nodes * 20.
pub fn number_of_concurrent_tasks(&self) -> usize {
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes)
}
/// Try to parse `self.account` into a [PrivateKeySigner],
/// panicking on error.
pub fn wallet(&self) -> EthereumWallet {
let signer = self
.account
.parse::<PrivateKeySigner>()
.unwrap_or_else(|error| {
panic!("private key '{}' parsing error: {error}", self.account);
});
EthereumWallet::new(signer)
}
}
impl Default for WorkingDirectoryConfiguration {
impl Default for Arguments {
fn default() -> Self {
TempDir::new()
.map(Arc::new)
.map(Self::TemporaryDirectory)
.expect("Failed to create the temporary directory")
}
}
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
impl FromStr for WorkingDirectoryConfiguration {
type Err = anyhow::Error;
let default = Arguments::parse_from(["retester"]);
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"" => Ok(Default::default()),
_ => Ok(Self::Path(PathBuf::from(s))),
Arguments {
temp_dir: Some(&TEMP_DIR),
..default
}
}
}
impl Display for WorkingDirectoryConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.as_path().display(), f)
}
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_path().serialize(serializer)
}
}
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s)
.map(Duration::from_millis)
.map_err(Into::into)
}
/// The Solidity compatible node implementation.
///
/// This describes the solutions to be tested against on a high level.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize,
)]
#[strum(serialize_all = "kebab-case")]
#[clap(rename_all = "lower")]
pub enum TestingPlatform {
/// The go-ethereum reference full node EVM implementation.
Geth,
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink,
}
impl Display for TestingPlatform {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Geth => f.write_str("geth"),
Self::Kitchensink => f.write_str("revive"),
}
}
}
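A quick sketch of how the flattened `Arguments` struct is consumed, using the defaults declared above:

```rust
use revive_dt_config::Arguments;

fn main() {
    let args = Arguments::default();
    // Backed by a shared tempdir when --workdir is not given.
    println!("workdir: {}", args.directory().display());
    // 20 * number_of_nodes unless --number-concurrent-tasks is set.
    println!("tasks: {}", args.number_of_concurrent_tasks());
    // Parses --account into a signer; panics on an invalid key.
    let _wallet = args.wallet();
}
```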
+2
@@ -28,6 +28,7 @@ cacache = { workspace = true }
clap = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true }
once_cell = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
tracing-appender = { workspace = true }
@@ -35,6 +36,7 @@ tracing-subscriber = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tempfile = { workspace = true }
[lints]
workspace = true
+65 -118
@@ -2,7 +2,6 @@
//! be reused between runs.
use std::{
borrow::Cow,
collections::HashMap,
path::{Path, PathBuf},
sync::Arc,
@@ -10,13 +9,13 @@ use std::{
use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_config::TestingPlatform;
use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolcCompiler};
use revive_dt_config::Arguments;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
use anyhow::{Context as _, Error, Result};
use revive_dt_report::ExecutionSpecificReporter;
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
@@ -24,17 +23,9 @@ use tracing::{Instrument, debug, debug_span, instrument};
use crate::Platform;
pub struct CachedCompiler<'a> {
/// The cache that stores the compiled contracts.
artifacts_cache: ArtifactsCache,
pub struct CachedCompiler(ArtifactsCache);
/// This mechanism ensures that if multiple compilation requests come in for the same
/// contract we compile it only once; all other tasks that request the same compilation
/// concurrently get the cached version.
cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
}
impl<'a> CachedCompiler<'a> {
impl CachedCompiler {
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
let mut cache = ArtifactsCache::new(path);
if invalidate_cache {
@@ -43,10 +34,7 @@ impl<'a> CachedCompiler<'a> {
.await
.context("Failed to invalidate compilation cache directory")?;
}
Ok(Self {
artifacts_cache: cache,
cache_key_lock: Default::default(),
})
Ok(Self(cache))
}
/// Compiles or gets the compilation artifacts from the cache.
@@ -55,7 +43,7 @@ impl<'a> CachedCompiler<'a> {
level = "debug",
skip_all,
fields(
metadata_file_path = %metadata_file_path.display(),
metadata_file_path = %metadata_file_path.as_ref().display(),
%mode,
platform = P::config_id().to_string()
),
@@ -63,21 +51,30 @@ impl<'a> CachedCompiler<'a> {
)]
pub async fn compile_contracts<P: Platform>(
&self,
metadata: &'a Metadata,
metadata_file_path: &'a Path,
mode: Cow<'a, Mode>,
metadata: &Metadata,
metadata_file_path: impl AsRef<Path>,
solc: SolcCompiler,
mode: &Mode,
config: &Arguments,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &P::Compiler,
reporter: &ExecutionSpecificReporter,
compilation_success_report_callback: impl Fn(bool, Option<CompilerInput>, CompilerOutput)
+ Clone,
compilation_failure_report_callback: impl Fn(Option<CompilerInput>, String),
) -> Result<CompilerOutput> {
static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
Lazy::new(Default::default);
let cache_key = CacheKey {
platform_key: P::config_id(),
compiler_version: compiler.version().clone(),
metadata_file_path,
platform_key: P::config_id().to_string(),
compiler_version: solc.version.clone(),
metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
solc_mode: mode.clone(),
};
let compilation_callback = || {
// let compiler_path = compiler_path.clone();
// let compiler_version = compiler_version.clone();
let compilation_success_report_callback = compilation_success_report_callback.clone();
async move {
compile_contracts::<P>(
metadata
@@ -86,10 +83,12 @@ impl<'a> CachedCompiler<'a> {
metadata
.files_to_compile()
.context("Failed to enumerate files to compile from metadata")?,
&mode,
config,
solc,
mode,
deployed_libraries,
compiler,
reporter,
compilation_success_report_callback,
compilation_failure_report_callback,
)
.map(|compilation_result| compilation_result.map(CacheValue::new))
.await
@@ -122,15 +121,12 @@ impl<'a> CachedCompiler<'a> {
// Lock this specific cache key so that we do not get inconsistent state. When multiple
// tasks come in asking for the same compilation artifacts, they should not all trigger
// a compilation on a cache miss. Hence the lock here.
let read_guard = self.cache_key_lock.read().await;
let read_guard = CACHE_KEY_LOCK.read().await;
let mutex = match read_guard.get(&cache_key).cloned() {
Some(value) => {
drop(read_guard);
value
}
Some(value) => value,
None => {
drop(read_guard);
self.cache_key_lock
CACHE_KEY_LOCK
.write()
.await
.entry(cache_key.clone())
@@ -140,29 +136,13 @@ impl<'a> CachedCompiler<'a> {
};
let _guard = mutex.lock().await;
match self.artifacts_cache.get(&cache_key).await {
match self.0.get(&cache_key).await {
Some(cache_value) => {
if deployed_libraries.is_some() {
reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler.version().clone(),
compiler.path(),
true,
None,
cache_value.compiler_output.clone(),
)
.expect("Can't happen");
} else {
reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler.version().clone(),
compiler.path(),
true,
None,
cache_value.compiler_output.clone(),
)
.expect("Can't happen");
}
compilation_success_report_callback(
true,
None,
cache_value.compiler_output.clone(),
);
cache_value.compiler_output
}
None => {
@@ -179,20 +159,24 @@ impl<'a> CachedCompiler<'a> {
}
}
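The per-key locking used above can be isolated into a small standalone pattern (a sketch, not the crate's API): concurrent callers asking for the same key serialize on one mutex, so a cache miss triggers only a single compilation.

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

async fn lock_for_key(
    locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
    key: &str,
) -> Arc<Mutex<()>> {
    // Fast path: the lock for this key already exists.
    if let Some(mutex) = locks.read().await.get(key) {
        return mutex.clone();
    }
    // Slow path: take the write lock and insert (or reuse) the entry.
    locks
        .write()
        .await
        .entry(key.to_owned())
        .or_insert_with(Default::default)
        .clone()
}
```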
#[allow(clippy::too_many_arguments)]
async fn compile_contracts<P: Platform>(
metadata_directory: impl AsRef<Path>,
mut files_to_compile: impl Iterator<Item = PathBuf>,
config: &Arguments,
solc: SolcCompiler,
mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &P::Compiler,
reporter: &ExecutionSpecificReporter,
compilation_success_report_callback: impl Fn(bool, Option<CompilerInput>, CompilerOutput),
compilation_failure_report_callback: impl Fn(Option<CompilerInput>, String),
) -> Result<CompilerOutput> {
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol")
.with_use_cached_fs(true)
.collect::<Vec<_>>();
let compilation = Compiler::new()
let compiler = Compiler::<P::Compiler>::new()
.with_solc(solc)
.with_allow_path(metadata_directory)
// Handling the modes
.with_optimization(mode.optimize_setting)
@@ -200,7 +184,8 @@ async fn compile_contracts<P: Platform>(
// Adding the contract sources to the compiler.
.try_then(|compiler| {
files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
})?
})
.inspect_err(|err| compilation_failure_report_callback(None, format!("{err:#}")))?
// Adding the deployed libraries to the compiler.
.then(|compiler| {
deployed_libraries
@@ -217,55 +202,17 @@ async fn compile_contracts<P: Platform>(
})
});
let input = compilation.input().clone();
let output = compilation.try_build(compiler).await;
let compiler_input = compiler.input();
let compiler_output = compiler
.try_build(config)
.await
.inspect_err(|err| {
compilation_failure_report_callback(Some(compiler_input.clone()), format!("{err:#}"))
})
.context("Failed to configure compiler with sources and options")?;
match (output.as_ref(), deployed_libraries.is_some()) {
(Ok(output), true) => {
reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler.version().clone(),
compiler.path(),
false,
input,
output.clone(),
)
.expect("Can't happen");
}
(Ok(output), false) => {
reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler.version().clone(),
compiler.path(),
false,
input,
output.clone(),
)
.expect("Can't happen");
}
(Err(err), true) => {
reporter
.report_post_link_contracts_compilation_failed_event(
compiler.version().clone(),
compiler.path().to_path_buf(),
input,
format!("{err:#}"),
)
.expect("Can't happen");
}
(Err(err), false) => {
reporter
.report_pre_link_contracts_compilation_failed_event(
compiler.version().clone(),
compiler.path().to_path_buf(),
input,
format!("{err:#}"),
)
.expect("Can't happen");
}
}
output
compilation_success_report_callback(false, Some(compiler_input), compiler_output.clone());
Ok(compiler_output)
}
struct ArtifactsCache {
@@ -289,7 +236,7 @@ impl ArtifactsCache {
}
#[instrument(level = "debug", skip_all, err)]
pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
cacache::write(self.path.as_path(), key.encode_hex(), value)
@@ -300,7 +247,7 @@ impl ArtifactsCache {
Ok(())
}
pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
pub async fn get(&self, key: &CacheKey) -> Option<CacheValue> {
let key = bson::to_vec(key).ok()?;
let value = cacache::read(self.path.as_path(), key.encode_hex())
.await
@@ -312,7 +259,7 @@ impl ArtifactsCache {
#[instrument(level = "debug", skip_all, err)]
pub async fn get_or_insert_with(
&self,
key: &CacheKey<'_>,
key: &CacheKey,
callback: impl AsyncFnOnce() -> Result<CacheValue>,
) -> Result<CacheValue> {
match self.get(key).await {
@@ -330,20 +277,20 @@ impl ArtifactsCache {
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
struct CacheKey {
/// The platform name that this artifact was compiled for. For example, this could be EVM or
/// PVM.
platform_key: &'a TestingPlatform,
platform_key: String,
/// The version of the compiler that was used to compile the artifacts.
compiler_version: Version,
/// The path of the metadata file that the compilation artifacts are for.
metadata_file_path: &'a Path,
metadata_file_path: PathBuf,
/// The mode that the compilation artifacts were compiled with.
solc_mode: Cow<'a, Mode>,
solc_mode: Mode,
}
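Making `CacheKey` owned (and `Deserialize`) is what lets it round-trip through bson on the way into and out of the on-disk cache. A rough sketch of that key-to-entry mapping, assuming the `bson`, `cacache`, and `hex` crates; `DemoKey` and the cache path are illustrative:

```rust
use std::path::PathBuf;

use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
struct DemoKey {
    platform_key: String,
    metadata_file_path: PathBuf,
}

async fn write_entry(key: &DemoKey, value: &[u8]) -> anyhow::Result<()> {
    // Owned fields serialize and deserialize cleanly; a borrowed
    // CacheKey<'a> could be serialized but never deserialized back.
    let key_bytes = bson::to_vec(key)?;
    cacache::write("./demo-cache", hex::encode(key_bytes), value).await?;
    Ok(())
}
```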
#[derive(Clone, Debug, Serialize, Deserialize)]
+1 -1
@@ -18,7 +18,7 @@ use alloy::{
primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::Context as _;
use anyhow::Context;
use futures::TryStreamExt;
use indexmap::IndexMap;
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
+5 -5
@@ -19,7 +19,7 @@ pub trait Platform {
type Compiler: SolidityCompiler;
/// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
fn config_id() -> &'static TestingPlatform;
fn config_id() -> TestingPlatform;
}
#[derive(Default)]
@@ -29,8 +29,8 @@ impl Platform for Geth {
type Blockchain = geth::GethNode;
type Compiler = solc::Solc;
fn config_id() -> &'static TestingPlatform {
&TestingPlatform::Geth
fn config_id() -> TestingPlatform {
TestingPlatform::Geth
}
}
@@ -41,7 +41,7 @@ impl Platform for Kitchensink {
type Blockchain = KitchensinkNode;
type Compiler = revive_resolc::Resolc;
fn config_id() -> &'static TestingPlatform {
&TestingPlatform::Kitchensink
fn config_id() -> TestingPlatform {
TestingPlatform::Kitchensink
}
}
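`config_id` now returns `TestingPlatform` by value, which lets callers store it in owned data (for example via `to_string()` into the `platform_key: String` of `CacheKey`). An illustrative sketch with simplified stand-ins for the repository's types:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TestingPlatform {
    Geth,
    Kitchensink,
}

trait Platform {
    fn config_id() -> TestingPlatform;
}

struct DemoPlatform;

impl Platform for DemoPlatform {
    // Returning the small Copy enum by value avoids handing out a
    // &'static borrow that callers would then have to clone around.
    fn config_id() -> TestingPlatform {
        TestingPlatform::Geth
    }
}
```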
+516 -325
File diff suppressed because it is too large
+1 -1
@@ -64,7 +64,7 @@ impl Case {
pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
None => Mode::all().cloned().collect(),
None => Mode::all().collect(),
}
}
}
+1 -1
@@ -8,7 +8,7 @@ use alloy::{
rpc::types::TransactionRequest,
};
use alloy_primitives::{FixedBytes, utils::parse_units};
use anyhow::Context as _;
use anyhow::Context;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
use semver::VersionReq;
use serde::{Deserialize, Serialize};
+1 -1
@@ -99,7 +99,7 @@ impl Metadata {
pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
None => Mode::all().cloned().collect(),
None => Mode::all().collect(),
}
}
+22 -2
@@ -1,6 +1,5 @@
use anyhow::Context as _;
use anyhow::Context;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
@@ -177,6 +176,27 @@ impl ParsedMode {
}
}
/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
enum EitherIter<A, B> {
A(A),
B(B),
}
impl<A, B> Iterator for EitherIter<A, B>
where
A: Iterator,
B: Iterator<Item = A::Item>,
{
type Item = A::Item;
fn next(&mut self) -> Option<Self::Item> {
match self {
EitherIter::A(iter) => iter.next(),
EitherIter::B(iter) => iter.next(),
}
}
}
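A usage sketch for the `EitherIter` defined above: it lets one function return either of two distinct iterator types behind a single `impl Iterator`, without boxing. `evens_or_all` is illustrative:

```rust
fn evens_or_all(only_even: bool) -> impl Iterator<Item = u32> {
    if only_even {
        // A Filter<Range<u32>, _> on one arm...
        EitherIter::A((0..10u32).filter(|n| n % 2 == 0))
    } else {
        // ...and a plain Range<u32> on the other. Both arms share the
        // concrete EitherIter type, so this compiles without Box<dyn _>.
        EitherIter::B(0..10u32)
    }
}

fn main() {
    assert_eq!(evens_or_all(true).collect::<Vec<_>>(), vec![0, 2, 4, 6, 8]);
}
```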
#[cfg(test)]
mod tests {
use super::*;
+1 -1
@@ -27,7 +27,7 @@ sp-core = { workspace = true }
sp-runtime = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true }
[lints]
+65 -46
@@ -17,7 +17,9 @@ use alloy::{
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
primitives::{
Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
},
providers::{
Provider, ProviderBuilder,
ext::DebugApi,
@@ -27,8 +29,9 @@ use alloy::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
},
signers::local::PrivateKeySigner,
};
use anyhow::Context as _;
use anyhow::Context;
use revive_common::EVMVersion;
use tracing::{Instrument, instrument};
@@ -36,7 +39,7 @@ use revive_dt_common::{
fs::clear_directory,
futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::*;
use revive_dt_config::Arguments;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
@@ -61,7 +64,7 @@ pub struct GethNode {
geth: PathBuf,
id: u32,
handle: Option<Child>,
start_timeout: Duration,
start_timeout: u64,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
@@ -94,7 +97,7 @@ impl GethNode {
/// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -103,6 +106,8 @@ impl GethNode {
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)
.context("Failed to deserialize geth genesis JSON")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -235,7 +240,7 @@ impl GethNode {
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = self.start_timeout;
let maximum_wait_time = Duration::from_millis(self.start_timeout);
let mut stderr = BufReader::new(logs_file).lines();
let mut lines = vec![];
loop {
@@ -251,7 +256,7 @@ impl GethNode {
if Instant::now().duration_since(start_time) > maximum_wait_time {
anyhow::bail!(
"Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
self.start_timeout.as_millis(),
self.start_timeout,
lines.join("\n")
);
}
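The readiness check above tails geth's stderr log until a marker line appears or the millisecond budget is exhausted. A minimal standalone sketch of that loop; the marker string, file handling, and 50ms poll interval are illustrative:

```rust
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::time::{Duration, Instant};

fn wait_for_ready(log_path: &str, start_timeout_ms: u64) -> anyhow::Result<()> {
    let start = Instant::now();
    let budget = Duration::from_millis(start_timeout_ms);
    let mut lines = BufReader::new(File::open(log_path)?).lines();
    loop {
        match lines.next().transpose()? {
            // The node prints a line like this once its IPC endpoint is usable.
            Some(line) if line.contains("IPC endpoint opened") => return Ok(()),
            Some(_) => {}
            // At EOF, back off briefly before polling the file again.
            None => std::thread::sleep(Duration::from_millis(50)),
        }
        if start.elapsed() > budget {
            anyhow::bail!("timed out after {start_timeout_ms}ms waiting for node readiness");
        }
    }
}
```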
@@ -551,40 +556,30 @@ impl ResolverApi for GethNode {
}
impl Node for GethNode {
fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
fn new(config: &Arguments) -> Self {
let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string());
let wallet = wallet_configuration.wallet();
let mut wallet = config.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY),
logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
base_directory,
geth: geth_configuration.path.clone(),
geth: config.geth.clone(),
id,
handle: None,
start_timeout: geth_configuration.start_timeout_ms,
wallet: wallet.clone(),
start_timeout: config.geth_start_timeout,
wallet: Arc::new(wallet),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
// We know that we only need to be storing 2 files so we can specify that when creating
@@ -626,7 +621,7 @@ impl Node for GethNode {
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?;
Ok(())
}
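The signer registration above derives extra dev accounts from sequential integers. A sketch of that derivation, assuming alloy's local signer; low-entropy keys like these are only acceptable on throwaway dev networks:

```rust
use alloy::network::EthereumWallet;
use alloy::primitives::{FixedBytes, U256};
use alloy::signers::local::PrivateKeySigner;

fn register_dev_signers(wallet: &mut EthereumWallet, count: u64) {
    for key_bytes in (1..=count).map(|id| U256::from(id).to_be_bytes::<32>()) {
        // Indices 1..=count become 32-byte big-endian private keys; index 0
        // is skipped because an all-zero key is not a valid secp256k1 scalar.
        let signer = PrivateKeySigner::from_bytes(&FixedBytes(key_bytes))
            .expect("small nonzero key is a valid secp256k1 scalar");
        wallet.register_signer(signer);
    }
}
```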
@@ -667,25 +662,49 @@ impl Drop for GethNode {
#[cfg(test)]
mod tests {
use revive_dt_config::Arguments;
use tempfile::TempDir;
use crate::{GENESIS_JSON, Node};
use super::*;
fn test_config() -> ExecutionContext {
ExecutionContext::default()
fn test_config() -> (Arguments, TempDir) {
let mut config = Arguments::default();
let temp_dir = TempDir::new().unwrap();
config.working_directory = temp_dir.path().to_path_buf().into();
(config, temp_dir)
}
fn new_node() -> (ExecutionContext, GethNode) {
let context = test_config();
let mut node = GethNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone())
fn new_node() -> (GethNode, TempDir) {
let (args, temp_dir) = test_config();
let mut node = GethNode::new(&args);
node.init(GENESIS_JSON.to_owned())
.expect("Failed to initialize the node")
.spawn_process()
.expect("Failed to spawn the node process");
(context, node)
(node, temp_dir)
}
#[test]
fn init_works() {
GethNode::new(&test_config().0)
.init(GENESIS_JSON.to_string())
.unwrap();
}
#[test]
fn spawn_works() {
GethNode::new(&test_config().0)
.spawn(GENESIS_JSON.to_string())
.unwrap();
}
#[test]
fn version_works() {
let version = GethNode::new(&test_config()).version().unwrap();
let version = GethNode::new(&test_config().0).version().unwrap();
assert!(
version.starts_with("geth version"),
"expected version string, got: '{version}'"
@@ -695,7 +714,7 @@ mod tests {
#[tokio::test]
async fn can_get_chain_id_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let chain_id = node.chain_id().await;
@@ -708,7 +727,7 @@ mod tests {
#[tokio::test]
async fn can_get_gas_limit_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -721,7 +740,7 @@ mod tests {
#[tokio::test]
async fn can_get_coinbase_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -734,7 +753,7 @@ mod tests {
#[tokio::test]
async fn can_get_block_difficulty_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -747,7 +766,7 @@ mod tests {
#[tokio::test]
async fn can_get_block_hash_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -759,7 +778,7 @@ mod tests {
#[tokio::test]
async fn can_get_block_timestamp_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -771,7 +790,7 @@ mod tests {
#[tokio::test]
async fn can_get_block_number_from_node() {
// Arrange
let (_context, node) = new_node();
let (node, _temp_dir) = new_node();
// Act
let block_number = node.last_block_number().await;
+61 -60
@@ -19,8 +19,8 @@ use alloy::{
TransactionBuilderError, UnbuiltTransactionError,
},
primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
TxHash, U256,
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes,
StorageKey, TxHash, U256,
},
providers::{
Provider, ProviderBuilder,
@@ -32,8 +32,9 @@ use alloy::{
eth::{Block, Header, Transaction},
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
},
signers::local::PrivateKeySigner,
};
use anyhow::Context as _;
use anyhow::Context;
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
@@ -42,7 +43,7 @@ use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -91,7 +92,7 @@ impl KitchensinkNode {
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -152,6 +153,8 @@ impl KitchensinkNode {
})
.collect();
let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)
.context("Failed to deserialize EVM genesis JSON for kitchensink")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -583,47 +586,35 @@ impl ResolverApi for KitchensinkNode {
}
impl Node for KitchensinkNode {
fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let kitchensink_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
fn new(config: &Arguments) -> Self {
let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = kitchensink_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let wallet = wallet_configuration.wallet();
let mut wallet = config.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self {
id,
substrate_binary: kitchensink_configuration.path.clone(),
dev_node_binary: dev_node_configuration.path.clone(),
eth_proxy_binary: eth_rpc_configuration.path.clone(),
substrate_binary: config.kitchensink.clone(),
dev_node_binary: config.revive_dev_node.clone(),
eth_proxy_binary: config.eth_proxy.clone(),
rpc_url: String::new(),
base_directory,
logs_directory,
process_substrate: None,
process_proxy: None,
wallet: wallet.clone(),
wallet: Arc::new(wallet),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink,
use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node,
// We know that we only need to be storing 4 files so we can specify that when creating
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4),
@@ -664,8 +655,8 @@ impl Node for KitchensinkNode {
Ok(())
}
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(&genesis)?.spawn_process()
}
fn version(&self) -> anyhow::Result<String> {
@@ -1130,20 +1121,25 @@ impl BlockHeader for KitchenSinkHeader {
#[cfg(test)]
mod tests {
use alloy::rpc::types::TransactionRequest;
use revive_dt_config::Arguments;
use std::path::PathBuf;
use std::sync::{LazyLock, Mutex};
use std::fs;
use super::*;
use crate::Node;
use crate::{GENESIS_JSON, Node};
fn test_config() -> ExecutionContext {
let mut context = ExecutionContext::default();
context.kitchensink_configuration.use_kitchensink = true;
context
fn test_config() -> Arguments {
Arguments {
kitchensink: PathBuf::from("substrate-node"),
eth_proxy: PathBuf::from("eth-rpc"),
use_kitchensink_not_dev_node: true,
..Default::default()
}
}
fn new_node() -> (ExecutionContext, KitchensinkNode) {
fn new_node() -> (KitchensinkNode, Arguments) {
// Note: When we run the tests in the CI we found that if they're all
// run in parallel then the CI is unable to start all of the nodes in
// time and their start-up times out. Therefore, we want all of the
@@ -1162,36 +1158,32 @@ mod tests {
static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
let _guard = NODE_START_MUTEX.lock().unwrap();
let context = test_config();
let mut node = KitchensinkNode::new(&context);
node.init(context.genesis_configuration.genesis().unwrap().clone())
let args = test_config();
let mut node = KitchensinkNode::new(&args);
node.init(GENESIS_JSON)
.expect("Failed to initialize the node")
.spawn_process()
.expect("Failed to spawn the node process");
(context, node)
(node, args)
}
/// A shared node that multiple tests can use. It starts up once.
fn shared_node() -> &'static KitchensinkNode {
static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| {
let (context, node) = new_node();
(context, node)
static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| {
let (node, args) = new_node();
(node, args)
});
&NODE.1
&NODE.0
}
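The tests above share one node through a `LazyLock` and serialize startups behind a `Mutex`. A stripped-down sketch of that pattern, using only the standard library; `DemoNode` and `start_node` are illustrative:

```rust
use std::sync::{LazyLock, Mutex};

struct DemoNode;

fn start_node() -> DemoNode {
    // Serialize startups so parallel tests don't boot several nodes at
    // once and trip the CI's startup timeout.
    static START_MUTEX: Mutex<()> = Mutex::new(());
    let _guard = START_MUTEX.lock().unwrap();
    DemoNode
}

fn shared_node() -> &'static DemoNode {
    // LazyLock runs start_node exactly once; every test after the first
    // gets the already-running node.
    static NODE: LazyLock<DemoNode> = LazyLock::new(start_node);
    &NODE
}
```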
#[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = new_node();
let (node, args) = new_node();
let provider = node.provider().await.expect("Failed to create provider");
let account_address = context
.wallet_configuration
.wallet()
.default_signer()
.address();
let account_address = args.wallet().default_signer().address();
let transaction = TransactionRequest::default()
.to(account_address)
.value(U256::from(100_000_000_000_000u128));
@@ -1225,9 +1217,7 @@ mod tests {
let mut dummy_node = KitchensinkNode::new(&test_config());
// Call `init()`
dummy_node
.init(serde_json::from_str(genesis_content).unwrap())
.expect("init failed");
dummy_node.init(genesis_content).expect("init failed");
// Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node
@@ -1338,9 +1328,19 @@ mod tests {
}
#[test]
fn version_works() {
let node = shared_node();
fn spawn_works() {
let config = test_config();
let mut node = KitchensinkNode::new(&config);
node.spawn(GENESIS_JSON.to_string()).unwrap();
}
#[test]
fn version_works() {
let config = test_config();
let node = KitchensinkNode::new(&config);
let version = node.version().unwrap();
assert!(
@@ -1351,8 +1351,9 @@ mod tests {
#[test]
fn eth_rpc_version_works() {
let node = shared_node();
let config = test_config();
let node = KitchensinkNode::new(&config);
let version = node.eth_rpc_version().unwrap();
assert!(
+6 -14
@@ -1,8 +1,7 @@
//! This crate implements the testing nodes.
use alloy::genesis::Genesis;
use revive_common::EVMVersion;
use revive_dt_config::*;
use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode;
pub mod common;
@@ -11,20 +10,13 @@ pub mod geth;
pub mod kitchensink;
pub mod pool;
/// The default genesis configuration.
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
/// Create a new uninitialized instance.
fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self;
fn new(config: &Arguments) -> Self;
/// Returns the identifier of the node.
fn id(&self) -> usize;
@@ -32,7 +24,7 @@ pub trait Node: EthereumNode {
/// Spawns a node configured according to the genesis JSON.
///
/// Blocks until it's ready to accept transactions.
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
fn spawn(&mut self, genesis: String) -> anyhow::Result<()>;
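With `spawn` now taking raw JSON, each node parses and patches the genesis itself instead of receiving a pre-parsed `Genesis`. A sketch of what a node might do with the string; the chain-id patch is illustrative, not the repository's logic:

```rust
use alloy::genesis::Genesis;

fn parse_and_patch_genesis(raw: &str) -> anyhow::Result<Genesis> {
    let mut genesis: Genesis = serde_json::from_str(raw)?;
    // Each node can adjust the shared genesis for its own needs, e.g.
    // funding its signer accounts or overriding the chain id.
    genesis.config.chain_id = 420_420_420;
    Ok(genesis)
}
```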
/// Prune the node instance and related data.
///
+14 -42
@@ -5,13 +5,10 @@ use std::{
thread,
};
use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_config::{
ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
WorkingDirectoryConfiguration,
};
use revive_dt_common::cached_fs::read_to_string;
use anyhow::Context;
use revive_dt_config::Arguments;
use tracing::info;
use crate::Node;
@@ -28,31 +25,18 @@ where
T: Node + Send + 'static,
{
/// Create a new Pool. This will start as many nodes as `config.number_of_nodes` specifies.
pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Send
+ Sync
+ Clone
+ 'static,
) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;
let genesis = genesis_configuration.genesis()?;
pub fn new(config: &Arguments) -> anyhow::Result<Self> {
let nodes = config.number_of_nodes;
let genesis = read_to_string(&config.genesis_file).context(format!(
"cannot read genesis file: {}",
config.genesis_file.display()
))?;
let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes {
let context = context.clone();
let config = config.clone();
let genesis = genesis.clone();
handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis)));
}
let mut nodes = Vec::with_capacity(nodes);
@@ -80,20 +64,8 @@ where
}
}
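The pool starts one OS thread per node and then joins them, failing fast on the first startup error. A compact sketch of that fan-out/fan-in, where `spawn_one` stands in for `spawn_node::<T>`:

```rust
use std::thread;

fn spawn_one(id: u32, _genesis: String) -> anyhow::Result<u32> {
    // Stand-in for T::new(args) followed by node.spawn(genesis).
    Ok(id)
}

fn start_all(count: usize, genesis: String) -> anyhow::Result<Vec<u32>> {
    let handles: Vec<_> = (0..count as u32)
        .map(|id| {
            let genesis = genesis.clone();
            thread::spawn(move || spawn_one(id, genesis))
        })
        .collect();
    // Joining yields Result values; collecting into Result<Vec<_>>
    // short-circuits on the first node that failed to start.
    handles
        .into_iter()
        .map(|handle| handle.join().expect("node thread panicked"))
        .collect()
}
```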
fn spawn_node<T: Node + Send>(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone
+ 'static,
genesis: Genesis,
) -> anyhow::Result<T> {
let mut node = T::new(context);
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
let mut node = T::new(args);
info!(
id = node.id(),
connection_string = node.connection_string(),
-1
@@ -17,7 +17,6 @@ alloy-primitives = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
+50 -61
@@ -4,17 +4,15 @@
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fs::OpenOptions,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Context, TestingPlatform};
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode, SolcCompiler};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
@@ -36,11 +34,11 @@ pub struct ReportAggregator {
}
impl ReportAggregator {
pub fn new(context: Context) -> Self {
pub fn new(config: Arguments) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self {
report: Report::new(context),
report: Report::new(config),
remaining_cases: Default::default(),
runner_tx: Some(runner_tx),
runner_rx,
@@ -121,12 +119,7 @@ impl ReportAggregator {
file_name.push_str(".json");
file_name
};
let file_path = self
.report
.context
.working_directory_configuration()
.as_path()
.join(file_name);
let file_path = self.report.config.directory().join(file_name);
let file = OpenOptions::new()
.create(true)
.write(true)
@@ -287,16 +280,8 @@ impl ReportAggregator {
&mut self,
event: PreLinkContractsCompilationSucceededEvent,
) {
let include_input = self
.report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let include_input = self.report.config.report_include_compiler_input;
let include_output = self.report.config.report_include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier);
@@ -313,8 +298,7 @@ impl ReportAggregator {
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
compiler_output,
});
@@ -324,16 +308,8 @@ impl ReportAggregator {
&mut self,
event: PostLinkContractsCompilationSucceededEvent,
) {
let include_input = self
.report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let include_input = self.report.config.report_include_compiler_input;
let include_output = self.report.config.report_include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier);
@@ -350,8 +326,7 @@ impl ReportAggregator {
execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
compiler_output,
});
@@ -361,13 +336,20 @@ impl ReportAggregator {
&mut self,
event: PreLinkContractsCompilationFailedEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input: event.compiler_input,
solc_info: event.solc_info,
compiler_input,
});
}
@@ -375,13 +357,20 @@ impl ReportAggregator {
&mut self,
event: PostLinkContractsCompilationFailedEvent,
) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
compiler_input: event.compiler_input,
solc_info: event.solc_info,
compiler_input,
});
}
@@ -427,8 +416,12 @@ impl ReportAggregator {
#[serde_as]
#[derive(Clone, Debug, Serialize)]
pub struct Report {
/// The context that the tool was started up with.
pub context: Context,
/// The configuration that the tool was started up with.
pub config: Arguments,
/// The platform of the leader chain.
pub leader_platform: TestingPlatform,
/// The platform of the follower chain.
pub follower_platform: TestingPlatform,
/// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool.
@@ -440,9 +433,11 @@ pub struct Report {
}
impl Report {
pub fn new(context: Context) -> Self {
pub fn new(config: Arguments) -> Self {
Self {
context,
leader_platform: config.leader,
follower_platform: config.follower,
config,
corpora: Default::default(),
metadata_files: Default::default(),
test_case_information: Default::default(),
@@ -527,17 +522,15 @@ pub enum CompilationStatus {
Success {
/// A flag indicating whether the compilation artifacts were cached or not.
is_cached: bool,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked.
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI contexts.
/// CLI configuration.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>,
},
@@ -545,15 +538,11 @@ pub enum CompilationStatus {
Failure {
/// The failure reason.
reason: String,
/// The version of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked.
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>,
},
+10 -19
@@ -1,16 +1,15 @@
//! The types associated with the events sent by the runner to the reporter.
#![allow(dead_code)]
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use std::{collections::BTreeMap, sync::Arc};
use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_compiler::{CompilerInput, CompilerOutput, SolcCompiler};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
@@ -547,13 +546,11 @@ define_event! {
PreLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled
/// anew.
is_cached: bool,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -565,13 +562,11 @@ define_event! {
PostLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled
/// anew.
is_cached: bool,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -583,10 +578,8 @@ define_event! {
PreLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -598,10 +591,8 @@ define_event! {
PostLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
+4 -5
@@ -9,11 +9,10 @@ use std::{
sync::LazyLock,
};
use semver::Version;
use tokio::sync::Mutex;
use crate::download::SolcDownloader;
use anyhow::Context as _;
use anyhow::Context;
pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
@@ -21,7 +20,7 @@ pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new
pub(crate) async fn get_or_download(
working_directory: &Path,
downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> {
) -> anyhow::Result<PathBuf> {
let target_directory = working_directory
.join(SOLC_CACHE_DIRECTORY)
.join(downloader.version.to_string());
@@ -30,7 +29,7 @@ pub(crate) async fn get_or_download(
let mut cache = SOLC_CACHER.lock().await;
if cache.contains(&target_file) {
tracing::debug!("using cached solc: {}", target_file.display());
return Ok((downloader.version.clone(), target_file));
return Ok(target_file);
}
create_dir_all(&target_directory).with_context(|| {
@@ -49,7 +48,7 @@ pub(crate) async fn get_or_download(
})?;
cache.insert(target_file.clone());
Ok((downloader.version.clone(), target_file))
Ok(target_file)
}
async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
+2 -2
@@ -11,7 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256};
use crate::list::List;
use anyhow::Context as _;
use anyhow::Context;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default);
@@ -82,7 +82,7 @@ impl SolcDownloader {
.filter(|version| requirement.matches(version))
.max()
else {
anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
anyhow::bail!("Failed to find a version that satisfies {requirement}");
};
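The improved error message above surfaces the unmet requirement directly. The resolution itself simply takes the newest release matching the `VersionReq`; a small worked sketch with illustrative versions:

```rust
use semver::{Version, VersionReq};

fn resolve(available: &[Version], requirement: &VersionReq) -> Option<Version> {
    available
        .iter()
        .filter(|version| requirement.matches(version))
        .max()
        .cloned()
}

fn main() {
    let available = vec![
        Version::parse("0.8.19").unwrap(),
        Version::parse("0.8.30").unwrap(),
    ];
    let requirement = VersionReq::parse("^0.8.20").unwrap();
    // The newest match wins; None would trigger the bail! above, which
    // now prints the requirement with Display rather than Debug.
    assert_eq!(
        resolve(&available, &requirement),
        Some(Version::parse("0.8.30").unwrap())
    );
}
```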
Ok(Self {
version,
+18 -14
@@ -3,30 +3,23 @@
//!
//! [0]: https://binaries.soliditylang.org
use std::path::{Path, PathBuf};
use anyhow::Context as _;
use cache::get_or_download;
use download::SolcDownloader;
use std::path::{Path, PathBuf};
use revive_dt_common::types::VersionOrRequirement;
use semver::Version;
pub mod cache;
pub mod download;
pub mod list;
/// Downloads the solc binary for Wasm if `wasm` is set, otherwise for
/// the target platform.
///
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
cache_directory: &Path,
/// Returns a [`SolcDownloader`] which can be used to download a `solc`
/// binary or to report the resolved version it will download.
async fn downloader(
version: impl Into<VersionOrRequirement>,
wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> {
let downloader = if wasm {
) -> anyhow::Result<SolcDownloader> {
if wasm {
SolcDownloader::wasm(version).await
} else if cfg!(target_os = "linux") {
SolcDownloader::linux(version).await
@@ -37,7 +30,18 @@ pub async fn download_solc(
} else {
unimplemented!()
}
.context("Failed to initialize the Solc Downloader")?;
}
/// Downloads the solc binary for Wasm if `wasm` is set, otherwise for
/// the target platform.
///
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
cache_directory: &Path,
version: impl Into<VersionOrRequirement>,
wasm: bool,
) -> anyhow::Result<PathBuf> {
let downloader = downloader(version, wasm).await?;
get_or_download(cache_directory, &downloader).await
}
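A usage sketch for the revised `download_solc`: callers pass the version in and get only a path back, since the resolved version now travels alongside the compiler elsewhere. This assumes `semver::Version` converts into `VersionOrRequirement`; the cache path and version are illustrative:

```rust
use std::path::Path;

async fn fetch_solc() -> anyhow::Result<()> {
    let version: semver::Version = "0.8.30".parse()?;
    let solc_path = download_solc(Path::new("/tmp/dt-solc-cache"), version, false).await?;
    println!("solc binary at {}", solc_path.display());
    Ok(())
}
```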
+6 -6
@@ -89,13 +89,13 @@ echo "This may take a while..."
echo ""
# Run the tool
RUST_LOG="error" cargo run --release -- execute-tests \
RUST_LOG="error" cargo run --release -- \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 5 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
--workdir "$WORKDIR" \
--number-of-nodes 5 \
--kitchensink "$SUBSTRATE_NODE_BIN" \
--revive-dev-node "$REVIVE_DEV_NODE_BIN" \
--eth-proxy "$ETH_RPC_BIN" \
> logs.log \
2> output.log