Compare commits

..

1 Commit

Author SHA1 Message Date
Omar Abdulla 7802a56d47 Better errors in report 2025-08-26 18:29:28 +03:00
20 changed files with 957 additions and 609 deletions
Generated
+2
@@ -4518,6 +4518,7 @@ dependencies = [
  "clap",
  "futures",
  "indexmap 2.10.0",
+ "once_cell",
  "revive-dt-common",
  "revive-dt-compiler",
  "revive-dt-config",
@@ -4529,6 +4530,7 @@ dependencies = [
  "serde",
  "serde_json",
  "temp-dir",
+ "tempfile",
  "tokio",
  "tracing",
  "tracing-appender",
@@ -1,21 +0,0 @@
-/// An iterator that could be either of two iterators.
-#[derive(Clone, Debug)]
-pub enum EitherIter<A, B> {
-    A(A),
-    B(B),
-}
-
-impl<A, B, T> Iterator for EitherIter<A, B>
-where
-    A: Iterator<Item = T>,
-    B: Iterator<Item = T>,
-{
-    type Item = T;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        match self {
-            EitherIter::A(iter) => iter.next(),
-            EitherIter::B(iter) => iter.next(),
-        }
-    }
-}
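Aside: the deleted helper above is the classic "either iterator" adapter, used when two code paths produce different concrete iterator types that must share one return type. A minimal, self-contained sketch of the pattern (the `evens_or_all` function is hypothetical, not from this repository):

```rust
/// Compressed restatement of the deleted adapter: both branches yield the
/// same `Item`, so either variant satisfies the same `impl Iterator`.
enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B, T> Iterator for EitherIter<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}

// The two branches have different concrete types (`Filter<Range<u32>, _>`
// vs `Range<u32>`), so without an adapter like this (or boxing) they could
// not both be returned as `impl Iterator`.
fn evens_or_all(only_evens: bool) -> impl Iterator<Item = u32> {
    if only_evens {
        EitherIter::A((0u32..10).filter(|n| n % 2 == 0))
    } else {
        EitherIter::B(0u32..10)
    }
}

fn main() {
    assert_eq!(evens_or_all(true).count(), 5);
    assert_eq!(evens_or_all(false).count(), 10);
}
```

The adapter becomes unnecessary once both branches can produce the same owned item type, which is what the `Mode::all()` change elsewhere in this commit achieves.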
-2
@@ -1,5 +1,3 @@
-mod either_iter;
 mod files_with_extension_iterator;

-pub use either_iter::*;
 pub use files_with_extension_iterator::*;
+8 -14
@@ -3,7 +3,6 @@ use semver::Version;
 use serde::{Deserialize, Serialize};
 use std::fmt::Display;
 use std::str::FromStr;
-use std::sync::LazyLock;

 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -35,19 +34,14 @@ impl Display for Mode {
 impl Mode {
     /// Return all of the available mode combinations.
-    pub fn all() -> impl Iterator<Item = &'static Mode> {
-        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
-            ModePipeline::test_cases()
-                .flat_map(|pipeline| {
-                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
-                        pipeline,
-                        optimize_setting,
-                        version: None,
-                    })
-                })
-                .collect::<Vec<_>>()
-        });
-        ALL_MODES.iter()
+    pub fn all() -> impl Iterator<Item = Mode> {
+        ModePipeline::test_cases().flat_map(|pipeline| {
+            ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
+                pipeline,
+                optimize_setting,
+                version: None,
+            })
+        })
     }

     /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
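Reviewer note: the payoff of returning owned `Mode` values is at the call sites. A hedged, self-contained sketch with stub types (not the crate's own) of the unification this enables:

```rust
// Stub types, not the crate's own: the point is that owned items can be
// moved into closures or collected directly, with no `Cow<'static, Mode>`
// or either-iterator shim needed to unify the two branches.
#[derive(Clone, Debug, PartialEq)]
struct Mode {
    optimize: bool,
}

fn all_modes() -> impl Iterator<Item = Mode> {
    [false, true].into_iter().map(|optimize| Mode { optimize })
}

fn modes_to_run(explicit: Option<Vec<Mode>>) -> Vec<Mode> {
    // Both arms now produce `Vec<Mode>`; previously one arm yielded
    // borrowed `&'static Mode` values and the other owned ones.
    explicit.unwrap_or_else(|| all_modes().collect())
}

fn main() {
    assert_eq!(modes_to_run(None).len(), 2);
    assert_eq!(modes_to_run(Some(vec![Mode { optimize: true }])).len(), 1);
}
```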
+4
@@ -0,0 +1,4 @@
+use semver::Version;
+
+/// This is the first version of solc that supports the `--via-ir` flag / "viaIR" input JSON.
+pub const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
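The new constant is an ordinary `semver::Version`, so the feature gate is a plain comparison; a minimal sketch:

```rust
use semver::Version;

// Mirrors the new constant; `Version` ordering compares major, minor, and
// patch (plus pre-release, which is empty here), so `>=` implements the gate.
const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);

fn main() {
    assert!(Version::new(0, 8, 12) < SOLC_VERSION_SUPPORTING_VIA_YUL_IR);
    assert!(Version::new(0, 8, 13) >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR);
    assert!(Version::new(0, 9, 0) >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR);
}
```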
+53 -31
@@ -3,6 +3,8 @@
 //! - Polkadot revive resolc compiler
 //! - Polkadot revive Wasm compiler

+mod constants;
+
 use std::{
     collections::HashMap,
     hash::Hash,
@@ -11,7 +13,7 @@ use std::{
 use alloy::json_abi::JsonAbi;
 use alloy_primitives::Address;
-use anyhow::{Context, Result};
+use anyhow::Context;
 use semver::Version;
 use serde::{Deserialize, Serialize};
@@ -28,36 +30,36 @@ pub mod revive_resolc;
 pub mod solc;

 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler: Sized {
-    /// Instantiates a new compiler object.
-    ///
-    /// Based on the given [`Arguments`] and [`VersionOrRequirement`] this function instantiates a
-    /// new compiler object. Certain implementations of this trait might choose to cache the
-    /// compiler objects and return the same ones over and over again.
-    fn new(
-        config: &Arguments,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> impl Future<Output = Result<Self>>;
-
-    /// Returns the version of the compiler.
-    fn version(&self) -> &Version;
-
-    /// Returns the path of the compiler executable.
-    fn path(&self) -> &Path;
+pub trait SolidityCompiler {
+    /// Extra options specific to the compiler.
+    type Options: Default + PartialEq + Eq + Hash;

     /// The low-level compiler interface.
-    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
-
-    /// Does the compiler support the provided mode and version settings.
-    fn supports_mode(
+    fn build(
         &self,
-        optimizer_setting: ModeOptimizerSetting,
+        input: CompilerInput,
+        additional_options: Self::Options,
+    ) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
+
+    fn new(solc_executable: PathBuf) -> Self;
+
+    fn get_compiler_executable(
+        config: &Arguments,
+        version: impl Into<VersionOrRequirement>,
+    ) -> impl Future<Output = anyhow::Result<PathBuf>>;
+
+    fn version(&self) -> impl Future<Output = anyhow::Result<Version>>;
+
+    /// Does the compiler support the provided mode and version settings?
+    fn supports_mode(
+        compiler_version: &Version,
+        optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool;
 }

 /// The generic compilation input configuration.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CompilerInput {
     pub pipeline: Option<ModePipeline>,
     pub optimization: Option<ModeOptimizerSetting>,
@@ -78,12 +80,21 @@ pub struct CompilerOutput {
 }

 /// A generic builder style interface for configuring the supported compiler options.
-#[derive(Default)]
-pub struct Compiler {
+pub struct Compiler<T: SolidityCompiler> {
     input: CompilerInput,
+    additional_options: T::Options,
 }

-impl Compiler {
+impl Default for Compiler<solc::Solc> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T> Compiler<T>
+where
+    T: SolidityCompiler,
+{
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
@@ -96,6 +107,7 @@ impl Compiler {
                 libraries: Default::default(),
                 revert_string_handling: Default::default(),
             },
+            additional_options: T::Options::default(),
         }
     }
@@ -124,7 +136,7 @@ impl Compiler {
         self
     }

-    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
+    pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
         self.input.sources.insert(
             path.as_ref().to_path_buf(),
             read_to_string(path.as_ref()).context("Failed to read the contract source")?,
@@ -154,6 +166,11 @@ impl Compiler {
         self
     }

+    pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
+        self.additional_options = options.into();
+        self
+    }
+
     pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
         callback(self)
     }
@@ -162,12 +179,17 @@ impl Compiler {
         callback(self)
     }

-    pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
-        compiler.build(self.input).await
+    pub async fn try_build(
+        self,
+        compiler_path: impl AsRef<Path>,
+    ) -> anyhow::Result<CompilerOutput> {
+        T::new(compiler_path.as_ref().to_path_buf())
+            .build(self.input, self.additional_options)
+            .await
     }

-    pub fn input(&self) -> &CompilerInput {
-        &self.input
+    pub fn input(&self) -> CompilerInput {
+        self.input.clone()
     }
 }
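Taken together, construction now happens in two steps: resolve a compiler binary path, then configure and build through the typed builder. A hedged usage sketch mirroring the updated integration tests further down in this diff (the version and source path are illustrative):

```rust
use revive_dt_compiler::{Compiler, SolidityCompiler, solc::Solc};
use revive_dt_config::Arguments;
use semver::Version;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let args = Arguments::default();
    // Step 1: resolve (and possibly download) a concrete compiler binary.
    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30)).await?;
    // Step 2: configure and build; `Compiler<T>` carries `T::Options`, so
    // backend-specific flags are type-checked rather than stringly-typed.
    let _output = Compiler::<Solc>::new()
        .with_source("./tests/assets/array_one_element/main.sol")?
        .try_build(&compiler_path)
        .await?;
    Ok(())
}
```

The design choice here is that the compiler object itself became cheap and stateless (just a path), with caching moved to the version lookup and the artifacts cache instead.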
+115 -52
@@ -3,8 +3,8 @@
 use std::{
     path::PathBuf,
-    process::Stdio,
-    sync::{Arc, LazyLock},
+    process::{Command, Stdio},
+    sync::LazyLock,
 };

 use dashmap::DashMap;
@@ -16,61 +16,26 @@ use revive_solc_json_interface::{
     SolcStandardJsonOutput,
 };

-use crate::{
-    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
-};
+use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
 use alloy::json_abi::JsonAbi;
-use anyhow::{Context, Result};
+use anyhow::Context;
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

+// TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
+// specified solc compiler. I believe that currently we completely ignore the specified solc binary
+// when invoking resolc, which doesn't seem right if we're using solc as a compiler frontend.
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Resolc(Arc<ResolcInner>);
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct ResolcInner {
-    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
-    solc: Solc,
-
+#[derive(Debug)]
+pub struct Resolc {
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
 }

 impl SolidityCompiler for Resolc {
-    async fn new(
-        config: &Arguments,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> Result<Self> {
-        /// This is a cache of all of the resolc compiler objects. Since we do not currently support
-        /// multiple resolc compiler versions, our cache is just keyed by the solc compiler and
-        /// its version to the resolc compiler.
-        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
-
-        let solc = Solc::new(config, version)
-            .await
-            .context("Failed to create the solc compiler frontend for resolc")?;
-
-        Ok(COMPILERS_CACHE
-            .entry(solc.clone())
-            .or_insert_with(|| {
-                Self(Arc::new(ResolcInner {
-                    solc,
-                    resolc_path: config.resolc.clone(),
-                }))
-            })
-            .clone())
-    }
-
-    fn version(&self) -> &Version {
-        // We currently return the solc compiler version since we do not support multiple resolc
-        // compiler versions.
-        self.0.solc.version()
-    }
-
-    fn path(&self) -> &std::path::Path {
-        &self.0.resolc_path
-    }
+    type Options = Vec<String>;

     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -87,7 +52,8 @@ impl SolidityCompiler for Resolc {
             // resolc. So, we need to go back to this later once it's supported.
             revert_string_handling: _,
         }: CompilerInput,
-    ) -> Result<CompilerOutput> {
+        additional_options: Self::Options,
+    ) -> anyhow::Result<CompilerOutput> {
         if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
             anyhow::bail!(
                 "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
@@ -134,7 +100,7 @@ impl SolidityCompiler for Resolc {
             },
         };

-        let mut command = AsyncCommand::new(self.path());
+        let mut command = AsyncCommand::new(&self.resolc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -155,7 +121,7 @@ impl SolidityCompiler for Resolc {
         }

         let mut child = command
             .spawn()
-            .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
+            .with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?;

         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
         let serialized_input = serde_json::to_vec(&input)
@@ -272,11 +238,108 @@ impl SolidityCompiler for Resolc {
         Ok(compiler_output)
     }

+    fn new(resolc_path: PathBuf) -> Self {
+        Resolc { resolc_path }
+    }
+
+    async fn get_compiler_executable(
+        config: &Arguments,
+        _version: impl Into<VersionOrRequirement>,
+    ) -> anyhow::Result<PathBuf> {
+        if !config.resolc.as_os_str().is_empty() {
+            return Ok(config.resolc.clone());
+        }
+        Ok(PathBuf::from("resolc"))
+    }
+
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache from the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object is
+        /// not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+
+        match VERSION_CACHE.entry(self.resolc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
+                let output = Command::new(self.resolc_path.as_path())
+                    .arg("--version")
+                    .stdout(Stdio::piped())
+                    .spawn()
+                    .with_context(|| {
+                        format!(
+                            "Failed to spawn resolc at {} to get version",
+                            self.resolc_path.display()
+                        )
+                    })?
+                    .wait_with_output()
+                    .with_context(|| {
+                        format!(
+                            "Failed waiting for resolc at {} to finish --version",
+                            self.resolc_path.display()
+                        )
+                    })?
+                    .stdout;
+                let output = String::from_utf8_lossy(&output);
+                let version_string = output
+                    .split("version ")
+                    .nth(1)
+                    .context("Version parsing failed")?
+                    .split("+")
+                    .next()
+                    .context("Version parsing failed")?;
+                let version = Version::parse(version_string).with_context(|| {
+                    format!("Failed to parse resolc semver from '{version_string}'")
+                })?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
     fn supports_mode(
-        &self,
-        optimize_setting: ModeOptimizerSetting,
+        _compiler_version: &Version,
+        _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
-        pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
+        // We only support the Y (i.e. compile via Yul IR) mode here, which also means that we can
+        // only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
+        // needs this to translate to LLVM IR and then RISC-V.
+        // Note: the original implementation of this function looked like the following:
+        // ```
+        // pipeline == ModePipeline::ViaYulIR && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
+        // ```
+        // However, that implementation is sadly incorrect since the version that's passed into this
+        // function is not the version of solc but the version of resolc. This is despite the fact
+        // that resolc depends on solc for the initial Yul codegen. Therefore, we have skipped the
+        // version check until we have better integration between resolc and solc.
+        pipeline == ModePipeline::ViaYulIR
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
+        let compiler = Resolc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        let _ = version.expect("Failed to get version");
+    }
}
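The split-based version extraction added above is easy to check in isolation; a self-contained sketch with a hypothetical `--version` banner (the real resolc output may differ):

```rust
use anyhow::Context;
use semver::Version;

/// The same split-based extraction the new `Resolc::version` uses: take
/// everything after the first "version " and before the first "+".
fn parse_resolc_version(output: &str) -> anyhow::Result<Version> {
    let version_string = output
        .split("version ")
        .nth(1)
        .context("Version parsing failed")?
        .split('+')
        .next()
        .context("Version parsing failed")?;
    Version::parse(version_string)
        .with_context(|| format!("Failed to parse resolc semver from '{version_string}'"))
}

fn main() -> anyhow::Result<()> {
    // Hypothetical banner; the real one just needs "version X.Y.Z+..." in it.
    let sample = "resolc version 0.1.0+commit.abcdef01";
    assert_eq!(parse_resolc_version(sample)?, Version::new(0, 1, 0));
    Ok(())
}
```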
+128 -57
@@ -3,8 +3,8 @@
 use std::{
     path::PathBuf,
-    process::Stdio,
-    sync::{Arc, LazyLock},
+    process::{Command, Stdio},
+    sync::LazyLock,
 };

 use dashmap::DashMap;
@@ -12,9 +12,10 @@ use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::Arguments;
 use revive_dt_solc_binaries::download_solc;

+use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
 use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
-use anyhow::{Context, Result};
+use anyhow::Context;
 use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -25,54 +26,13 @@ use foundry_compilers_artifacts::{
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Solc(Arc<SolcInner>);
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct SolcInner {
-    /// The path of the solidity compiler executable that this object uses.
+#[derive(Debug)]
+pub struct Solc {
     solc_path: PathBuf,
-
-    /// The version of the solidity compiler executable that this object uses.
-    solc_version: Version,
 }

 impl SolidityCompiler for Solc {
-    async fn new(
-        config: &Arguments,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> Result<Self> {
-        // This is a cache for the compiler objects so that whenever the same compiler version is
-        // requested the same object is returned. We do this as we do not want to keep cloning the
-        // compiler around.
-        static COMPILERS_CACHE: LazyLock<DashMap<Version, Solc>> = LazyLock::new(Default::default);
-
-        // We attempt to download the solc binary. Note the following: this call does the version
-        // resolution for us. Therefore, even if the download didn't proceed, this function will
-        // resolve the version requirement into a canonical version of the compiler. It's then up
-        // to us to either use the provided path or not.
-        let version = version.into().unwrap_or_else(|| config.solc.clone().into());
-        let (version, path) = download_solc(config.directory(), version, false)
-            .await
-            .context("Failed to download/get path to solc binary")?;
-
-        Ok(COMPILERS_CACHE
-            .entry(version.clone())
-            .or_insert_with(|| {
-                Self(Arc::new(SolcInner {
-                    solc_path: path,
-                    solc_version: version,
-                }))
-            })
-            .clone())
-    }
-
-    fn version(&self) -> &Version {
-        &self.0.solc_version
-    }
-
-    fn path(&self) -> &std::path::Path {
-        &self.0.solc_path
-    }
+    type Options = ();

     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -87,12 +47,19 @@ impl SolidityCompiler for Solc {
             libraries,
             revert_string_handling,
         }: CompilerInput,
-    ) -> Result<CompilerOutput> {
+        _: Self::Options,
+    ) -> anyhow::Result<CompilerOutput> {
+        let compiler_supports_via_ir = self
+            .version()
+            .await
+            .context("Failed to query solc version to determine via-ir support")?
+            >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
+
         // Be careful to entirely omit the viaIR field if the compiler does not support it,
         // as it will error if you provide fields it does not know about. Because
         // `supports_mode` is called prior to instantiating a compiler, we should never
         // ask for something which is invalid.
-        let via_ir = match (pipeline, self.compiler_supports_yul()) {
+        let via_ir = match (pipeline, compiler_supports_via_ir) {
             (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
             (_pipeline, false) => None,
         };
@@ -152,7 +119,7 @@ impl SolidityCompiler for Solc {
             },
         };

-        let mut command = AsyncCommand::new(self.path());
+        let mut command = AsyncCommand::new(&self.solc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -173,7 +140,7 @@ impl SolidityCompiler for Solc {
         }

         let mut child = command
             .spawn()
-            .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
+            .with_context(|| format!("Failed to spawn solc at {}", self.solc_path.display()))?;

         let stdin = child.stdin.as_mut().expect("should be piped");
         let serialized_input = serde_json::to_vec(&input)
@@ -253,21 +220,125 @@ impl SolidityCompiler for Solc {
         Ok(compiler_output)
     }

+    fn new(solc_path: PathBuf) -> Self {
+        Self { solc_path }
+    }
+
+    async fn get_compiler_executable(
+        config: &Arguments,
+        version: impl Into<VersionOrRequirement>,
+    ) -> anyhow::Result<PathBuf> {
+        let path = download_solc(config.directory(), version, config.wasm)
+            .await
+            .context("Failed to download/get path to solc binary")?;
+        Ok(path)
+    }
+
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache from the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object is
+        /// not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+
+        match VERSION_CACHE.entry(self.solc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
+                // The following is the parsing code for the version from the solc version strings
+                // which look like the following:
+                // ```
+                // solc, the solidity compiler commandline interface
+                // Version: 0.8.30+commit.73712a01.Darwin.appleclang
+                // ```
+                let child = Command::new(self.solc_path.as_path())
+                    .arg("--version")
+                    .stdout(Stdio::piped())
+                    .spawn()
+                    .with_context(|| {
+                        format!(
+                            "Failed to spawn solc at {} to get version",
+                            self.solc_path.display()
+                        )
+                    })?;
+                let output = child.wait_with_output().with_context(|| {
+                    format!(
+                        "Failed waiting for solc at {} to finish --version",
+                        self.solc_path.display()
+                    )
+                })?;
+                let output = String::from_utf8_lossy(&output.stdout);
+                let version_line = output
+                    .split("Version: ")
+                    .nth(1)
+                    .context("Version parsing failed")?;
+                let version_string = version_line
+                    .split("+")
+                    .next()
+                    .context("Version parsing failed")?;
+                let version = Version::parse(version_string).with_context(|| {
+                    format!("Failed to parse solc semver from '{version_string}'")
+                })?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
     fn supports_mode(
-        &self,
+        compiler_version: &Version,
         _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
         // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
         // (i.e. no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
         pipeline == ModePipeline::ViaEVMAssembly
-            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
+            || (pipeline == ModePipeline::ViaYulIR
+                && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
     }
 }

-impl Solc {
-    fn compiler_supports_yul(&self) -> bool {
-        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
-        self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
-    }
-}
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
+        let compiler = Solc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        assert_eq!(
+            version.expect("Failed to get version"),
+            Version::new(0, 7, 6)
+        )
+    }
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained1() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21))
+            .await
+            .unwrap();
+        let compiler = Solc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        assert_eq!(
+            version.expect("Failed to get version"),
+            Version::new(0, 4, 21)
+        )
+    }
}
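The `supports_mode` decision table is worth restating compactly: mode E (no Yul IR) is always supported, mode Y only from 0.8.13 onwards. A self-contained sketch with a stub `ModePipeline` (the real enum lives in this crate):

```rust
use semver::Version;

const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);

#[derive(PartialEq)]
enum ModePipeline {
    ViaEVMAssembly,
    ViaYulIR,
}

/// Mirrors the logic of the new `Solc::supports_mode`, which now takes the
/// version as an argument instead of reading it from `&self`.
fn supports_mode(compiler_version: &Version, pipeline: ModePipeline) -> bool {
    pipeline == ModePipeline::ViaEVMAssembly
        || (pipeline == ModePipeline::ViaYulIR
            && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
}

fn main() {
    assert!(supports_mode(&Version::new(0, 7, 6), ModePipeline::ViaEVMAssembly));
    assert!(!supports_mode(&Version::new(0, 7, 6), ModePipeline::ViaYulIR));
    assert!(supports_mode(&Version::new(0, 8, 30), ModePipeline::ViaYulIR));
}
```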
+6 -7
@@ -1,6 +1,5 @@
 use std::path::PathBuf;

-use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
 use revive_dt_config::Arguments;
 use semver::Version;
@@ -9,17 +8,17 @@ use semver::Version;
 async fn contracts_can_be_compiled_with_solc() {
     // Arrange
     let args = Arguments::default();
-    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
+    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
         .await
         .unwrap();

     // Act
-    let output = Compiler::new()
+    let output = Compiler::<Solc>::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(&solc)
+        .try_build(compiler_path)
         .await;

     // Assert
@@ -50,17 +49,17 @@ async fn contracts_can_be_compiled_with_solc() {
 async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
     let args = Arguments::default();
-    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
+    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
         .await
         .unwrap();

     // Act
-    let output = Compiler::new()
+    let output = Compiler::<Resolc>::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(&resolc)
+        .try_build(compiler_path)
         .await;

     // Assert
+4
@@ -84,6 +84,10 @@ pub struct Arguments {
     #[arg(short, long = "follower", default_value = "kitchensink")]
     pub follower: TestingPlatform,

+    /// Only compile against this testing platform (doesn't execute the tests).
+    #[arg(long = "compile-only")]
+    pub compile_only: Option<TestingPlatform>,
+
     /// Determines the amount of nodes that will be spawned for each chain.
     #[arg(long, default_value = "1")]
     pub number_of_nodes: usize,
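Because the new flag is an `Option`, it is absent by default and existing invocations are unaffected. A hedged, self-contained clap sketch (a `String` stands in for `TestingPlatform`; requires clap's derive feature):

```rust
use clap::Parser;

/// A trimmed, hypothetical mirror of the new argument.
#[derive(Parser, Debug)]
struct Args {
    /// Only compile against this testing platform (doesn't execute the tests).
    #[arg(long = "compile-only")]
    compile_only: Option<String>,
}

fn main() {
    // Present: the value is captured.
    let args = Args::parse_from(["prog", "--compile-only", "geth"]);
    assert_eq!(args.compile_only.as_deref(), Some("geth"));

    // Absent: existing invocations keep their old behaviour.
    let args = Args::parse_from(["prog"]);
    assert!(args.compile_only.is_none());
}
```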
+2
@@ -28,6 +28,7 @@ cacache = { workspace = true }
 clap = { workspace = true }
 futures = { workspace = true }
 indexmap = { workspace = true }
+once_cell = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
 tracing-appender = { workspace = true }
@@ -36,6 +37,7 @@ semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 temp-dir = { workspace = true }
+tempfile = { workspace = true }

 [lints]
 workspace = true
+128 -120
@@ -2,7 +2,6 @@
 //! be reused between runs.

 use std::{
-    borrow::Cow,
     collections::HashMap,
     path::{Path, PathBuf},
     sync::Arc,
@@ -10,13 +9,13 @@ use std::{

 use futures::FutureExt;
 use revive_dt_common::iterators::FilesWithExtensionIterator;
-use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_config::TestingPlatform;
+use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler};
+use revive_dt_config::Arguments;
 use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};

 use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
 use anyhow::{Context as _, Error, Result};
-use revive_dt_report::ExecutionSpecificReporter;
+use once_cell::sync::Lazy;
 use semver::Version;
 use serde::{Deserialize, Serialize};
 use tokio::sync::{Mutex, RwLock};
@@ -24,17 +23,9 @@ use tracing::{Instrument, debug, debug_span, instrument};

 use crate::Platform;

-pub struct CachedCompiler<'a> {
-    /// The cache that stores the compiled contracts.
-    artifacts_cache: ArtifactsCache,
-
-    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
-    /// come in for the same contract we only compile it once and all other tasks that request the
-    /// same compilation concurrently get the cached version.
-    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
-}
+pub struct CachedCompiler(ArtifactsCache);

-impl<'a> CachedCompiler<'a> {
+impl CachedCompiler {
     pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
         let mut cache = ArtifactsCache::new(path);
         if invalidate_cache {
@@ -43,10 +34,7 @@ impl CachedCompiler {
                 .await
                 .context("Failed to invalidate compilation cache directory")?;
         }
-        Ok(Self {
-            artifacts_cache: cache,
-            cache_key_lock: Default::default(),
-        })
+        Ok(Self(cache))
     }

     /// Compiles or gets the compilation artifacts from the cache.
@@ -55,7 +43,7 @@ impl CachedCompiler {
         level = "debug",
         skip_all,
         fields(
-            metadata_file_path = %metadata_file_path.display(),
+            metadata_file_path = %metadata_file_path.as_ref().display(),
             %mode,
             platform = P::config_id().to_string()
         ),
@@ -63,33 +51,76 @@ impl CachedCompiler {
     )]
     pub async fn compile_contracts<P: Platform>(
         &self,
-        metadata: &'a Metadata,
-        metadata_file_path: &'a Path,
-        mode: Cow<'a, Mode>,
+        metadata: &Metadata,
+        metadata_file_path: impl AsRef<Path>,
+        mode: &Mode,
+        config: &Arguments,
         deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
-        compiler: &P::Compiler,
-        reporter: &ExecutionSpecificReporter,
-    ) -> Result<CompilerOutput> {
+        compilation_success_report_callback: impl Fn(
+            Version,
+            PathBuf,
+            bool,
+            Option<CompilerInput>,
+            CompilerOutput,
+        ) + Clone,
+        compilation_failure_report_callback: impl Fn(
+            Option<Version>,
+            Option<PathBuf>,
+            Option<CompilerInput>,
+            String,
+        ),
+    ) -> Result<(CompilerOutput, Version)> {
+        static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
+            Lazy::new(Default::default);
+
+        let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone());
+        let compiler_path = <P::Compiler as SolidityCompiler>::get_compiler_executable(
+            config,
+            compiler_version_or_requirement,
+        )
+        .await
+        .inspect_err(|err| {
+            compilation_failure_report_callback(None, None, None, format!("{err:#}"))
+        })
+        .context("Failed to obtain compiler executable path")?;
+
+        let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
+            .version()
+            .await
+            .inspect_err(|err| {
+                compilation_failure_report_callback(
+                    None,
+                    Some(compiler_path.clone()),
+                    None,
+                    format!("{err:#}"),
+                )
+            })
+            .context("Failed to query compiler version")?;
+
         let cache_key = CacheKey {
-            platform_key: P::config_id(),
-            compiler_version: compiler.version().clone(),
-            metadata_file_path,
+            platform_key: P::config_id().to_string(),
+            compiler_version: compiler_version.clone(),
+            metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
             solc_mode: mode.clone(),
         };

         let compilation_callback = || {
+            let compiler_path = compiler_path.clone();
+            let compiler_version = compiler_version.clone();
+            let compilation_success_report_callback = compilation_success_report_callback.clone();
             async move {
                 compile_contracts::<P>(
                     metadata
                         .directory()
                         .context("Failed to get metadata directory while preparing compilation")?,
+                    compiler_path,
+                    compiler_version,
                     metadata
                         .files_to_compile()
                         .context("Failed to enumerate files to compile from metadata")?,
-                    &mode,
+                    mode,
                     deployed_libraries,
-                    compiler,
-                    reporter,
+                    compilation_success_report_callback,
+                    compilation_failure_report_callback,
                 )
                 .map(|compilation_result| compilation_result.map(CacheValue::new))
                 .await
@@ -122,15 +153,12 @@ impl CachedCompiler {
             // Lock this specific cache key such that we do not get inconsistent state. We want
             // that when multiple cases come in asking for the compilation artifacts then they
             // don't all trigger a compilation if there's a cache miss. Hence, the lock here.
-            let read_guard = self.cache_key_lock.read().await;
+            let read_guard = CACHE_KEY_LOCK.read().await;
             let mutex = match read_guard.get(&cache_key).cloned() {
-                Some(value) => {
-                    drop(read_guard);
-                    value
-                }
+                Some(value) => value,
                 None => {
                     drop(read_guard);
-                    self.cache_key_lock
+                    CACHE_KEY_LOCK
                         .write()
                         .await
                         .entry(cache_key.clone())
@@ -140,29 +168,15 @@ impl CachedCompiler {
             };
             let _guard = mutex.lock().await;

-            match self.artifacts_cache.get(&cache_key).await {
+            match self.0.get(&cache_key).await {
                 Some(cache_value) => {
-                    if deployed_libraries.is_some() {
-                        reporter
-                            .report_post_link_contracts_compilation_succeeded_event(
-                                compiler.version().clone(),
-                                compiler.path(),
-                                true,
-                                None,
-                                cache_value.compiler_output.clone(),
-                            )
-                            .expect("Can't happen");
-                    } else {
-                        reporter
-                            .report_pre_link_contracts_compilation_succeeded_event(
-                                compiler.version().clone(),
-                                compiler.path(),
-                                true,
-                                None,
-                                cache_value.compiler_output.clone(),
-                            )
-                            .expect("Can't happen");
-                    }
+                    compilation_success_report_callback(
+                        compiler_version.clone(),
+                        compiler_path,
+                        true,
+                        None,
+                        cache_value.compiler_output.clone(),
+                    );
                     cache_value.compiler_output
                 }
                 None => {
@@ -175,24 +189,38 @@ impl CachedCompiler {
             }
         };

-        Ok(compiled_contracts)
+        Ok((compiled_contracts, compiler_version))
     }
 }

+#[allow(clippy::too_many_arguments)]
 async fn compile_contracts<P: Platform>(
     metadata_directory: impl AsRef<Path>,
+    compiler_path: impl AsRef<Path>,
+    compiler_version: Version,
     mut files_to_compile: impl Iterator<Item = PathBuf>,
     mode: &Mode,
     deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
-    compiler: &P::Compiler,
-    reporter: &ExecutionSpecificReporter,
+    compilation_success_report_callback: impl Fn(
+        Version,
+        PathBuf,
+        bool,
+        Option<CompilerInput>,
+        CompilerOutput,
+    ),
+    compilation_failure_report_callback: impl Fn(
+        Option<Version>,
+        Option<PathBuf>,
+        Option<CompilerInput>,
+        String,
+    ),
 ) -> Result<CompilerOutput> {
     let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
         .with_allowed_extension("sol")
         .with_use_cached_fs(true)
         .collect::<Vec<_>>();

-    let compilation = Compiler::new()
+    let compiler = Compiler::<P::Compiler>::new()
         .with_allow_path(metadata_directory)
         // Handling the modes
         .with_optimization(mode.optimize_setting)
@@ -200,6 +228,14 @@ async fn compile_contracts<P: Platform>(
         // Adding the contract sources to the compiler.
         .try_then(|compiler| {
             files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
+        })
+        .inspect_err(|err| {
+            compilation_failure_report_callback(
+                Some(compiler_version.clone()),
+                Some(compiler_path.as_ref().to_path_buf()),
+                None,
+                format!("{err:#}"),
+            )
         })?
         // Adding the deployed libraries to the compiler.
         .then(|compiler| {
@@ -217,55 +253,27 @@ async fn compile_contracts<P: Platform>(
             })
         });

-    let input = compilation.input().clone();
-    let output = compilation.try_build(compiler).await;
-
-    match (output.as_ref(), deployed_libraries.is_some()) {
-        (Ok(output), true) => {
-            reporter
-                .report_post_link_contracts_compilation_succeeded_event(
-                    compiler.version().clone(),
-                    compiler.path(),
-                    false,
-                    input,
-                    output.clone(),
-                )
-                .expect("Can't happen");
-        }
-        (Ok(output), false) => {
-            reporter
-                .report_pre_link_contracts_compilation_succeeded_event(
-                    compiler.version().clone(),
-                    compiler.path(),
-                    false,
-                    input,
-                    output.clone(),
-                )
-                .expect("Can't happen");
-        }
-        (Err(err), true) => {
-            reporter
-                .report_post_link_contracts_compilation_failed_event(
-                    compiler.version().clone(),
-                    compiler.path().to_path_buf(),
-                    input,
-                    format!("{err:#}"),
-                )
-                .expect("Can't happen");
-        }
-        (Err(err), false) => {
-            reporter
-                .report_pre_link_contracts_compilation_failed_event(
-                    compiler.version().clone(),
-                    compiler.path().to_path_buf(),
-                    input,
-                    format!("{err:#}"),
-                )
-                .expect("Can't happen");
-        }
-    }
-
-    output
+    let compiler_input = compiler.input();
+    let compiler_output = compiler
+        .try_build(compiler_path.as_ref())
+        .await
+        .inspect_err(|err| {
+            compilation_failure_report_callback(
+                Some(compiler_version.clone()),
+                Some(compiler_path.as_ref().to_path_buf()),
+                Some(compiler_input.clone()),
+                format!("{err:#}"),
+            )
+        })
+        .context("Failed to configure compiler with sources and options")?;
+
+    compilation_success_report_callback(
+        compiler_version,
+        compiler_path.as_ref().to_path_buf(),
+        false,
+        Some(compiler_input),
+        compiler_output.clone(),
+    );
+
+    Ok(compiler_output)
 }

 struct ArtifactsCache {
@@ -289,7 +297,7 @@ impl ArtifactsCache {
     }

     #[instrument(level = "debug", skip_all, err)]
-    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
+    pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
         let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
         let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
         cacache::write(self.path.as_path(), key.encode_hex(), value)
@@ -300,7 +308,7 @@ impl ArtifactsCache {
         Ok(())
     }

-    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
+    pub async fn get(&self, key: &CacheKey) -> Option<CacheValue> {
         let key = bson::to_vec(key).ok()?;
         let value = cacache::read(self.path.as_path(), key.encode_hex())
             .await
@@ -312,7 +320,7 @@ impl ArtifactsCache {
     #[instrument(level = "debug", skip_all, err)]
     pub async fn get_or_insert_with(
         &self,
-        key: &CacheKey<'_>,
+        key: &CacheKey,
         callback: impl AsyncFnOnce() -> Result<CacheValue>,
     ) -> Result<CacheValue> {
         match self.get(key).await {
@@ -330,20 +338,20 @@ impl ArtifactsCache {
     }
 }

-#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
-struct CacheKey<'a> {
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+struct CacheKey {
     /// The platform name that this artifact was compiled for. For example, this could be EVM or
     /// PVM.
-    platform_key: &'a TestingPlatform,
+    platform_key: String,

     /// The version of the compiler that was used to compile the artifacts.
     compiler_version: Version,

     /// The path of the metadata file that the compilation artifacts are for.
-    metadata_file_path: &'a Path,
+    metadata_file_path: PathBuf,

     /// The mode that the compilation artifacts were compiled with.
-    solc_mode: Cow<'a, Mode>,
+    solc_mode: Mode,
 }

 #[derive(Clone, Debug, Serialize, Deserialize)]
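The `CACHE_KEY_LOCK` scheme above deserves a standalone illustration: one mutex per cache key, stored behind a globally shared `RwLock`'d map, so that concurrent requests for the same key serialize (one compiles, the rest hit the cache) while different keys proceed in parallel. A self-contained sketch (a `String` key stands in for the real `CacheKey`):

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

/// Fetch (or lazily create) the dedicated mutex for one cache key.
async fn lock_for_key(
    locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
    key: &str,
) -> Arc<Mutex<()>> {
    // Fast path: a read lock suffices if the mutex already exists.
    if let Some(lock) = locks.read().await.get(key).cloned() {
        return lock;
    }
    // Slow path: take the write lock and insert the mutex if still absent.
    locks
        .write()
        .await
        .entry(key.to_string())
        .or_default()
        .clone()
}

#[tokio::main]
async fn main() {
    let locks = RwLock::new(HashMap::new());
    let mutex = lock_for_key(&locks, "solc-0.8.30/test.json").await;
    let _guard = mutex.lock().await;
    // ...compile once; other tasks requesting the same key block here
    // until the guard drops, then find the result in the artifacts cache.
}
```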
+5 -5
@@ -19,7 +19,7 @@ pub trait Platform {
     type Compiler: SolidityCompiler;

     /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
-    fn config_id() -> &'static TestingPlatform;
+    fn config_id() -> TestingPlatform;
 }

 #[derive(Default)]
@@ -29,8 +29,8 @@ impl Platform for Geth {
     type Blockchain = geth::GethNode;
     type Compiler = solc::Solc;

-    fn config_id() -> &'static TestingPlatform {
-        &TestingPlatform::Geth
+    fn config_id() -> TestingPlatform {
+        TestingPlatform::Geth
     }
 }

@@ -41,7 +41,7 @@ impl Platform for Kitchensink {
     type Blockchain = KitchensinkNode;
     type Compiler = revive_resolc::Resolc;

-    fn config_id() -> &'static TestingPlatform {
-        &TestingPlatform::Kitchensink
+    fn config_id() -> TestingPlatform {
+        TestingPlatform::Kitchensink
     }
 }
+457 -289
@@ -1,7 +1,6 @@
mod cached_compiler; mod cached_compiler;
use std::{ use std::{
borrow::Cow,
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
io::{BufWriter, Write, stderr}, io::{BufWriter, Write, stderr},
path::Path, path::Path,
@@ -17,20 +16,19 @@ use anyhow::Context;
use clap::Parser; use clap::Parser;
use futures::stream; use futures::stream;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
use indexmap::{IndexMap, indexmap}; use indexmap::IndexMap;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ use revive_dt_report::{
NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus, NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier, TestSpecificReporter, TestSpecifier,
}; };
use serde_json::{Value, json};
use temp_dir::TempDir; use temp_dir::TempDir;
use tokio::try_join; use tokio::{join, try_join};
use tracing::{debug, error, info, info_span, instrument}; use tracing::{debug, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard; use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{iterators::EitherIter, types::Mode}; use revive_dt_common::types::Mode;
use revive_dt_compiler::{CompilerOutput, SolidityCompiler}; use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_core::{ use revive_dt_core::{
@@ -50,6 +48,17 @@ use crate::cached_compiler::CachedCompiler;
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap()); static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
/// this represents a single "test"; a mode, path and collection of cases.
#[derive(Clone, Debug)]
struct Test<'a> {
metadata: &'a MetadataFile,
metadata_file_path: &'a Path,
mode: Mode,
case_idx: CaseIdx,
case: &'a Case,
reporter: TestSpecificReporter,
}
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?; let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?;
info!( info!(
@@ -84,9 +93,14 @@ fn main() -> anyhow::Result<()> {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
execute_corpus(&args, &tests, reporter, report_aggregator_task) match &args.compile_only {
.await Some(platform) => {
.context("Failed to execute corpus")?; compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await
}
None => execute_corpus(&args, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")?,
}
Ok(()) Ok(())
}; };
@@ -171,20 +185,8 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let leader_nodes = let tests = prepare_tests::<L, F>(args, metadata_files, reporter.clone());
NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?; let driver_task = start_driver_task::<L, F>(args, tests)
let follower_nodes =
NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?;
let tests_stream = tests_stream(
args,
metadata_files.iter(),
&leader_nodes,
&follower_nodes,
reporter.clone(),
)
.await;
let driver_task = start_driver_task::<L, F>(args, tests_stream)
.await .await
.context("Failed to start driver task")?; .context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter); let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -195,21 +197,19 @@ where
Ok(()) Ok(())
} }
async fn tests_stream<'a, L, F>( fn prepare_tests<'a, L, F>(
args: &Arguments, args: &Arguments,
metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone, metadata_files: &'a [MetadataFile],
leader_node_pool: &'a NodePool<L::Blockchain>,
follower_node_pool: &'a NodePool<F::Blockchain>,
reporter: Reporter, reporter: Reporter,
) -> impl Stream<Item = Test<'a, L, F>> ) -> impl Stream<Item = Test<'a>>
where where
L: Platform, L: Platform,
F: Platform, F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let tests = metadata_files let filtered_tests = metadata_files
.into_iter() .iter()
.flat_map(|metadata_file| { .flat_map(|metadata_file| {
metadata_file metadata_file
.cases .cases
@@ -219,120 +219,244 @@ where
}) })
// Flatten over the modes, prefer the case modes over the metadata file modes. // Flatten over the modes, prefer the case modes over the metadata file modes.
.flat_map(|(metadata_file, case_idx, case)| { .flat_map(|(metadata_file, case_idx, case)| {
let reporter = reporter.clone(); case.modes
.as_ref()
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref()); .or(metadata_file.modes.as_ref())
let modes = match modes { .map(|modes| ParsedMode::many_to_modes(modes.iter()).collect::<Vec<_>>())
Some(modes) => EitherIter::A( .unwrap_or(Mode::all().collect())
ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned), .into_iter()
), .map(move |mode| (metadata_file, case_idx, case, mode))
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
};
modes.into_iter().map(move |mode| {
(
metadata_file,
case_idx,
case,
mode.clone(),
reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
)
})
}) })
.collect::<Vec<_>>(); .map(move |(metadata_file, case_idx, case, mode)| Test {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
reporter: reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
})
.inspect(|test| {
test.reporter
.report_test_case_discovery_event()
.expect("Can't fail")
})
.collect::<Vec<_>>()
.into_iter()
// Filter the test out if the leader and follower do not support the target.
.filter(|test| {
let leader_support =
<L::Blockchain as Node>::matches_target(test.metadata.targets.as_deref());
let follower_support =
<F::Blockchain as Node>::matches_target(test.metadata.targets.as_deref());
let is_allowed = leader_support && follower_support;
// Note: before we do any kind of filtering or process the iterator in any way, we need to if !is_allowed {
// inform the report aggregator of all of the cases that were found as it keeps a state of the debug!(
// test cases for its internal use. file_path = %test.metadata.relative_path().display(),
for (_, _, _, _, reporter) in tests.iter() { leader_support,
reporter follower_support,
.report_test_case_discovery_event() "Target is not supported, throwing metadata file out"
.expect("Can't fail") );
} test
.reporter
.report_test_ignored_event(
"Either the leader or the follower do not support the target desired by the test",
IndexMap::from_iter([
(
"test_desired_targets".to_string(),
serde_json::to_value(test.metadata.targets.as_ref())
.expect("Can't fail")
),
(
"leader_support".to_string(),
serde_json::to_value(leader_support)
.expect("Can't fail")
),
(
"follower_support".to_string(),
serde_json::to_value(follower_support)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
stream::iter(tests.into_iter()) is_allowed
.filter_map( })
move |(metadata_file, case_idx, case, mode, reporter)| async move { // Filter the test out if the metadata file is ignored.
let leader_compiler = <L::Compiler as SolidityCompiler>::new( .filter(|test| {
args, if test.metadata.ignore.is_some_and(|ignore| ignore) {
mode.version.clone().map(Into::into), debug!(
) file_path = %test.metadata.relative_path().display(),
.await "Metadata file is ignored, throwing case out"
.inspect_err(|err| error!(?err, "Failed to instantiate the leader compiler")) );
.ok()?; test
.reporter
let follower_compiler = <F::Compiler as SolidityCompiler>::new( .report_test_ignored_event(
args, "Metadata file is ignored, therefore all cases are ignored",
mode.version.clone().map(Into::into), IndexMap::new(),
) )
.await .expect("Can't fail");
.inspect_err(|err| error!(?err, "Failed to instantiate the follower compiler")) false
.ok()?; } else {
true
let leader_node = leader_node_pool.round_robbin();
let follower_node = follower_node_pool.round_robbin();
Some(Test::<L, F> {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
leader_node,
follower_node,
leader_compiler,
follower_compiler,
reporter,
})
},
)
.filter_map(move |test| async move {
match test.check_compatibility() {
Ok(()) => Some(test),
Err((reason, additional_information)) => {
debug!(
metadata_file_path = %test.metadata.metadata_file_path.display(),
case_idx = %test.case_idx,
mode = %test.mode,
reason,
additional_information =
serde_json::to_string(&additional_information).unwrap(),
"Ignoring Test Case"
);
test.reporter
.report_test_ignored_event(
reason.to_string(),
additional_information
.into_iter()
.map(|(k, v)| (k.into(), v))
.collect::<IndexMap<_, _>>(),
)
.expect("Can't fail");
None
}
} }
}) })
// Filter the test case if the case is ignored.
.filter(|test| {
if test.case.ignore.is_some_and(|ignore| ignore) {
debug!(
file_path = %test.metadata.relative_path().display(),
case_idx = %test.case_idx,
"Case is ignored, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"Case is ignored",
IndexMap::new(),
)
.expect("Can't fail");
false
} else {
true
}
})
// Filtering based on the EVM version compatibility
.filter(|test| {
if let Some(evm_version_requirement) = test.metadata.required_evm_version {
let leader_compatibility = evm_version_requirement
.matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
let follower_compatibility = evm_version_requirement
.matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
let is_allowed = leader_compatibility && follower_compatibility;
if !is_allowed {
debug!(
file_path = %test.metadata.relative_path().display(),
case_idx = %test.case_idx,
leader_compatibility,
follower_compatibility,
"EVM Version is incompatible, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"EVM version is incompatible with either the leader or the follower",
IndexMap::from_iter([
(
"test_desired_evm_version".to_string(),
serde_json::to_value(test.metadata.required_evm_version)
.expect("Can't fail")
),
(
"leader_compatibility".to_string(),
serde_json::to_value(leader_compatibility)
.expect("Can't fail")
),
(
"follower_compatibility".to_string(),
serde_json::to_value(follower_compatibility)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
is_allowed
} else {
true
}
});
stream::iter(filtered_tests)
// Filter based on the compiler compatibility
.filter_map(move |test| async move {
let leader_support = does_compiler_support_mode::<L>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
let follower_support = does_compiler_support_mode::<F>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
let is_allowed = leader_support && follower_support;
if !is_allowed {
debug!(
file_path = %test.metadata.relative_path().display(),
leader_support,
follower_support,
"Compilers do not support this, throwing case out"
);
test
.reporter
.report_test_ignored_event(
"Compilers do not support this mode either for the leader or for the follower.",
IndexMap::from_iter([
(
"leader_support".to_string(),
serde_json::to_value(leader_support)
.expect("Can't fail")
),
(
"follower_support".to_string(),
serde_json::to_value(follower_support)
.expect("Can't fail")
)
])
)
.expect("Can't fail");
}
is_allowed.then_some(test)
})
}
async fn does_compiler_support_mode<P: Platform>(
args: &Arguments,
mode: &Mode,
) -> anyhow::Result<bool> {
let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone());
let compiler_path = P::Compiler::get_compiler_executable(args, compiler_version_or_requirement)
.await
.context("Failed to obtain compiler executable path")?;
let compiler_version = P::Compiler::new(compiler_path.clone())
.version()
.await
.context("Failed to query compiler version")?;
Ok(P::Compiler::supports_mode(
&compiler_version,
mode.optimize_setting,
mode.pipeline,
))
} }
 async fn start_driver_task<'a, L, F>(
     args: &Arguments,
-    tests: impl Stream<Item = Test<'a, L, F>>,
+    tests: impl Stream<Item = Test<'a>>,
 ) -> anyhow::Result<impl Future<Output = ()>>
 where
     L: Platform,
     F: Platform,
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
-    L::Compiler: 'a,
-    F::Compiler: 'a,
 {
     info!("Starting driver task");
+    let leader_nodes = Arc::new(
+        NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?,
+    );
+    let follower_nodes = Arc::new(
+        NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?,
+    );
     let number_concurrent_tasks = args.number_of_concurrent_tasks();
     let cached_compiler = Arc::new(
         CachedCompiler::new(
@@ -353,26 +477,38 @@ where
     // this number will automatically be low enough to address (2). The user can override this.
     Some(number_concurrent_tasks),
     move |test| {
+        let leader_nodes = leader_nodes.clone();
+        let follower_nodes = follower_nodes.clone();
         let cached_compiler = cached_compiler.clone();
         async move {
+            let leader_node = leader_nodes.round_robbin();
+            let follower_node = follower_nodes.round_robbin();
             test.reporter
                 .report_leader_node_assigned_event(
-                    test.leader_node.id(),
-                    *L::config_id(),
-                    test.leader_node.connection_string(),
+                    leader_node.id(),
+                    L::config_id(),
+                    leader_node.connection_string(),
                 )
                 .expect("Can't fail");
             test.reporter
                 .report_follower_node_assigned_event(
-                    test.follower_node.id(),
-                    *F::config_id(),
-                    test.follower_node.connection_string(),
+                    follower_node.id(),
+                    F::config_id(),
+                    follower_node.connection_string(),
                 )
                 .expect("Can't fail");
             let reporter = test.reporter.clone();
-            let result = handle_case_driver::<L, F>(test, cached_compiler).await;
+            let result = handle_case_driver::<L, F>(
+                test,
+                args,
+                cached_compiler,
+                leader_node,
+                follower_node,
+            )
+            .await;
             match result {
                 Ok(steps_executed) => reporter
@@ -479,52 +615,99 @@ async fn start_cli_reporting_task(reporter: Reporter) {
         mode = %test.mode,
         case_idx = %test.case_idx,
         case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
-        leader_node = test.leader_node.id(),
-        follower_node = test.follower_node.id(),
+        leader_node = leader_node.id(),
+        follower_node = follower_node.id(),
     )
 )]
-async fn handle_case_driver<'a, L, F>(
-    test: Test<'a, L, F>,
-    cached_compiler: Arc<CachedCompiler<'a>>,
+async fn handle_case_driver<L, F>(
+    test: Test<'_>,
+    config: &Arguments,
+    cached_compiler: Arc<CachedCompiler>,
+    leader_node: &L::Blockchain,
+    follower_node: &F::Blockchain,
 ) -> anyhow::Result<usize>
 where
     L: Platform,
     F: Platform,
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
-    L::Compiler: 'a,
-    F::Compiler: 'a,
 {
     let leader_reporter = test
         .reporter
-        .execution_specific_reporter(test.leader_node.id(), NodeDesignation::Leader);
+        .execution_specific_reporter(leader_node.id(), NodeDesignation::Leader);
     let follower_reporter = test
         .reporter
-        .execution_specific_reporter(test.follower_node.id(), NodeDesignation::Follower);
+        .execution_specific_reporter(follower_node.id(), NodeDesignation::Follower);
     let (
-        CompilerOutput {
-            contracts: leader_pre_link_contracts,
-        },
-        CompilerOutput {
-            contracts: follower_pre_link_contracts,
-        },
+        (
+            CompilerOutput {
+                contracts: leader_pre_link_contracts,
+            },
+            _,
+        ),
+        (
+            CompilerOutput {
+                contracts: follower_pre_link_contracts,
+            },
+            _,
+        ),
     ) = try_join!(
         cached_compiler.compile_contracts::<L>(
             test.metadata,
             test.metadata_file_path,
-            test.mode.clone(),
+            &test.mode,
+            config,
             None,
-            &test.leader_compiler,
-            &leader_reporter,
+            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
+                leader_reporter
+                    .report_pre_link_contracts_compilation_succeeded_event(
+                        compiler_version,
+                        compiler_path,
+                        is_cached,
+                        compiler_input,
+                        compiler_output,
+                    )
+                    .expect("Can't fail")
+            },
+            |compiler_version, compiler_path, compiler_input, failure_reason| {
+                leader_reporter
+                    .report_pre_link_contracts_compilation_failed_event(
+                        compiler_version,
+                        compiler_path,
+                        compiler_input,
+                        failure_reason,
+                    )
+                    .expect("Can't fail")
+            }
         ),
         cached_compiler.compile_contracts::<F>(
             test.metadata,
             test.metadata_file_path,
-            test.mode.clone(),
+            &test.mode,
+            config,
             None,
-            &test.follower_compiler,
-            &follower_reporter
+            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
+                follower_reporter
+                    .report_pre_link_contracts_compilation_succeeded_event(
+                        compiler_version,
+                        compiler_path,
+                        is_cached,
+                        compiler_input,
+                        compiler_output,
+                    )
+                    .expect("Can't fail")
+            },
+            |compiler_version, compiler_path, compiler_input, failure_reason| {
+                follower_reporter
+                    .report_pre_link_contracts_compilation_failed_event(
+                        compiler_version,
+                        compiler_path,
+                        compiler_input,
+                        failure_reason,
+                    )
+                    .expect("Can't fail")
+            }
         )
     )
     .context("Failed to compile pre-link contracts for leader/follower in parallel")?;
@@ -597,8 +780,8 @@ where
     );
     let (leader_receipt, follower_receipt) = try_join!(
-        test.leader_node.execute_transaction(leader_tx),
-        test.follower_node.execute_transaction(follower_tx)
+        leader_node.execute_transaction(leader_tx),
+        follower_node.execute_transaction(follower_tx)
     )?;
     debug!(
@@ -656,40 +839,86 @@ where
     }
     let (
-        CompilerOutput {
-            contracts: leader_post_link_contracts,
-        },
-        CompilerOutput {
-            contracts: follower_post_link_contracts,
-        },
+        (
+            CompilerOutput {
+                contracts: leader_post_link_contracts,
+            },
+            leader_compiler_version,
+        ),
+        (
+            CompilerOutput {
+                contracts: follower_post_link_contracts,
+            },
+            follower_compiler_version,
+        ),
     ) = try_join!(
         cached_compiler.compile_contracts::<L>(
             test.metadata,
             test.metadata_file_path,
-            test.mode.clone(),
+            &test.mode,
+            config,
             leader_deployed_libraries.as_ref(),
-            &test.leader_compiler,
-            &leader_reporter,
+            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
+                leader_reporter
+                    .report_post_link_contracts_compilation_succeeded_event(
+                        compiler_version,
+                        compiler_path,
+                        is_cached,
+                        compiler_input,
+                        compiler_output,
+                    )
+                    .expect("Can't fail")
+            },
+            |compiler_version, compiler_path, compiler_input, failure_reason| {
+                leader_reporter
+                    .report_post_link_contracts_compilation_failed_event(
+                        compiler_version,
+                        compiler_path,
+                        compiler_input,
+                        failure_reason,
+                    )
+                    .expect("Can't fail")
+            }
         ),
         cached_compiler.compile_contracts::<F>(
             test.metadata,
             test.metadata_file_path,
-            test.mode.clone(),
+            &test.mode,
+            config,
             follower_deployed_libraries.as_ref(),
-            &test.follower_compiler,
-            &follower_reporter
+            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
+                follower_reporter
+                    .report_post_link_contracts_compilation_succeeded_event(
+                        compiler_version,
+                        compiler_path,
+                        is_cached,
+                        compiler_input,
+                        compiler_output,
+                    )
+                    .expect("Can't fail")
+            },
+            |compiler_version, compiler_path, compiler_input, failure_reason| {
+                follower_reporter
+                    .report_post_link_contracts_compilation_failed_event(
+                        compiler_version,
+                        compiler_path,
+                        compiler_input,
+                        failure_reason,
+                    )
+                    .expect("Can't fail")
+            }
         )
     )
     .context("Failed to compile post-link contracts for leader/follower in parallel")?;
     let leader_state = CaseState::<L>::new(
-        test.leader_compiler.version().clone(),
+        leader_compiler_version,
         leader_post_link_contracts,
         leader_deployed_libraries.unwrap_or_default(),
         leader_reporter,
     );
     let follower_state = CaseState::<F>::new(
-        test.follower_compiler.version().clone(),
+        follower_compiler_version,
         follower_post_link_contracts,
         follower_deployed_libraries.unwrap_or_default(),
         follower_reporter,
@@ -698,8 +927,8 @@ where
     let mut driver = CaseDriver::<L, F>::new(
         test.metadata,
         test.case,
-        test.leader_node,
-        test.follower_node,
+        leader_node,
+        follower_node,
         leader_state,
         follower_state,
     );
@@ -728,121 +957,60 @@ async fn execute_corpus(
     Ok(())
 }

-/// this represents a single "test"; a mode, path and collection of cases.
-#[derive(Clone)]
-struct Test<'a, L: Platform, F: Platform> {
-    metadata: &'a MetadataFile,
-    metadata_file_path: &'a Path,
-    mode: Cow<'a, Mode>,
-    case_idx: CaseIdx,
-    case: &'a Case,
-    leader_node: &'a <L as Platform>::Blockchain,
-    follower_node: &'a <F as Platform>::Blockchain,
-    leader_compiler: L::Compiler,
-    follower_compiler: F::Compiler,
-    reporter: TestSpecificReporter,
-}
+async fn compile_corpus(
+    config: &Arguments,
+    tests: &[MetadataFile],
+    platform: &TestingPlatform,
+    _: Reporter,
+    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
+) {
+    let tests = tests.iter().flat_map(|metadata| {
+        metadata
+            .solc_modes()
+            .into_iter()
+            .map(move |solc_mode| (metadata, solc_mode))
+    });
+    let file = tempfile::NamedTempFile::new().expect("Failed to create temp file");
+    let cached_compiler = CachedCompiler::new(file.path(), false)
+        .await
+        .map(Arc::new)
+        .expect("Failed to create the cached compiler");
+    let compilation_task =
+        futures::stream::iter(tests).for_each_concurrent(None, |(metadata, mode)| {
+            let cached_compiler = cached_compiler.clone();
+            async move {
+                match platform {
+                    TestingPlatform::Geth => {
+                        let _ = cached_compiler
+                            .compile_contracts::<Geth>(
+                                metadata,
+                                metadata.metadata_file_path.as_path(),
+                                &mode,
+                                config,
+                                None,
+                                |_, _, _, _, _| {},
+                                |_, _, _, _| {},
+                            )
+                            .await;
+                    }
+                    TestingPlatform::Kitchensink => {
+                        let _ = cached_compiler
+                            .compile_contracts::<Kitchensink>(
+                                metadata,
+                                metadata.metadata_file_path.as_path(),
+                                &mode,
+                                config,
+                                None,
+                                |_, _, _, _, _| {},
+                                |_, _, _, _| {},
+                            )
+                            .await;
+                    }
+                }
+            }
+        });
+    let _ = join!(compilation_task, report_aggregator_task);
+}
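compile_corpus is effectively a cache-warming pass: each metadata file fans out into its (metadata, mode) pairs, every pair is compiled concurrently with no-op reporting callbacks, and the final join! drives the report aggregator to completion alongside the compilation work. A stripped-down model of the fan-out (illustrative names only):

    use futures::{stream, StreamExt};

    // Every (metadata, mode) pair becomes one concurrent async job; the
    // `None` limit means no cap on the number of in-flight jobs.
    async fn fan_out(pairs: Vec<(&'static str, &'static str)>) {
        stream::iter(pairs)
            .for_each_concurrent(None, |(metadata, mode)| async move {
                println!("compiling {metadata} with mode {mode}");
            })
            .await;
    }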
-impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
-    /// Checks if this test can be run with the current configuration.
-    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
-        self.check_metadata_file_ignored()?;
-        self.check_case_file_ignored()?;
-        self.check_target_compatibility()?;
-        self.check_evm_version_compatibility()?;
-        self.check_compiler_compatibility()?;
-        Ok(())
-    }
-    /// Checks if the metadata file is ignored or not.
-    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
-        if self.metadata.ignore.is_some_and(|ignore| ignore) {
-            Err(("Metadata file is ignored.", indexmap! {}))
-        } else {
-            Ok(())
-        }
-    }
-    /// Checks if the case file is ignored or not.
-    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
-        if self.case.ignore.is_some_and(|ignore| ignore) {
-            Err(("Case is ignored.", indexmap! {}))
-        } else {
-            Ok(())
-        }
-    }
-    /// Checks if the leader and the follower both support the desired targets in the metadata file.
-    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
-        let leader_support =
-            <L::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
-        let follower_support =
-            <F::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
-        let is_allowed = leader_support && follower_support;
-        if is_allowed {
-            Ok(())
-        } else {
-            Err((
-                "Either the leader or the follower do not support the target desired by the test.",
-                indexmap! {
-                    "test_desired_targets" => json!(self.metadata.targets.as_ref()),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
-            ))
-        }
-    }
-    /// Checks the compatibility of the EVM version with the leader and follower nodes.
-    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
-        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
-            return Ok(());
-        };
-        let leader_support = evm_version_requirement
-            .matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
-        let follower_support = evm_version_requirement
-            .matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
-        let is_allowed = leader_support && follower_support;
-        if is_allowed {
-            Ok(())
-        } else {
-            Err((
-                "EVM version is incompatible with either the leader or the follower.",
-                indexmap! {
-                    "test_desired_evm_version" => json!(self.metadata.required_evm_version),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
-            ))
-        }
-    }
-    /// Checks if the leader and follower compilers support the mode that the test is for.
-    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
-        let leader_support = self
-            .leader_compiler
-            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
-        let follower_support = self
-            .follower_compiler
-            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
-        let is_allowed = leader_support && follower_support;
-        if is_allowed {
-            Ok(())
-        } else {
-            Err((
-                "Compilers do not support this mode either for the leader or for the follower.",
-                indexmap! {
-                    "mode" => json!(self.mode),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
-            ))
-        }
-    }
-}
-type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
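All of the removed checks funnel into one shape: Ok(()) keeps the test, and an Err carries a (reason, structured-context) pair ready for report_test_ignored_event, as at the top of this section. A self-contained sketch of that shape, with println! standing in for the reporter:

    use indexmap::{indexmap, IndexMap};
    use serde_json::{json, Value};

    type CheckResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;

    // Hypothetical check in the same shape as the removed helpers.
    fn check_example(ignored: bool) -> CheckResult {
        if ignored {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    fn main() {
        // The Err payload carries exactly what report_test_ignored_event
        // expects: a reason plus structured additional information.
        if let Err((reason, additional_information)) = check_example(true) {
            println!("{reason} {}", json!(additional_information));
        }
    }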
+1 -1
View File
@@ -64,7 +64,7 @@ impl Case {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().cloned().collect(),
+            None => Mode::all().collect(),
         }
     }
 }
+1 -1
View File
@@ -99,7 +99,7 @@ impl Metadata {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().cloned().collect(),
+            None => Mode::all().collect(),
         }
     }
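Both solc_modes call sites change identically: Mode::all() now yields owned Mode values rather than references, so the .cloned() adaptor is gone. As a sketch (default_modes is a hypothetical wrapper):

    // Collecting the default mode set no longer needs .cloned(), since the
    // iterator already yields owned Mode values.
    fn default_modes() -> Vec<Mode> {
        Mode::all().collect()
    }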
+21 -1
View File
@@ -1,6 +1,5 @@
 use anyhow::Context;
 use regex::Regex;
-use revive_dt_common::iterators::EitherIter;
 use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
 use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
@@ -177,6 +176,27 @@ impl ParsedMode {
    }
}
/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
enum EitherIter<A, B> {
A(A),
B(B),
}
impl<A, B> Iterator for EitherIter<A, B>
where
A: Iterator,
B: Iterator<Item = A::Item>,
{
type Item = A::Item;
fn next(&mut self) -> Option<Self::Item> {
match self {
EitherIter::A(iter) => iter.next(),
EitherIter::B(iter) => iter.next(),
}
}
}
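EitherIter exists so a function whose branches produce two different concrete iterator types can still return a single impl Iterator; with the shared crate-level version gone, this private copy (with the slightly looser B: Iterator<Item = A::Item> bound) keeps this module compiling. A usage sketch with a hypothetical function:

    // The two arms yield different iterator types (Filter vs. Range), but
    // EitherIter gives the function one concrete return type.
    fn evens_or_all(only_evens: bool) -> impl Iterator<Item = u32> {
        if only_evens {
            EitherIter::A((0..10).filter(|n| n % 2 == 0))
        } else {
            EitherIter::B(0..10)
        }
    }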
 #[cfg(test)]
 mod tests {
     use super::*;
+18 -2
View File
@@ -340,13 +340,21 @@ impl ReportAggregator {
         &mut self,
         event: PreLinkContractsCompilationFailedEvent,
     ) {
+        let include_input = self.report.config.report_include_compiler_input;
         let execution_information = self.execution_information(&event.execution_specifier);
+        let compiler_input = if include_input {
+            event.compiler_input
+        } else {
+            None
+        };
         execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
             reason: event.reason,
             compiler_version: event.compiler_version,
             compiler_path: event.compiler_path,
-            compiler_input: event.compiler_input,
+            compiler_input,
         });
     }
@@ -354,13 +362,21 @@ impl ReportAggregator {
         &mut self,
         event: PostLinkContractsCompilationFailedEvent,
     ) {
+        let include_input = self.report.config.report_include_compiler_input;
         let execution_information = self.execution_information(&event.execution_specifier);
+        let compiler_input = if include_input {
+            event.compiler_input
+        } else {
+            None
+        };
         execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
             reason: event.reason,
             compiler_version: event.compiler_version,
             compiler_path: event.compiler_path,
-            compiler_input: event.compiler_input,
+            compiler_input,
         });
     }
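Both failure handlers now apply the same gate: the raw compiler input, which can be very large, is only kept in the report when report_include_compiler_input is set. The pattern in isolation (a sketch; Option::filter expresses the same gate more compactly):

    // Keep the compiler input only when the user opted in; otherwise
    // store None in the failure report.
    fn gate_input(include_input: bool, compiler_input: Option<String>) -> Option<String> {
        compiler_input.filter(|_| include_input)
    }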
+3 -4
View File
@@ -9,7 +9,6 @@ use std::{
     sync::LazyLock,
 };
-use semver::Version;
 use tokio::sync::Mutex;
 use crate::download::SolcDownloader;
@@ -21,7 +20,7 @@ pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new
 pub(crate) async fn get_or_download(
     working_directory: &Path,
     downloader: &SolcDownloader,
-) -> anyhow::Result<(Version, PathBuf)> {
+) -> anyhow::Result<PathBuf> {
     let target_directory = working_directory
         .join(SOLC_CACHE_DIRECTORY)
         .join(downloader.version.to_string());
@@ -30,7 +29,7 @@ pub(crate) async fn get_or_download(
     let mut cache = SOLC_CACHER.lock().await;
     if cache.contains(&target_file) {
         tracing::debug!("using cached solc: {}", target_file.display());
-        return Ok((downloader.version.clone(), target_file));
+        return Ok(target_file);
     }
     create_dir_all(&target_directory).with_context(|| {
@@ -49,7 +48,7 @@ pub(crate) async fn get_or_download(
     })?;
     cache.insert(target_file.clone());
-    Ok((downloader.version.clone(), target_file))
+    Ok(target_file)
 }
 async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
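With the version dropped from the return type, get_or_download is purely a path cache; a caller that still needs the exact version can query the binary itself, as does_compiler_support_mode does earlier in this diff. The caching pattern in isolation (a sketch; note the lock is held across the download, trading parallel downloads of the same binary for a race-free cache, which matches the real code):

    use std::{collections::HashSet, path::PathBuf, sync::LazyLock};
    use tokio::sync::Mutex;

    // Process-wide set of already-downloaded paths behind an async Mutex.
    static CACHE: LazyLock<Mutex<HashSet<PathBuf>>> =
        LazyLock::new(|| Mutex::new(HashSet::new()));

    async fn ensure_downloaded(
        target: PathBuf,
        download: impl std::future::Future<Output = anyhow::Result<()>>,
    ) -> anyhow::Result<PathBuf> {
        let mut cache = CACHE.lock().await;
        if cache.contains(&target) {
            return Ok(target); // cache hit: skip the download entirely
        }
        download.await?;
        cache.insert(target.clone());
        Ok(target)
    }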
+1 -2
View File
@@ -10,7 +10,6 @@ use cache::get_or_download;
 use download::SolcDownloader;
 use revive_dt_common::types::VersionOrRequirement;
-use semver::Version;
 pub mod cache;
 pub mod download;
@@ -25,7 +24,7 @@ pub async fn download_solc(
     cache_directory: &Path,
     version: impl Into<VersionOrRequirement>,
     wasm: bool,
-) -> anyhow::Result<(Version, PathBuf)> {
+) -> anyhow::Result<PathBuf> {
     let downloader = if wasm {
         SolcDownloader::wasm(version).await
     } else if cfg!(target_os = "linux") {