diff --git a/Cargo.lock b/Cargo.lock
index 7e0f75d..91f9bb3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4518,7 +4518,6 @@ dependencies = [
  "clap",
  "futures",
  "indexmap 2.10.0",
- "once_cell",
  "revive-dt-common",
  "revive-dt-compiler",
  "revive-dt-config",
@@ -4530,7 +4529,6 @@ dependencies = [
  "serde",
  "serde_json",
  "temp-dir",
- "tempfile",
  "tokio",
  "tracing",
  "tracing-appender",
diff --git a/crates/common/src/iterators/either_iter.rs b/crates/common/src/iterators/either_iter.rs
new file mode 100644
index 0000000..d327c6f
--- /dev/null
+++ b/crates/common/src/iterators/either_iter.rs
@@ -0,0 +1,21 @@
+/// An iterator that could be either of two iterators.
+#[derive(Clone, Debug)]
+pub enum EitherIter<A, B> {
+    A(A),
+    B(B),
+}
+
+impl<A, B, T> Iterator for EitherIter<A, B>
+where
+    A: Iterator<Item = T>,
+    B: Iterator<Item = T>,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            EitherIter::A(iter) => iter.next(),
+            EitherIter::B(iter) => iter.next(),
+        }
+    }
+}
diff --git a/crates/common/src/iterators/mod.rs b/crates/common/src/iterators/mod.rs
index f94237a..ae1879a 100644
--- a/crates/common/src/iterators/mod.rs
+++ b/crates/common/src/iterators/mod.rs
@@ -1,3 +1,5 @@
+mod either_iter;
 mod files_with_extension_iterator;
 
+pub use either_iter::*;
 pub use files_with_extension_iterator::*;
diff --git a/crates/common/src/types/mode.rs b/crates/common/src/types/mode.rs
index 535add1..3e2d3a9 100644
--- a/crates/common/src/types/mode.rs
+++ b/crates/common/src/types/mode.rs
@@ -3,6 +3,7 @@ use semver::Version;
 use serde::{Deserialize, Serialize};
 use std::fmt::Display;
 use std::str::FromStr;
+use std::sync::LazyLock;
 
 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -34,14 +35,19 @@ impl Display for Mode {
 impl Mode {
     /// Return all of the available mode combinations.
-    pub fn all() -> impl Iterator<Item = Mode> {
-        ModePipeline::test_cases().flat_map(|pipeline| {
-            ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
-                pipeline,
-                optimize_setting,
-                version: None,
-            })
-        })
+    pub fn all() -> impl Iterator<Item = &'static Mode> {
+        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
+            ModePipeline::test_cases()
+                .flat_map(|pipeline| {
+                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
+                        pipeline,
+                        optimize_setting,
+                        version: None,
+                    })
+                })
+                .collect::<Vec<_>>()
+        });
+        ALL_MODES.iter()
     }
 
     /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
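The `EitherIter` added above exists to give two differently-typed iterators a single concrete return type; `tests_stream` in `crates/core/src/main.rs` later uses it to unify parsed per-test modes with the now-cached `Mode::all()` iterator. A minimal sketch of the idea, assuming the reconstructed generics above (the function below is illustrative, not part of the patch):

```rust
use revive_dt_common::iterators::EitherIter;

// `filter` and `map` produce different concrete iterator types, so neither
// branch alone could be returned as a single `impl Iterator`; EitherIter
// unifies them because both branches yield the same `Item` type.
fn evens_or_odds(even: bool) -> impl Iterator<Item = u32> {
    if even {
        EitherIter::A((0u32..10).filter(|n| n % 2 == 0))
    } else {
        EitherIter::B((0u32..10).map(|n| n * 2 + 1))
    }
}
```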
diff --git a/crates/compiler/src/constants.rs b/crates/compiler/src/constants.rs
deleted file mode 100644
index bdb87f1..0000000
--- a/crates/compiler/src/constants.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-use semver::Version;
-
-/// This is the first version of solc that supports the `--via-ir` flag / "viaIR" input JSON.
-pub const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs
index 3c3e7c9..5bef24e 100644
--- a/crates/compiler/src/lib.rs
+++ b/crates/compiler/src/lib.rs
@@ -3,8 +3,6 @@
 //! - Polkadot revive resolc compiler
 //! - Polkadot revive Wasm compiler
 
-mod constants;
-
 use std::{
     collections::HashMap,
     hash::Hash,
@@ -13,7 +11,7 @@ use std::{
 
 use alloy::json_abi::JsonAbi;
 use alloy_primitives::Address;
-use anyhow::Context;
+use anyhow::{Context, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
 
@@ -30,36 +28,36 @@ pub mod revive_resolc;
 pub mod solc;
 
 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler {
-    /// Extra options specific to the compiler.
-    type Options: Default + PartialEq + Eq + Hash;
+pub trait SolidityCompiler: Sized {
+    /// Instantiates a new compiler object.
+    ///
+    /// Based on the given [`Arguments`] and [`VersionOrRequirement`] this function instantiates a
+    /// new compiler object. Certain implementations of this trait might choose to cache the
+    /// compiler objects and return the same ones over and over again.
+    fn new(
+        config: &Arguments,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> impl Future<Output = Result<Self>>;
+
+    /// Returns the version of the compiler.
+    fn version(&self) -> &Version;
+
+    /// Returns the path of the compiler executable.
+    fn path(&self) -> &Path;
 
     /// The low-level compiler interface.
-    fn build(
-        &self,
-        input: CompilerInput,
-        additional_options: Self::Options,
-    ) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
+    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
 
-    fn new(solc_executable: PathBuf) -> Self;
-
-    fn get_compiler_executable(
-        config: &Arguments,
-        version: impl Into<VersionOrRequirement>,
-    ) -> impl Future<Output = anyhow::Result<PathBuf>>;
-
-    fn version(&self) -> impl Future<Output = anyhow::Result<Version>>;
-
     /// Does the compiler support the provided mode and version settings?
     fn supports_mode(
-        compiler_version: &Version,
-        optimize_setting: ModeOptimizerSetting,
+        &self,
+        optimizer_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool;
 }
 
 /// The generic compilation input configuration.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct CompilerInput {
     pub pipeline: Option<ModePipeline>,
     pub optimization: Option<ModeOptimizerSetting>,
@@ -80,21 +78,12 @@ pub struct CompilerOutput {
 }
 
 /// A generic builder style interface for configuring the supported compiler options.
-pub struct Compiler<T: SolidityCompiler> {
+#[derive(Default)]
+pub struct Compiler {
     input: CompilerInput,
-    additional_options: T::Options,
 }
 
-impl<T: SolidityCompiler> Default for Compiler<T> {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl<T> Compiler<T>
-where
-    T: SolidityCompiler,
-{
+impl Compiler {
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
@@ -107,7 +96,6 @@ where
                 libraries: Default::default(),
                 revert_string_handling: Default::default(),
             },
-            additional_options: T::Options::default(),
         }
     }
 
@@ -136,7 +124,7 @@ where
         self
     }
 
-    pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
+    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
         self.input.sources.insert(
             path.as_ref().to_path_buf(),
             read_to_string(path.as_ref()).context("Failed to read the contract source")?,
@@ -166,11 +154,6 @@ where
         self
     }
 
-    pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
-        self.additional_options = options.into();
-        self
-    }
-
     pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
         callback(self)
     }
@@ -179,17 +162,12 @@ where
         callback(self)
     }
 
-    pub async fn try_build(
-        self,
-        compiler_path: impl AsRef<Path>,
-    ) -> anyhow::Result<CompilerOutput> {
-        T::new(compiler_path.as_ref().to_path_buf())
-            .build(self.input, self.additional_options)
-            .await
+    pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
+        compiler.build(self.input).await
     }
 
-    pub fn input(&self) -> CompilerInput {
-        self.input.clone()
+    pub fn input(&self) -> &CompilerInput {
+        &self.input
     }
 }
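Putting the reworked trait together: a compiler is now instantiated once per requested version via `new` and passed to the builder by reference, instead of the builder spawning a binary from a bare path. A hedged sketch of the intended call flow, assuming the reconstructed signatures above (the source path is a placeholder):

```rust
use anyhow::Result;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, CompilerOutput, SolidityCompiler, solc::Solc};
use revive_dt_config::Arguments;
use semver::Version;

async fn compile_one(args: &Arguments) -> Result<CompilerOutput> {
    // `new` resolves the requested version and hands back a cached,
    // cheaply cloneable compiler handle.
    let solc = Solc::new(args, VersionOrRequirement::Version(Version::new(0, 8, 30))).await?;
    Compiler::new()
        .with_source("contract.sol")?
        .try_build(&solc)
        .await
}
```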
diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs
index 3a2012f..0a928af 100644
--- a/crates/compiler/src/revive_resolc.rs
+++ b/crates/compiler/src/revive_resolc.rs
@@ -3,8 +3,8 @@
 use std::{
     path::PathBuf,
-    process::{Command, Stdio},
-    sync::LazyLock,
+    process::Stdio,
+    sync::{Arc, LazyLock},
 };
 
 use dashmap::DashMap;
@@ -16,26 +16,61 @@ use revive_solc_json_interface::{
     SolcStandardJsonOutput,
 };
 
-use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
+use crate::{
+    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
+};
 use alloy::json_abi::JsonAbi;
-use anyhow::Context;
+use anyhow::{Context, Result};
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
-// TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
-// specified solc compiler. I believe that currently we completely ignore the specified solc binary
-// when invoking resolc which doesn't seem right if we're using solc as a compiler frontend.
-
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
-#[derive(Debug)]
-pub struct Resolc {
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Resolc(Arc<ResolcInner>);
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct ResolcInner {
+    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
+    solc: Solc,
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
 }
 
 impl SolidityCompiler for Resolc {
-    type Options = Vec<String>;
+    async fn new(
+        config: &Arguments,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> Result<Self> {
+        /// This is a cache of all of the resolc compiler objects. Since we do not currently
+        /// support multiple resolc compiler versions, the cache is keyed by the solc compiler
+        /// frontend (and therefore its version) to the resolc compiler.
+        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
+
+        let solc = Solc::new(config, version)
+            .await
+            .context("Failed to create the solc compiler frontend for resolc")?;
+
+        Ok(COMPILERS_CACHE
+            .entry(solc.clone())
+            .or_insert_with(|| {
+                Self(Arc::new(ResolcInner {
+                    solc,
+                    resolc_path: config.resolc.clone(),
+                }))
+            })
+            .clone())
+    }
+
+    fn version(&self) -> &Version {
+        // We currently return the solc compiler version since we do not support multiple resolc
+        // compiler versions.
+        self.0.solc.version()
+    }
+
+    fn path(&self) -> &std::path::Path {
+        &self.0.resolc_path
+    }
 
     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -52,8 +87,7 @@ impl SolidityCompiler for Resolc {
         // resolc. So, we need to go back to this later once it's supported.
         revert_string_handling: _,
         }: CompilerInput,
-        additional_options: Self::Options,
-    ) -> anyhow::Result<CompilerOutput> {
+    ) -> Result<CompilerOutput> {
         if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
             anyhow::bail!(
                 "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
             )
@@ -100,7 +134,7 @@ impl SolidityCompiler for Resolc {
             },
         };
 
-        let mut command = AsyncCommand::new(&self.resolc_path);
+        let mut command = AsyncCommand::new(self.path());
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -121,7 +155,7 @@ impl SolidityCompiler for Resolc {
         }
         let mut child = command
             .spawn()
-            .with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?;
+            .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
 
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
         let serialized_input = serde_json::to_vec(&input)
@@ -238,108 +272,11 @@ impl SolidityCompiler for Resolc {
         Ok(compiler_output)
     }
 
-    fn new(resolc_path: PathBuf) -> Self {
-        Resolc { resolc_path }
-    }
-
-    async fn get_compiler_executable(
-        config: &Arguments,
-        _version: impl Into<VersionOrRequirement>,
-    ) -> anyhow::Result<PathBuf> {
-        if !config.resolc.as_os_str().is_empty() {
-            return Ok(config.resolc.clone());
-        }
-
-        Ok(PathBuf::from("resolc"))
-    }
-
-    async fn version(&self) -> anyhow::Result<Version> {
-        /// This is a cache of the path of the compiler to the version number of the compiler. We
-        /// choose to cache the version in this way rather than through a field on the struct since
-        /// compiler objects are being created all the time from the path and the compiler object is
-        /// not reused over time.
-        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
-
-        match VERSION_CACHE.entry(self.resolc_path.clone()) {
-            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
-            dashmap::Entry::Vacant(vacant_entry) => {
-                let output = Command::new(self.resolc_path.as_path())
-                    .arg("--version")
-                    .stdout(Stdio::piped())
-                    .spawn()
-                    .with_context(|| {
-                        format!(
-                            "Failed to spawn resolc at {} to get version",
-                            self.resolc_path.display()
-                        )
-                    })?
-                    .wait_with_output()
-                    .with_context(|| {
-                        format!(
-                            "Failed waiting for resolc at {} to finish --version",
-                            self.resolc_path.display()
-                        )
-                    })?
-                    .stdout;
-
-                let output = String::from_utf8_lossy(&output);
-                let version_string = output
-                    .split("version ")
-                    .nth(1)
-                    .context("Version parsing failed")?
-                    .split("+")
-                    .next()
-                    .context("Version parsing failed")?;
-
-                let version = Version::parse(version_string).with_context(|| {
-                    format!("Failed to parse resolc semver from '{version_string}'")
-                })?;
-
-                vacant_entry.insert(version.clone());
-
-                Ok(version)
-            }
-        }
-    }
-
     fn supports_mode(
-        _compiler_version: &Version,
-        _optimize_setting: ModeOptimizerSetting,
+        &self,
+        optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
-        // We only support the Y (IE compile via Yul IR) mode here, which also means that we can
-        // only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
-        // needs this to translate to LLVM IR and then RISCV.
-
-        // Note: the original implementation of this function looked like the following:
-        // ```
-        // pipeline == ModePipeline::ViaYulIR && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
-        // ```
-        // However, that implementation is sadly incorrect since the version that's passed into
-        // this function is not the version of solc but the version of resolc.
-        // This is despite the fact that resolc depends on Solc for the initial Yul codegen.
-        // Therefore, we have skipped the version check until we do a better integration between
-        // resolc and solc.
-        pipeline == ModePipeline::ViaYulIR
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[tokio::test]
-    async fn compiler_version_can_be_obtained() {
-        // Arrange
-        let args = Arguments::default();
-        let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
-            .await
-            .unwrap();
-        let compiler = Resolc::new(path);
-
-        // Act
-        let version = compiler.version().await;
-
-        // Assert
-        let _ = version.expect("Failed to get version");
+        pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
     }
 }
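Both `Resolc::new` above and `Solc::new` below share the same caching shape: a process-wide `DashMap` that hands out `Arc`-backed clones, so repeated requests for the same key never rebuild the expensive state. A self-contained sketch of that pattern (the key and payload types here are illustrative, not the crate's):

```rust
use std::sync::{Arc, LazyLock};

use dashmap::DashMap;

// Stand-in for a compiler handle: cheap to clone because the expensive
// state lives behind an Arc.
#[derive(Clone)]
struct Handle(Arc<String>);

fn get_or_create(key: u32) -> Handle {
    // Process-wide cache: the first caller for a key pays the construction
    // cost, later callers receive a clone of the same Arc-backed handle.
    static CACHE: LazyLock<DashMap<u32, Handle>> = LazyLock::new(Default::default);
    CACHE
        .entry(key)
        .or_insert_with(|| Handle(Arc::new(format!("expensive-{key}"))))
        .clone()
}
```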
diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs
index 99b5a9a..d87f940 100644
--- a/crates/compiler/src/solc.rs
+++ b/crates/compiler/src/solc.rs
@@ -3,8 +3,8 @@
 use std::{
     path::PathBuf,
-    process::{Command, Stdio},
-    sync::LazyLock,
+    process::Stdio,
+    sync::{Arc, LazyLock},
 };
 
 use dashmap::DashMap;
@@ -12,10 +12,9 @@ use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::Arguments;
 use revive_dt_solc_binaries::download_solc;
 
-use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
 use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
 
-use anyhow::Context;
+use anyhow::{Context, Result};
 use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -26,13 +25,54 @@ use foundry_compilers_artifacts::{
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
-#[derive(Debug)]
-pub struct Solc {
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Solc(Arc<SolcInner>);
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct SolcInner {
+    /// The path of the solidity compiler executable that this object uses.
     solc_path: PathBuf,
+    /// The version of the solidity compiler executable that this object uses.
+    solc_version: Version,
 }
 
 impl SolidityCompiler for Solc {
-    type Options = ();
+    async fn new(
+        config: &Arguments,
+        version: impl Into<Option<VersionOrRequirement>>,
+    ) -> Result<Self> {
+        // This is a cache for the compiler objects so that whenever the same compiler version is
+        // requested the same object is returned. We do this as we do not want to keep cloning the
+        // compiler around.
+        static COMPILERS_CACHE: LazyLock<DashMap<Version, Solc>> = LazyLock::new(Default::default);
+
+        // We attempt to download the solc binary. Note the following: this call does the version
+        // resolution for us. Therefore, even if the download didn't proceed, this function will
+        // resolve the version requirement into a canonical version of the compiler. It's then up
+        // to us to either use the provided path or not.
+        let version = version.into().unwrap_or_else(|| config.solc.clone().into());
+        let (version, path) = download_solc(config.directory(), version, false)
+            .await
+            .context("Failed to download/get path to solc binary")?;
+
+        Ok(COMPILERS_CACHE
+            .entry(version.clone())
+            .or_insert_with(|| {
+                Self(Arc::new(SolcInner {
+                    solc_path: path,
+                    solc_version: version,
+                }))
+            })
+            .clone())
+    }
+
+    fn version(&self) -> &Version {
+        &self.0.solc_version
+    }
+
+    fn path(&self) -> &std::path::Path {
+        &self.0.solc_path
+    }
 
     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -47,19 +87,12 @@ impl SolidityCompiler for Solc {
             libraries,
             revert_string_handling,
         }: CompilerInput,
-        _: Self::Options,
-    ) -> anyhow::Result<CompilerOutput> {
-        let compiler_supports_via_ir = self
-            .version()
-            .await
-            .context("Failed to query solc version to determine via-ir support")?
-            >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
-
+    ) -> Result<CompilerOutput> {
         // Be careful to entirely omit the viaIR field if the compiler does not support it,
         // as it will error if you provide fields it does not know about. Because
         // `supports_mode` is called prior to instantiating a compiler, we should never
         // ask for something which is invalid.
-        let via_ir = match (pipeline, compiler_supports_via_ir) {
+        let via_ir = match (pipeline, self.compiler_supports_yul()) {
             (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
             (_pipeline, false) => None,
         };
@@ -119,7 +152,7 @@ impl SolidityCompiler for Solc {
             },
         };
 
-        let mut command = AsyncCommand::new(&self.solc_path);
+        let mut command = AsyncCommand::new(self.path());
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -140,7 +173,7 @@ impl SolidityCompiler for Solc {
         }
         let mut child = command
             .spawn()
-            .with_context(|| format!("Failed to spawn solc at {}", self.solc_path.display()))?;
+            .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
 
         let stdin = child.stdin.as_mut().expect("should be piped");
         let serialized_input = serde_json::to_vec(&input)
@@ -220,125 +253,21 @@ impl SolidityCompiler for Solc {
         Ok(compiler_output)
     }
 
-    fn new(solc_path: PathBuf) -> Self {
-        Self { solc_path }
-    }
-
-    async fn get_compiler_executable(
-        config: &Arguments,
-        version: impl Into<VersionOrRequirement>,
-    ) -> anyhow::Result<PathBuf> {
-        let path = download_solc(config.directory(), version, config.wasm)
-            .await
-            .context("Failed to download/get path to solc binary")?;
-        Ok(path)
-    }
-
-    async fn version(&self) -> anyhow::Result<Version> {
-        /// This is a cache of the path of the compiler to the version number of the compiler. We
-        /// choose to cache the version in this way rather than through a field on the struct since
-        /// compiler objects are being created all the time from the path and the compiler object is
-        /// not reused over time.
-        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
-
-        match VERSION_CACHE.entry(self.solc_path.clone()) {
-            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
-            dashmap::Entry::Vacant(vacant_entry) => {
-                // The following is the parsing code for the version from the solc version strings
-                // which look like the following:
-                // ```
-                // solc, the solidity compiler commandline interface
-                // Version: 0.8.30+commit.73712a01.Darwin.appleclang
-                // ```
-                let child = Command::new(self.solc_path.as_path())
-                    .arg("--version")
-                    .stdout(Stdio::piped())
-                    .spawn()
-                    .with_context(|| {
-                        format!(
-                            "Failed to spawn solc at {} to get version",
-                            self.solc_path.display()
-                        )
-                    })?;
-                let output = child.wait_with_output().with_context(|| {
-                    format!(
-                        "Failed waiting for solc at {} to finish --version",
-                        self.solc_path.display()
-                    )
-                })?;
-                let output = String::from_utf8_lossy(&output.stdout);
-                let version_line = output
-                    .split("Version: ")
-                    .nth(1)
-                    .context("Version parsing failed")?;
-                let version_string = version_line
-                    .split("+")
-                    .next()
-                    .context("Version parsing failed")?;
-
-                let version = Version::parse(version_string).with_context(|| {
-                    format!("Failed to parse solc semver from '{version_string}'")
-                })?;
-
-                vacant_entry.insert(version.clone());
-
-                Ok(version)
-            }
-        }
-    }
-
     fn supports_mode(
-        compiler_version: &Version,
+        &self,
         _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
         // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
         // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
         // is new enough.
         pipeline == ModePipeline::ViaEVMAssembly
-            || (pipeline == ModePipeline::ViaYulIR
-                && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
+            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
     }
 }
 
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[tokio::test]
-    async fn compiler_version_can_be_obtained() {
-        // Arrange
-        let args = Arguments::default();
-        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
-            .await
-            .unwrap();
-        let compiler = Solc::new(path);
-
-        // Act
-        let version = compiler.version().await;
-
-        // Assert
-        assert_eq!(
-            version.expect("Failed to get version"),
-            Version::new(0, 7, 6)
-        )
-    }
-
-    #[tokio::test]
-    async fn compiler_version_can_be_obtained1() {
-        // Arrange
-        let args = Arguments::default();
-        let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21))
-            .await
-            .unwrap();
-        let compiler = Solc::new(path);
-
-        // Act
-        let version = compiler.version().await;
-
-        // Assert
-        assert_eq!(
-            version.expect("Failed to get version"),
-            Version::new(0, 4, 21)
-        )
+impl Solc {
+    fn compiler_supports_yul(&self) -> bool {
+        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
+        self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
     }
 }
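The pipeline gating that `supports_mode` and `compiler_supports_yul` implement can be seen in isolation; a minimal sketch with a local stand-in for `ModePipeline` (the real enum lives in `revive-dt-common`):

```rust
use semver::Version;

// Mirrors the constant inlined into Solc::compiler_supports_yul above.
const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);

#[derive(PartialEq)]
enum Pipeline {
    ViaEVMAssembly,
    ViaYulIR,
}

// E (EVM assembly) is always accepted; Y (via Yul IR) only from 0.8.13 on.
fn supports(pipeline: Pipeline, version: &Version) -> bool {
    pipeline == Pipeline::ViaEVMAssembly
        || (pipeline == Pipeline::ViaYulIR && version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
}

fn main() {
    assert!(supports(Pipeline::ViaEVMAssembly, &Version::new(0, 4, 21)));
    assert!(!supports(Pipeline::ViaYulIR, &Version::new(0, 7, 6)));
    assert!(supports(Pipeline::ViaYulIR, &Version::new(0, 8, 30)));
}
```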
diff --git a/crates/compiler/tests/lib.rs b/crates/compiler/tests/lib.rs
index 80858f2..3e2af4f 100644
--- a/crates/compiler/tests/lib.rs
+++ b/crates/compiler/tests/lib.rs
@@ -1,5 +1,6 @@
 use std::path::PathBuf;
 
+use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
 use revive_dt_config::Arguments;
 use semver::Version;
@@ -8,17 +9,17 @@ use semver::Version;
 async fn contracts_can_be_compiled_with_solc() {
     // Arrange
     let args = Arguments::default();
-    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
+    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
         .await
         .unwrap();
 
     // Act
-    let output = Compiler::<Solc>::new()
+    let output = Compiler::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path)
+        .try_build(&solc)
         .await;
 
     // Assert
@@ -49,17 +50,17 @@
 async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
     let args = Arguments::default();
-    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
+    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
         .await
         .unwrap();
 
     // Act
-    let output = Compiler::<Resolc>::new()
+    let output = Compiler::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path)
+        .try_build(&resolc)
         .await;
 
     // Assert
diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs
index 21f84a9..ca245d7 100644
--- a/crates/config/src/lib.rs
+++ b/crates/config/src/lib.rs
@@ -84,10 +84,6 @@ pub struct Arguments {
     #[arg(short, long = "follower", default_value = "kitchensink")]
     pub follower: TestingPlatform,
 
-    /// Only compile against this testing platform (doesn't execute the tests).
-    #[arg(long = "compile-only")]
-    pub compile_only: Option<TestingPlatform>,
-
     /// Determines the amount of nodes that will be spawned for each chain.
     #[arg(long, default_value = "1")]
     pub number_of_nodes: usize,
diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml
index a4b2221..2eb6fdd 100644
--- a/crates/core/Cargo.toml
+++ b/crates/core/Cargo.toml
@@ -28,7 +28,6 @@ cacache = { workspace = true }
 clap = { workspace = true }
 futures = { workspace = true }
 indexmap = { workspace = true }
-once_cell = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
 tracing-appender = { workspace = true }
@@ -37,7 +36,6 @@ semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 temp-dir = { workspace = true }
-tempfile = { workspace = true }
 
 [lints]
 workspace = true
diff --git a/crates/core/src/cached_compiler.rs b/crates/core/src/cached_compiler.rs
index 378394e..ed59546 100644
--- a/crates/core/src/cached_compiler.rs
+++ b/crates/core/src/cached_compiler.rs
@@ -2,6 +2,7 @@
 //! be reused between runs.
 
 use std::{
+    borrow::Cow,
     collections::HashMap,
     path::{Path, PathBuf},
     sync::Arc,
@@ -9,13 +10,13 @@ use std::{
 
 use futures::FutureExt;
 use revive_dt_common::iterators::FilesWithExtensionIterator;
-use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_config::Arguments;
+use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
+use revive_dt_config::TestingPlatform;
 use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
 
 use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
 use anyhow::{Context as _, Error, Result};
-use once_cell::sync::Lazy;
+use revive_dt_report::ExecutionSpecificReporter;
 use semver::Version;
 use serde::{Deserialize, Serialize};
 use tokio::sync::{Mutex, RwLock};
@@ -23,9 +24,17 @@ use tracing::{Instrument, debug, debug_span, instrument};
 
 use crate::Platform;
 
-pub struct CachedCompiler(ArtifactsCache);
+pub struct CachedCompiler<'a> {
+    /// The cache that stores the compiled contracts.
+    artifacts_cache: ArtifactsCache,
 
-impl CachedCompiler {
+    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
+    /// come in for the same contract we never compile it more than once; it is compiled once and
+    /// all other tasks that requested the same compilation concurrently get the cached version.
+    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
+}
+
+impl<'a> CachedCompiler<'a> {
     pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
         let mut cache = ArtifactsCache::new(path);
         if invalidate_cache {
@@ -34,7 +43,10 @@ impl CachedCompiler {
                 .await
                 .context("Failed to invalidate compilation cache directory")?;
         }
-        Ok(Self(cache))
+        Ok(Self {
+            artifacts_cache: cache,
+            cache_key_lock: Default::default(),
+        })
     }
 
     /// Compiles or gets the compilation artifacts from the cache.
     #[instrument(
@@ -43,7 +55,7 @@ impl CachedCompiler {
         level = "debug",
         skip_all,
         fields(
-            metadata_file_path = %metadata_file_path.as_ref().display(),
+            metadata_file_path = %metadata_file_path.display(),
             %mode,
             platform = P::config_id().to_string()
         ),
    )]
@@ -51,76 +63,33 @@ impl CachedCompiler {
     pub async fn compile_contracts<P: Platform>(
         &self,
-        metadata: &Metadata,
-        metadata_file_path: impl AsRef<Path>,
-        mode: &Mode,
-        config: &Arguments,
+        metadata: &'a Metadata,
+        metadata_file_path: &'a Path,
+        mode: Cow<'a, Mode>,
         deployed_libraries: Option<&HashMap<ContractIdent, Address>>,
-        compilation_success_report_callback: impl Fn(
-            Version,
-            PathBuf,
-            bool,
-            Option<CompilerInput>,
-            CompilerOutput,
-        ) + Clone,
-        compilation_failure_report_callback: impl Fn(
-            Option<Version>,
-            Option<PathBuf>,
-            Option<CompilerInput>,
-            String,
-        ),
-    ) -> Result<(CompilerOutput, Version)> {
-        static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
-            Lazy::new(Default::default);
-
-        let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone());
-        let compiler_path = <P::Compiler as SolidityCompiler>::get_compiler_executable(
-            config,
-            compiler_version_or_requirement,
-        )
-        .await
-        .inspect_err(|err| {
-            compilation_failure_report_callback(None, None, None, format!("{err:#}"))
-        })
-        .context("Failed to obtain compiler executable path")?;
-        let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
-            .version()
-            .await
-            .inspect_err(|err| {
-                compilation_failure_report_callback(
-                    None,
-                    Some(compiler_path.clone()),
-                    None,
-                    format!("{err:#}"),
-                )
-            })
-            .context("Failed to query compiler version")?;
-
+        compiler: &P::Compiler,
+        reporter: &ExecutionSpecificReporter,
+    ) -> Result<CompilerOutput> {
         let cache_key = CacheKey {
-            platform_key: P::config_id().to_string(),
-            compiler_version: compiler_version.clone(),
-            metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
+            platform_key: P::config_id(),
+            compiler_version: compiler.version().clone(),
+            metadata_file_path,
             solc_mode: mode.clone(),
         };
 
         let compilation_callback = || {
-            let compiler_path = compiler_path.clone();
-            let compiler_version = compiler_version.clone();
-            let compilation_success_report_callback = compilation_success_report_callback.clone();
             async move {
                 compile_contracts::<P>(
                     metadata
                         .directory()
                         .context("Failed to get metadata directory while preparing compilation")?,
-                    compiler_path,
-                    compiler_version,
                     metadata
                         .files_to_compile()
                         .context("Failed to enumerate files to compile from metadata")?,
-                    mode,
+                    &mode,
                     deployed_libraries,
-                    compilation_success_report_callback,
-                    compilation_failure_report_callback,
+                    compiler,
+                    reporter,
                 )
                 .map(|compilation_result| compilation_result.map(CacheValue::new))
                 .await
             }
         };
 
@@ -153,12 +122,15 @@ impl CachedCompiler {
         // Lock this specific cache key such that we do not get inconsistent state. We want
         // that when multiple cases come in asking for the compilation artifacts then they
         // don't all trigger a compilation if there's a cache miss. Hence, the lock here.
-        let read_guard = CACHE_KEY_LOCK.read().await;
+        let read_guard = self.cache_key_lock.read().await;
         let mutex = match read_guard.get(&cache_key).cloned() {
-            Some(value) => value,
+            Some(value) => {
+                drop(read_guard);
+                value
+            }
             None => {
                 drop(read_guard);
-                CACHE_KEY_LOCK
+                self.cache_key_lock
                     .write()
                     .await
                     .entry(cache_key.clone())
@@ -168,15 +140,29 @@ impl CachedCompiler {
        };
        let _guard = mutex.lock().await;
 
-        match self.0.get(&cache_key).await {
+        match self.artifacts_cache.get(&cache_key).await {
             Some(cache_value) => {
-                compilation_success_report_callback(
-                    compiler_version.clone(),
-                    compiler_path,
-                    true,
-                    None,
-                    cache_value.compiler_output.clone(),
-                );
+                if deployed_libraries.is_some() {
+                    reporter
+                        .report_post_link_contracts_compilation_succeeded_event(
+                            compiler.version().clone(),
+                            compiler.path(),
+                            true,
+                            None,
+                            cache_value.compiler_output.clone(),
+                        )
+                        .expect("Can't happen");
+                } else {
+                    reporter
+                        .report_pre_link_contracts_compilation_succeeded_event(
+                            compiler.version().clone(),
+                            compiler.path(),
+                            true,
+                            None,
+                            cache_value.compiler_output.clone(),
+                        )
+                        .expect("Can't happen");
+                }
                 cache_value.compiler_output
             }
             None => {
@@ -189,38 +175,24 @@ impl CachedCompiler {
             }
         };
 
-        Ok((compiled_contracts, compiler_version))
+        Ok(compiled_contracts)
     }
 }
 
-#[allow(clippy::too_many_arguments)]
 async fn compile_contracts<P: Platform>(
     metadata_directory: impl AsRef<Path>,
-    compiler_path: impl AsRef<Path>,
-    compiler_version: Version,
     mut files_to_compile: impl Iterator<Item = PathBuf>,
     mode: &Mode,
     deployed_libraries: Option<&HashMap<ContractIdent, Address>>,
-    compilation_success_report_callback: impl Fn(
-        Version,
-        PathBuf,
-        bool,
-        Option<CompilerInput>,
-        CompilerOutput,
-    ),
-    compilation_failure_report_callback: impl Fn(
-        Option<Version>,
-        Option<PathBuf>,
-        Option<CompilerInput>,
-        String,
-    ),
+    compiler: &P::Compiler,
+    reporter: &ExecutionSpecificReporter,
 ) -> Result<CompilerOutput> {
     let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
         .with_allowed_extension("sol")
         .with_use_cached_fs(true)
         .collect::<Vec<_>>();
 
-    let compiler = Compiler::<P::Compiler>::new()
+    let compilation = Compiler::new()
         .with_allow_path(metadata_directory)
         // Handling the modes
         .with_optimization(mode.optimize_setting)
@@ -228,14 +200,6 @@ async fn compile_contracts(
         // Adding the contract sources to the compiler.
         .try_then(|compiler| {
             files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
-        })
-        .inspect_err(|err| {
-            compilation_failure_report_callback(
-                Some(compiler_version.clone()),
-                Some(compiler_path.as_ref().to_path_buf()),
-                None,
-                format!("{err:#}"),
-            )
         })?
         // Adding the deployed libraries to the compiler.
         .then(|compiler| {
@@ -253,27 +217,55 @@ async fn compile_contracts(
             })
         });
 
-    let compiler_input = compiler.input();
-    let compiler_output = compiler
-        .try_build(compiler_path.as_ref())
-        .await
-        .inspect_err(|err| {
-            compilation_failure_report_callback(
-                Some(compiler_version.clone()),
-                Some(compiler_path.as_ref().to_path_buf()),
-                Some(compiler_input.clone()),
-                format!("{err:#}"),
-            )
-        })
-        .context("Failed to configure compiler with sources and options")?;
-    compilation_success_report_callback(
-        compiler_version,
-        compiler_path.as_ref().to_path_buf(),
-        false,
-        Some(compiler_input),
-        compiler_output.clone(),
-    );
-    Ok(compiler_output)
+    let input = compilation.input().clone();
+    let output = compilation.try_build(compiler).await;
+
+    match (output.as_ref(), deployed_libraries.is_some()) {
+        (Ok(output), true) => {
+            reporter
+                .report_post_link_contracts_compilation_succeeded_event(
+                    compiler.version().clone(),
+                    compiler.path(),
+                    false,
+                    input,
+                    output.clone(),
+                )
+                .expect("Can't happen");
+        }
+        (Ok(output), false) => {
+            reporter
+                .report_pre_link_contracts_compilation_succeeded_event(
+                    compiler.version().clone(),
+                    compiler.path(),
+                    false,
+                    input,
+                    output.clone(),
+                )
+                .expect("Can't happen");
+        }
+        (Err(err), true) => {
+            reporter
+                .report_post_link_contracts_compilation_failed_event(
+                    compiler.version().clone(),
+                    compiler.path().to_path_buf(),
+                    input,
+                    format!("{err:#}"),
+                )
+                .expect("Can't happen");
+        }
+        (Err(err), false) => {
+            reporter
+                .report_pre_link_contracts_compilation_failed_event(
+                    compiler.version().clone(),
+                    compiler.path().to_path_buf(),
+                    input,
+                    format!("{err:#}"),
+                )
+                .expect("Can't happen");
+        }
+    }
+
+    output
 }
 
 struct ArtifactsCache {
@@ -297,7 +289,7 @@ impl ArtifactsCache {
     }
 
     #[instrument(level = "debug", skip_all, err)]
-    pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
+    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
         let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
         let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
         cacache::write(self.path.as_path(), key.encode_hex(), value)
@@ -308,7 +300,7 @@ impl ArtifactsCache {
         Ok(())
     }
 
-    pub async fn get(&self, key: &CacheKey) -> Option<CacheValue> {
+    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
         let key = bson::to_vec(key).ok()?;
         let value = cacache::read(self.path.as_path(), key.encode_hex())
             .await
@@ -320,7 +312,7 @@ impl ArtifactsCache {
     #[instrument(level = "debug", skip_all, err)]
     pub async fn get_or_insert_with(
         &self,
-        key: &CacheKey,
+        key: &CacheKey<'_>,
         callback: impl AsyncFnOnce() -> Result<CacheValue>,
     ) -> Result<CacheValue> {
         match self.get(key).await {
@@ -338,20 +330,20 @@ impl ArtifactsCache {
     }
 }
 
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
-struct CacheKey {
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
+struct CacheKey<'a> {
     /// The platform name that this artifact was compiled for. For example, this could be EVM or
     /// PVM.
-    platform_key: String,
+    platform_key: &'a TestingPlatform,
 
     /// The version of the compiler that was used to compile the artifacts.
     compiler_version: Version,
 
     /// The path of the metadata file that the compilation artifacts are for.
-    metadata_file_path: PathBuf,
+    metadata_file_path: &'a Path,
 
     /// The mode that the compilation artifacts were compiled with.
-    solc_mode: Mode,
+    solc_mode: Cow<'a, Mode>,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
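The `cache_key_lock` used by `compile_contracts` above is a per-key double-checked lock: a short read lock locates the key's mutex, a write lock is taken only on a miss, and the expensive compilation is then serialized behind that key's own mutex instead of a global one. A self-contained sketch of the mechanism (illustrative key type):

```rust
use std::{collections::HashMap, sync::Arc};

use tokio::sync::{Mutex, RwLock};

async fn locked_for_key(
    locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
    key: &str,
) -> Arc<Mutex<()>> {
    // Fast path: the per-key mutex already exists; only a read lock is needed.
    if let Some(m) = locks.read().await.get(key).cloned() {
        return m;
    }
    // Slow path: upgrade to a write lock and create the mutex if another
    // task did not already insert it in the meantime.
    locks
        .write()
        .await
        .entry(key.to_string())
        .or_default()
        .clone()
}
```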
diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs
index 1e5e5c9..b729b42 100644
--- a/crates/core/src/lib.rs
+++ b/crates/core/src/lib.rs
@@ -19,7 +19,7 @@ pub trait Platform {
     type Compiler: SolidityCompiler;
 
     /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
-    fn config_id() -> TestingPlatform;
+    fn config_id() -> &'static TestingPlatform;
 }
 
 #[derive(Default)]
@@ -29,8 +29,8 @@ impl Platform for Geth {
     type Blockchain = geth::GethNode;
     type Compiler = solc::Solc;
 
-    fn config_id() -> TestingPlatform {
-        TestingPlatform::Geth
+    fn config_id() -> &'static TestingPlatform {
+        &TestingPlatform::Geth
     }
 }
 
@@ -41,7 +41,7 @@ impl Platform for Kitchensink {
     type Blockchain = KitchensinkNode;
     type Compiler = revive_resolc::Resolc;
 
-    fn config_id() -> TestingPlatform {
-        TestingPlatform::Kitchensink
+    fn config_id() -> &'static TestingPlatform {
+        &TestingPlatform::Kitchensink
     }
 }
diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs
index 40d3203..da46d98 100644
--- a/crates/core/src/main.rs
+++ b/crates/core/src/main.rs
@@ -1,6 +1,7 @@
 mod cached_compiler;
 
 use std::{
+    borrow::Cow,
     collections::{BTreeMap, HashMap},
     io::{BufWriter, Write, stderr},
     path::Path,
@@ -16,19 +17,20 @@ use anyhow::Context;
 use clap::Parser;
 use futures::stream;
 use futures::{Stream, StreamExt};
-use indexmap::IndexMap;
+use indexmap::{IndexMap, indexmap};
 use revive_dt_node_interaction::EthereumNode;
 use revive_dt_report::{
     NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
     TestSpecificReporter, TestSpecifier,
 };
+use serde_json::{Value, json};
 use temp_dir::TempDir;
-use tokio::{join, try_join};
-use tracing::{debug, info, info_span, instrument};
+use tokio::try_join;
+use tracing::{debug, error, info, info_span, instrument};
 use tracing_appender::non_blocking::WorkerGuard;
 use tracing_subscriber::{EnvFilter, FmtSubscriber};
 
-use revive_dt_common::types::Mode;
+use revive_dt_common::{iterators::EitherIter, types::Mode};
 use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
 use revive_dt_config::*;
 use revive_dt_core::{
@@ -48,17 +50,6 @@ use crate::cached_compiler::CachedCompiler;
 
 static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
 
-/// This represents a single "test"; a mode, path and collection of cases.
-#[derive(Clone, Debug)]
-struct Test<'a> {
-    metadata: &'a MetadataFile,
-    metadata_file_path: &'a Path,
-    mode: Mode,
-    case_idx: CaseIdx,
-    case: &'a Case,
-    reporter: TestSpecificReporter,
-}
-
 fn main() -> anyhow::Result<()> {
     let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?;
     info!(
@@ -93,14 +84,9 @@ fn main() -> anyhow::Result<()> {
             })
             .collect::<Vec<_>>();
 
-        match &args.compile_only {
-            Some(platform) => {
-                compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await
-            }
-            None => execute_corpus(&args, &tests, reporter, report_aggregator_task)
-                .await
-                .context("Failed to execute corpus")?,
-        }
+        execute_corpus(&args, &tests, reporter, report_aggregator_task)
+            .await
+            .context("Failed to execute corpus")?;
 
         Ok(())
     };
@@ -185,8 +171,20 @@ where
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
 {
-    let tests = prepare_tests::<L, F>(args, metadata_files, reporter.clone());
-    let driver_task = start_driver_task::<L, F>(args, tests)
+    let leader_nodes =
+        NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?;
+    let follower_nodes =
+        NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?;
+
+    let tests_stream = tests_stream(
+        args,
+        metadata_files.iter(),
+        &leader_nodes,
+        &follower_nodes,
+        reporter.clone(),
+    )
+    .await;
+    let driver_task = start_driver_task::<L, F>(args, tests_stream)
         .await
         .context("Failed to start driver task")?;
     let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -197,19 +195,21 @@ where
 
     Ok(())
 }
 
-fn prepare_tests<'a, L, F>(
+async fn tests_stream<'a, L, F>(
     args: &Arguments,
-    metadata_files: &'a [MetadataFile],
+    metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
+    leader_node_pool: &'a NodePool<L::Blockchain>,
+    follower_node_pool: &'a NodePool<F::Blockchain>,
     reporter: Reporter,
-) -> impl Stream<Item = Test<'a>>
+) -> impl Stream<Item = Test<'a, L, F>>
 where
     L: Platform,
     F: Platform,
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
 {
-    let filtered_tests = metadata_files
-        .iter()
+    let tests = metadata_files
+        .into_iter()
         .flat_map(|metadata_file| {
             metadata_file
                 .cases
@@ -219,244 +219,120 @@ where
             })
         // Flatten over the modes, prefer the case modes over the metadata file modes.
         .flat_map(|(metadata_file, case_idx, case)| {
-            case.modes
-                .as_ref()
-                .or(metadata_file.modes.as_ref())
-                .map(|modes| ParsedMode::many_to_modes(modes.iter()).collect::<Vec<_>>())
-                .unwrap_or(Mode::all().collect())
-                .into_iter()
-                .map(move |mode| (metadata_file, case_idx, case, mode))
-        })
-        .map(move |(metadata_file, case_idx, case, mode)| Test {
-            metadata: metadata_file,
-            metadata_file_path: metadata_file.metadata_file_path.as_path(),
-            mode: mode.clone(),
-            case_idx: CaseIdx::new(case_idx),
-            case,
-            reporter: reporter.test_specific_reporter(Arc::new(TestSpecifier {
-                solc_mode: mode.clone(),
-                metadata_file_path: metadata_file.metadata_file_path.clone(),
-                case_idx: CaseIdx::new(case_idx),
-            })),
-        })
-        .inspect(|test| {
-            test.reporter
-                .report_test_case_discovery_event()
-                .expect("Can't fail")
-        })
-        .collect::<Vec<_>>()
-        .into_iter()
-        // Filter the test out if the leader and follower do not support the target.
-        .filter(|test| {
-            let leader_support =
-                <L::Blockchain as revive_dt_node::Node>::matches_target(test.metadata.targets.as_deref());
-            let follower_support =
-                <F::Blockchain as revive_dt_node::Node>::matches_target(test.metadata.targets.as_deref());
-            let is_allowed = leader_support && follower_support;
+            let reporter = reporter.clone();
 
-            if !is_allowed {
-                debug!(
-                    file_path = %test.metadata.relative_path().display(),
-                    leader_support,
-                    follower_support,
-                    "Target is not supported, throwing metadata file out"
-                );
-                test
-                    .reporter
-                    .report_test_ignored_event(
-                        "Either the leader or the follower do not support the target desired by the test",
-                        IndexMap::from_iter([
-                            (
-                                "test_desired_targets".to_string(),
-                                serde_json::to_value(test.metadata.targets.as_ref())
-                                    .expect("Can't fail")
-                            ),
-                            (
-                                "leader_support".to_string(),
-                                serde_json::to_value(leader_support)
-                                    .expect("Can't fail")
-                            ),
-                            (
-                                "follower_support".to_string(),
-                                serde_json::to_value(follower_support)
-                                    .expect("Can't fail")
-                            )
-                        ])
-                    )
-                    .expect("Can't fail");
-            }
+            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
+            let modes = match modes {
+                Some(modes) => EitherIter::A(
+                    ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
+                ),
+                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
+            };
 
-            is_allowed
+            modes.into_iter().map(move |mode| {
+                (
+                    metadata_file,
+                    case_idx,
+                    case,
+                    mode.clone(),
+                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
+                        solc_mode: mode.as_ref().clone(),
+                        metadata_file_path: metadata_file.metadata_file_path.clone(),
+                        case_idx: CaseIdx::new(case_idx),
+                    })),
+                )
+            })
         })
-        // Filter the test out if the metadata file is ignored.
-        .filter(|test| {
-            if test.metadata.ignore.is_some_and(|ignore| ignore) {
-                debug!(
-                    file_path = %test.metadata.relative_path().display(),
-                    "Metadata file is ignored, throwing case out"
-                );
-                test
-                    .reporter
-                    .report_test_ignored_event(
-                        "Metadata file is ignored, therefore all cases are ignored",
-                        IndexMap::new(),
-                    )
-                    .expect("Can't fail");
-                false
-            } else {
-                true
-            }
-        })
-        // Filter the test case if the case is ignored.
-        .filter(|test| {
-            if test.case.ignore.is_some_and(|ignore| ignore) {
-                debug!(
-                    file_path = %test.metadata.relative_path().display(),
-                    case_idx = %test.case_idx,
-                    "Case is ignored, throwing case out"
-                );
-                test
-                    .reporter
-                    .report_test_ignored_event(
-                        "Case is ignored",
-                        IndexMap::new(),
-                    )
-                    .expect("Can't fail");
-                false
-            } else {
-                true
-            }
-        })
-        // Filtering based on the EVM version compatibility
-        .filter(|test| {
-            if let Some(evm_version_requirement) = test.metadata.required_evm_version {
-                let leader_compatibility = evm_version_requirement
-                    .matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
-                let follower_compatibility = evm_version_requirement
-                    .matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
-                let is_allowed = leader_compatibility && follower_compatibility;
+        .collect::<Vec<_>>();
 
-                if !is_allowed {
+    // Note: before we do any kind of filtering or process the iterator in any way, we need to
+    // inform the report aggregator of all of the cases that were found, as it keeps a state of
+    // the test cases for its internal use.
+    for (_, _, _, _, reporter) in tests.iter() {
+        reporter
+            .report_test_case_discovery_event()
+            .expect("Can't fail")
+    }
+
+    stream::iter(tests.into_iter())
+        .filter_map(
+            move |(metadata_file, case_idx, case, mode, reporter)| async move {
+                let leader_compiler = <L::Compiler as SolidityCompiler>::new(
+                    args,
+                    mode.version.clone().map(Into::into),
+                )
+                .await
+                .inspect_err(|err| error!(?err, "Failed to instantiate the leader compiler"))
+                .ok()?;
+
+                let follower_compiler = <F::Compiler as SolidityCompiler>::new(
+                    args,
+                    mode.version.clone().map(Into::into),
+                )
+                .await
+                .inspect_err(|err| error!(?err, "Failed to instantiate the follower compiler"))
+                .ok()?;
+
+                let leader_node = leader_node_pool.round_robbin();
+                let follower_node = follower_node_pool.round_robbin();
+
+                Some(Test::<L, F> {
+                    metadata: metadata_file,
+                    metadata_file_path: metadata_file.metadata_file_path.as_path(),
+                    mode: mode.clone(),
+                    case_idx: CaseIdx::new(case_idx),
+                    case,
+                    leader_node,
+                    follower_node,
+                    leader_compiler,
+                    follower_compiler,
+                    reporter,
+                })
+            },
+        )
+        .filter_map(move |test| async move {
+            match test.check_compatibility() {
+                Ok(()) => Some(test),
+                Err((reason, additional_information)) => {
                     debug!(
-                        file_path = %test.metadata.relative_path().display(),
+                        metadata_file_path = %test.metadata.metadata_file_path.display(),
                         case_idx = %test.case_idx,
-                        leader_compatibility,
-                        follower_compatibility,
-                        "EVM Version is incompatible, throwing case out"
+                        mode = %test.mode,
+                        reason,
+                        additional_information =
+                            serde_json::to_string(&additional_information).unwrap(),
+
+                        "Ignoring Test Case"
                     );
-                    test
-                        .reporter
+                    test.reporter
                         .report_test_ignored_event(
-                            "EVM version is incompatible with either the leader or the follower",
-                            IndexMap::from_iter([
-                                (
-                                    "test_desired_evm_version".to_string(),
-                                    serde_json::to_value(test.metadata.required_evm_version)
-                                        .expect("Can't fail")
-                                ),
-                                (
-                                    "leader_compatibility".to_string(),
-                                    serde_json::to_value(leader_compatibility)
-                                        .expect("Can't fail")
-                                ),
-                                (
-                                    "follower_compatibility".to_string(),
-                                    serde_json::to_value(follower_compatibility)
-                                        .expect("Can't fail")
-                                )
-                            ])
+                            reason.to_string(),
+                            additional_information
+                                .into_iter()
+                                .map(|(k, v)| (k.into(), v))
+                                .collect::<IndexMap<_, _>>(),
                         )
                         .expect("Can't fail");
+                    None
                 }
-
-                is_allowed
-            } else {
-                true
             }
-        });
-
-    stream::iter(filtered_tests)
-        // Filter based on the compiler compatibility
-        .filter_map(move |test| async move {
-            let leader_support = does_compiler_support_mode::<L>(args, &test.mode)
-                .await
-                .ok()
-                .unwrap_or(false);
-            let follower_support = does_compiler_support_mode::<F>(args, &test.mode)
-                .await
-                .ok()
-                .unwrap_or(false);
-            let is_allowed = leader_support && follower_support;
-
-            if !is_allowed {
-                debug!(
-                    file_path = %test.metadata.relative_path().display(),
-                    leader_support,
-                    follower_support,
-                    "Compilers do not support this, throwing case out"
-                );
-                test
-                    .reporter
-                    .report_test_ignored_event(
-                        "Compilers do not support this mode either for the leader or for the follower.",
-                        IndexMap::from_iter([
-                            (
-                                "leader_support".to_string(),
-                                serde_json::to_value(leader_support)
-                                    .expect("Can't fail")
-                            ),
-                            (
-                                "follower_support".to_string(),
-                                serde_json::to_value(follower_support)
-                                    .expect("Can't fail")
-                            )
-                        ])
-                    )
-                    .expect("Can't fail");
-            }
-
-            is_allowed.then_some(test)
         })
 }
 
-async fn does_compiler_support_mode<P: Platform>(
-    args: &Arguments,
-    mode: &Mode,
-) -> anyhow::Result<bool> {
-    let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone());
-    let compiler_path = P::Compiler::get_compiler_executable(args, compiler_version_or_requirement)
-        .await
-        .context("Failed to obtain compiler executable path")?;
-    let compiler_version = P::Compiler::new(compiler_path.clone())
-        .version()
-        .await
-        .context("Failed to query compiler version")?;
-
-    Ok(P::Compiler::supports_mode(
-        &compiler_version,
-        mode.optimize_setting,
-        mode.pipeline,
-    ))
-}
-
 async fn start_driver_task<'a, L, F>(
     args: &Arguments,
-    tests: impl Stream<Item = Test<'a>>,
+    tests: impl Stream<Item = Test<'a, L, F>>,
 ) -> anyhow::Result<impl Future<Output = ()>>
 where
     L: Platform,
     F: Platform,
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
+    L::Compiler: 'a,
+    F::Compiler: 'a,
 {
     info!("Starting driver task");
-    let leader_nodes = Arc::new(
-        NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?,
-    );
-    let follower_nodes = Arc::new(
-        NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?,
-    );
     let number_concurrent_tasks = args.number_of_concurrent_tasks();
     let cached_compiler = Arc::new(
         CachedCompiler::new(
@@ -477,38 +353,26 @@ where
         // this number will automatically be low enough to address (2). The user can override this.
         Some(number_concurrent_tasks),
         move |test| {
-            let leader_nodes = leader_nodes.clone();
-            let follower_nodes = follower_nodes.clone();
             let cached_compiler = cached_compiler.clone();
 
             async move {
-                let leader_node = leader_nodes.round_robbin();
-                let follower_node = follower_nodes.round_robbin();
-
                 test.reporter
                     .report_leader_node_assigned_event(
-                        leader_node.id(),
-                        L::config_id(),
-                        leader_node.connection_string(),
+                        test.leader_node.id(),
+                        *L::config_id(),
+                        test.leader_node.connection_string(),
                     )
                     .expect("Can't fail");
                 test.reporter
                     .report_follower_node_assigned_event(
-                        follower_node.id(),
-                        F::config_id(),
-                        follower_node.connection_string(),
+                        test.follower_node.id(),
+                        *F::config_id(),
+                        test.follower_node.connection_string(),
                     )
                     .expect("Can't fail");
 
                 let reporter = test.reporter.clone();
-                let result = handle_case_driver::<L, F>(
-                    test,
-                    args,
-                    cached_compiler,
-                    leader_node,
-                    follower_node,
-                )
-                .await;
+                let result = handle_case_driver::<L, F>(test, cached_compiler).await;
 
                 match result {
                     Ok(steps_executed) => reporter
@@ -615,99 +479,52 @@ async fn start_cli_reporting_task(reporter: Reporter) {
         mode = %test.mode,
         case_idx = %test.case_idx,
         case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
-        leader_node = leader_node.id(),
-        follower_node = follower_node.id(),
+        leader_node = test.leader_node.id(),
+        follower_node = test.follower_node.id(),
     )
 )]
-async fn handle_case_driver<L, F>(
-    test: Test<'_>,
-    config: &Arguments,
-    cached_compiler: Arc<CachedCompiler>,
-    leader_node: &L::Blockchain,
-    follower_node: &F::Blockchain,
+async fn handle_case_driver<'a, L, F>(
+    test: Test<'a, L, F>,
+    cached_compiler: Arc<CachedCompiler<'a>>,
 ) -> anyhow::Result<usize>
 where
     L: Platform,
     F: Platform,
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
+    L::Compiler: 'a,
+    F::Compiler: 'a,
 {
     let leader_reporter = test
         .reporter
-        .execution_specific_reporter(leader_node.id(), NodeDesignation::Leader);
+        .execution_specific_reporter(test.leader_node.id(), NodeDesignation::Leader);
     let follower_reporter = test
         .reporter
-        .execution_specific_reporter(follower_node.id(), NodeDesignation::Follower);
+        .execution_specific_reporter(test.follower_node.id(), NodeDesignation::Follower);
 
     let (
-        (
-            CompilerOutput {
-                contracts: leader_pre_link_contracts,
-            },
-            _,
-        ),
-        (
-            CompilerOutput {
-                contracts: follower_pre_link_contracts,
-            },
-            _,
-        ),
+        CompilerOutput {
+            contracts: leader_pre_link_contracts,
+        },
+        CompilerOutput {
+            contracts: follower_pre_link_contracts,
+        },
     ) = try_join!(
         cached_compiler.compile_contracts::<L>(
             test.metadata,
             test.metadata_file_path,
-            &test.mode,
-            config,
+            test.mode.clone(),
             None,
-            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
-                leader_reporter
-                    .report_pre_link_contracts_compilation_succeeded_event(
-                        compiler_version,
-                        compiler_path,
-                        is_cached,
-                        compiler_input,
-                        compiler_output,
-                    )
-                    .expect("Can't fail")
-            },
-            |compiler_version, compiler_path, compiler_input, failure_reason| {
-                leader_reporter
-                    .report_pre_link_contracts_compilation_failed_event(
-                        compiler_version,
-                        compiler_path,
-                        compiler_input,
-                        failure_reason,
-                    )
-                    .expect("Can't fail")
-            }
+            &test.leader_compiler,
+            &leader_reporter,
        ),
         cached_compiler.compile_contracts::<F>(
             test.metadata,
             test.metadata_file_path,
-            &test.mode,
-            config,
+            test.mode.clone(),
             None,
-            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
-                follower_reporter
-                    .report_pre_link_contracts_compilation_succeeded_event(
-                        compiler_version,
-                        compiler_path,
-                        is_cached,
-                        compiler_input,
-                        compiler_output,
-                    )
-                    .expect("Can't fail")
-            },
-            |compiler_version, compiler_path, compiler_input, failure_reason| {
-                follower_reporter
-                    .report_pre_link_contracts_compilation_failed_event(
-                        compiler_version,
-                        compiler_path,
-                        compiler_input,
-                        failure_reason,
-                    )
-                    .expect("Can't fail")
-            }
+            &test.follower_compiler,
+            &follower_reporter
         )
     )
     .context("Failed to compile pre-link contracts for leader/follower in parallel")?;
@@ -780,8 +597,8 @@ where
     );
 
     let (leader_receipt, follower_receipt) = try_join!(
-        leader_node.execute_transaction(leader_tx),
-        follower_node.execute_transaction(follower_tx)
+        test.leader_node.execute_transaction(leader_tx),
+        test.follower_node.execute_transaction(follower_tx)
     )?;
 
     debug!(
@@ -839,86 +656,40 @@ where
     }
 
     let (
-        (
-            CompilerOutput {
-                contracts: leader_post_link_contracts,
-            },
-            leader_compiler_version,
-        ),
-        (
-            CompilerOutput {
-                contracts: follower_post_link_contracts,
-            },
-            follower_compiler_version,
-        ),
+        CompilerOutput {
+            contracts: leader_post_link_contracts,
+        },
+        CompilerOutput {
+            contracts: follower_post_link_contracts,
+        },
     ) = try_join!(
         cached_compiler.compile_contracts::<L>(
             test.metadata,
             test.metadata_file_path,
-            &test.mode,
-            config,
+            test.mode.clone(),
             leader_deployed_libraries.as_ref(),
-            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
-                leader_reporter
-                    .report_post_link_contracts_compilation_succeeded_event(
-                        compiler_version,
-                        compiler_path,
-                        is_cached,
-                        compiler_input,
-                        compiler_output,
-                    )
-                    .expect("Can't fail")
-            },
-            |compiler_version, compiler_path, compiler_input, failure_reason| {
-                leader_reporter
-                    .report_post_link_contracts_compilation_failed_event(
-                        compiler_version,
-                        compiler_path,
-                        compiler_input,
-                        failure_reason,
-                    )
-                    .expect("Can't fail")
-            }
+            &test.leader_compiler,
+            &leader_reporter,
         ),
         cached_compiler.compile_contracts::<F>(
             test.metadata,
             test.metadata_file_path,
-            &test.mode,
-            config,
+            test.mode.clone(),
             follower_deployed_libraries.as_ref(),
-            |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
-                follower_reporter
-                    .report_post_link_contracts_compilation_succeeded_event(
-                        compiler_version,
-                        compiler_path,
-                        is_cached,
-                        compiler_input,
-                        compiler_output,
-                    )
-                    .expect("Can't fail")
-            },
-            |compiler_version, compiler_path, compiler_input, failure_reason| {
-                follower_reporter
-                    .report_post_link_contracts_compilation_failed_event(
-                        compiler_version,
-                        compiler_path,
-                        compiler_input,
-                        failure_reason,
-                    )
-                    .expect("Can't fail")
-            }
+            &test.follower_compiler,
+            &follower_reporter
         )
     )
     .context("Failed to compile post-link contracts for leader/follower in parallel")?;
 
     let leader_state = CaseState::<L>::new(
-        leader_compiler_version,
+        test.leader_compiler.version().clone(),
         leader_post_link_contracts,
         leader_deployed_libraries.unwrap_or_default(),
         leader_reporter,
     );
     let follower_state = CaseState::<F>::new(
-        follower_compiler_version,
+        test.follower_compiler.version().clone(),
         follower_post_link_contracts,
         follower_deployed_libraries.unwrap_or_default(),
         follower_reporter,
     );
 
     let mut driver = CaseDriver::<L, F>::new(
         test.metadata,
         test.case,
-        leader_node,
-        follower_node,
+        test.leader_node,
+        test.follower_node,
         leader_state,
         follower_state,
     );
@@ -957,60 +728,121 @@ async fn execute_corpus(
     Ok(())
 }
 
-async fn compile_corpus(
-    config: &Arguments,
-    tests: &[MetadataFile],
-    platform: &TestingPlatform,
-    _: Reporter,
-    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
-) {
-    let tests = tests.iter().flat_map(|metadata| {
-        metadata
-            .solc_modes()
-            .into_iter()
-            .map(move |solc_mode| (metadata, solc_mode))
-    });
-
-    let file = tempfile::NamedTempFile::new().expect("Failed to create temp file");
-    let cached_compiler = CachedCompiler::new(file.path(), false)
-        .await
-        .map(Arc::new)
-        .expect("Failed to create the cached compiler");
-
-    let compilation_task =
-        futures::stream::iter(tests).for_each_concurrent(None, |(metadata, mode)| {
-            let cached_compiler = cached_compiler.clone();
-
-            async move {
-                match platform {
-                    TestingPlatform::Geth => {
-                        let _ = cached_compiler
-                            .compile_contracts::<Geth>(
-                                metadata,
-                                metadata.metadata_file_path.as_path(),
-                                &mode,
-                                config,
-                                None,
-                                |_, _, _, _, _| {},
-                                |_, _, _, _| {},
-                            )
-                            .await;
-                    }
-                    TestingPlatform::Kitchensink => {
-                        let _ = cached_compiler
-                            .compile_contracts::<Kitchensink>(
-                                metadata,
-                                metadata.metadata_file_path.as_path(),
-                                &mode,
-                                config,
-                                None,
-                                |_, _, _, _, _| {},
-                                |_, _, _, _| {},
-                            )
-                            .await;
-                    }
-                }
-            }
-        });
-    let _ = join!(compilation_task, report_aggregator_task);
+/// This represents a single "test"; a mode, path and collection of cases.
+#[derive(Clone)]
+struct Test<'a, L: Platform, F: Platform> {
+    metadata: &'a MetadataFile,
+    metadata_file_path: &'a Path,
+    mode: Cow<'a, Mode>,
+    case_idx: CaseIdx,
+    case: &'a Case,
+    leader_node: &'a <L as Platform>::Blockchain,
+    follower_node: &'a <F as Platform>::Blockchain,
+    leader_compiler: L::Compiler,
+    follower_compiler: F::Compiler,
+    reporter: TestSpecificReporter,
 }
+
+impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
+    /// Checks if this test can be run with the current configuration.
+    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
+        self.check_metadata_file_ignored()?;
+        self.check_case_file_ignored()?;
+        self.check_target_compatibility()?;
+        self.check_evm_version_compatibility()?;
+        self.check_compiler_compatibility()?;
+        Ok(())
+    }
+
+    /// Checks if the metadata file is ignored or not.
+    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.metadata.ignore.is_some_and(|ignore| ignore) {
+            Err(("Metadata file is ignored.", indexmap! {}))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Checks if the case file is ignored or not.
+    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.case.ignore.is_some_and(|ignore| ignore) {
+            Err(("Case is ignored.", indexmap! {}))
+impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
+    /// Checks if this test can be run with the current configuration.
+    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
+        self.check_metadata_file_ignored()?;
+        self.check_case_file_ignored()?;
+        self.check_target_compatibility()?;
+        self.check_evm_version_compatibility()?;
+        self.check_compiler_compatibility()?;
+        Ok(())
+    }
+
+    /// Checks if the metadata file is ignored or not.
+    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.metadata.ignore.is_some_and(|ignore| ignore) {
+            Err(("Metadata file is ignored.", indexmap! {}))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Checks if the case file is ignored or not.
+    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.case.ignore.is_some_and(|ignore| ignore) {
+            Err(("Case is ignored.", indexmap! {}))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Checks if the leader and the follower both support the desired targets in the metadata file.
+    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
+        let leader_support =
+            <L as Platform>::matches_target(self.metadata.targets.as_deref());
+        let follower_support =
+            <F as Platform>::matches_target(self.metadata.targets.as_deref());
+        let is_allowed = leader_support && follower_support;
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "Either the leader or the follower does not support the target desired by the test.",
+                indexmap! {
+                    "test_desired_targets" => json!(self.metadata.targets.as_ref()),
+                    "leader_support" => json!(leader_support),
+                    "follower_support" => json!(follower_support),
+                },
+            ))
+        }
+    }
+
+    /// Checks for the compatibility of the EVM version with the leader and follower nodes.
+    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
+        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
+            return Ok(());
+        };
+
+        let leader_support = evm_version_requirement
+            .matches(&<L as Platform>::evm_version());
+        let follower_support = evm_version_requirement
+            .matches(&<F as Platform>::evm_version());
+        let is_allowed = leader_support && follower_support;
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "EVM version is incompatible with either the leader or the follower.",
+                indexmap! {
+                    "test_desired_evm_version" => json!(self.metadata.required_evm_version),
+                    "leader_support" => json!(leader_support),
+                    "follower_support" => json!(follower_support),
+                },
+            ))
+        }
+    }
+
+    /// Checks if the leader and follower compilers support the mode that the test is for.
+    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
+        let leader_support = self
+            .leader_compiler
+            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
+        let follower_support = self
+            .follower_compiler
+            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
+        let is_allowed = leader_support && follower_support;
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "Either the leader's or the follower's compiler does not support this mode.",
+                indexmap! {
+                    "mode" => json!(self.mode),
+                    "leader_support" => json!(leader_support),
+                    "follower_support" => json!(follower_support),
+                },
+            ))
+        }
+    }
+}
+
+type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
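The shape of `TestCheckFunctionResult` is what lets `check_compatibility` read as a straight-line pipeline: each check either passes or yields a static reason plus structured context, and `?` stops at the first failure. A self-contained sketch of the same pattern, with plain std types standing in for `indexmap!` and `serde_json::Value`:

use std::collections::BTreeMap;

// Simplified stand-in for `TestCheckFunctionResult`.
type CheckResult = Result<(), (&'static str, BTreeMap<&'static str, String>)>;

struct Config {
    ignored: bool,
    optimizer: bool,
}

fn check_not_ignored(c: &Config) -> CheckResult {
    if c.ignored {
        Err(("Test is ignored.", BTreeMap::new()))
    } else {
        Ok(())
    }
}

fn check_optimizer(c: &Config) -> CheckResult {
    if c.optimizer {
        Ok(())
    } else {
        Err((
            "Optimizer setting is unsupported.",
            BTreeMap::from([("optimizer", c.optimizer.to_string())]),
        ))
    }
}

fn check_compatibility(c: &Config) -> CheckResult {
    // `?` propagates the first failing check's (reason, context) pair.
    check_not_ignored(c)?;
    check_optimizer(c)?;
    Ok(())
}

fn main() {
    let cfg = Config { ignored: false, optimizer: false };
    if let Err((reason, ctx)) = check_compatibility(&cfg) {
        println!("skipped: {reason} ({ctx:?})");
    }
}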
diff --git a/crates/format/src/case.rs b/crates/format/src/case.rs
index c98ac46..93ddd4b 100644
--- a/crates/format/src/case.rs
+++ b/crates/format/src/case.rs
@@ -64,7 +64,7 @@ impl Case {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().collect(),
+            None => Mode::all().cloned().collect(),
         }
     }
 }
diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs
index 66985a5..53a26a6 100644
--- a/crates/format/src/metadata.rs
+++ b/crates/format/src/metadata.rs
@@ -99,7 +99,7 @@ impl Metadata {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().collect(),
+            None => Mode::all().cloned().collect(),
         }
     }
 
diff --git a/crates/format/src/mode.rs b/crates/format/src/mode.rs
index a89b2cb..52b9f75 100644
--- a/crates/format/src/mode.rs
+++ b/crates/format/src/mode.rs
@@ -1,5 +1,6 @@
 use anyhow::Context;
 use regex::Regex;
+use revive_dt_common::iterators::EitherIter;
 use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
 use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
@@ -176,27 +177,6 @@ impl ParsedMode {
     }
 }
 
-/// An iterator that could be either of two iterators.
-#[derive(Clone, Debug)]
-enum EitherIter<A, B> {
-    A(A),
-    B(B),
-}
-
-impl<A, B> Iterator for EitherIter<A, B>
-where
-    A: Iterator,
-    B: Iterator<Item = A::Item>,
-{
-    type Item = A::Item;
-    fn next(&mut self) -> Option<Self::Item> {
-        match self {
-            EitherIter::A(iter) => iter.next(),
-            EitherIter::B(iter) => iter.next(),
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
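The `use revive_dt_common::iterators::EitherIter;` added above replaces the crate-local definition deleted here. The point of the type: an `if`/`else` whose branches build different concrete iterator types cannot return `impl Iterator` directly, but wrapping each branch in one variant of `EitherIter` unifies them into a single type. A minimal usage sketch, with the enum inlined so it stands alone:

// Both match arms produce different iterator types, yet the function
// still returns one concrete type.
enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B> Iterator for EitherIter<A, B>
where
    A: Iterator,
    B: Iterator<Item = A::Item>,
{
    type Item = A::Item;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}

fn evens_or_all(only_evens: bool, xs: Vec<u32>) -> impl Iterator<Item = u32> {
    if only_evens {
        // `filter` yields a different concrete type than a plain `into_iter`...
        EitherIter::A(xs.into_iter().filter(|x| x % 2 == 0))
    } else {
        // ...but both branches unify under `EitherIter`.
        EitherIter::B(xs.into_iter())
    }
}

fn main() {
    let evens: Vec<u32> = evens_or_all(true, vec![1, 2, 3, 4]).collect();
    assert_eq!(evens, vec![2, 4]);
}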
diff --git a/crates/report/src/aggregator.rs b/crates/report/src/aggregator.rs
index 913b0d5..9b9670d 100644
--- a/crates/report/src/aggregator.rs
+++ b/crates/report/src/aggregator.rs
@@ -340,21 +340,13 @@ impl ReportAggregator {
         &mut self,
         event: PreLinkContractsCompilationFailedEvent,
     ) {
-        let include_input = self.report.config.report_include_compiler_input;
-
         let execution_information = self.execution_information(&event.execution_specifier);
 
-        let compiler_input = if include_input {
-            event.compiler_input
-        } else {
-            None
-        };
-
         execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
             reason: event.reason,
             compiler_version: event.compiler_version,
             compiler_path: event.compiler_path,
-            compiler_input,
+            compiler_input: event.compiler_input,
         });
     }
 
@@ -362,21 +354,13 @@ impl ReportAggregator {
         &mut self,
         event: PostLinkContractsCompilationFailedEvent,
     ) {
-        let include_input = self.report.config.report_include_compiler_input;
-
         let execution_information = self.execution_information(&event.execution_specifier);
 
-        let compiler_input = if include_input {
-            event.compiler_input
-        } else {
-            None
-        };
-
         execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
             reason: event.reason,
             compiler_version: event.compiler_version,
             compiler_path: event.compiler_path,
-            compiler_input,
+            compiler_input: event.compiler_input,
         });
     }
diff --git a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs
index 67d8f9e..908616f 100644
--- a/crates/solc-binaries/src/cache.rs
+++ b/crates/solc-binaries/src/cache.rs
@@ -9,6 +9,7 @@ use std::{
     sync::LazyLock,
 };
 
+use semver::Version;
 use tokio::sync::Mutex;
 
 use crate::download::SolcDownloader;
 
@@ -20,7 +21,7 @@ pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new
 pub(crate) async fn get_or_download(
     working_directory: &Path,
     downloader: &SolcDownloader,
-) -> anyhow::Result<PathBuf> {
+) -> anyhow::Result<(Version, PathBuf)> {
     let target_directory = working_directory
         .join(SOLC_CACHE_DIRECTORY)
         .join(downloader.version.to_string());
@@ -29,7 +30,7 @@ pub(crate) async fn get_or_download(
     let mut cache = SOLC_CACHER.lock().await;
     if cache.contains(&target_file) {
         tracing::debug!("using cached solc: {}", target_file.display());
-        return Ok(target_file);
+        return Ok((downloader.version.clone(), target_file));
     }
 
     create_dir_all(&target_directory).with_context(|| {
@@ -48,7 +49,7 @@ pub(crate) async fn get_or_download(
     })?;
 
     cache.insert(target_file.clone());
-    Ok(target_file)
+    Ok((downloader.version.clone(), target_file))
 }
 
 async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
diff --git a/crates/solc-binaries/src/lib.rs b/crates/solc-binaries/src/lib.rs
index 8bf4134..1656cdb 100644
--- a/crates/solc-binaries/src/lib.rs
+++ b/crates/solc-binaries/src/lib.rs
@@ -10,6 +10,7 @@ use cache::get_or_download;
 use download::SolcDownloader;
 
 use revive_dt_common::types::VersionOrRequirement;
+use semver::Version;
 
 pub mod cache;
 pub mod download;
@@ -24,7 +25,7 @@ pub async fn download_solc(
     cache_directory: &Path,
     version: impl Into<VersionOrRequirement>,
     wasm: bool,
-) -> anyhow::Result<PathBuf> {
+) -> anyhow::Result<(Version, PathBuf)> {
     let downloader = if wasm {
         SolcDownloader::wasm(version).await
     } else if cfg!(target_os = "linux") {