Compare commits


2 Commits

Author SHA1 Message Date
Omar Abdulla 59f439b5f8 Update the kitchensink tests 2025-08-25 18:12:01 +03:00
Omar Abdulla 8d1523fd77 Configure kitchensink to use devnode by default 2025-08-25 17:43:52 +03:00
36 changed files with 1549 additions and 2077 deletions
+1 -3
@@ -8,6 +8,4 @@ node_modules
 # added to the .gitignore file.
 *.log
 profile.json.gz
-resolc-compiler-tests
-workdir
-Generated
+5 -5
@@ -4501,12 +4501,9 @@ name = "revive-dt-config"
 version = "0.1.0"
 dependencies = [
  "alloy",
- "anyhow",
  "clap",
  "semver 1.0.26",
  "serde",
- "serde_json",
- "strum",
  "temp-dir",
 ]
@@ -4521,6 +4518,7 @@ dependencies = [
  "clap",
  "futures",
  "indexmap 2.10.0",
+ "once_cell",
  "revive-dt-common",
  "revive-dt-compiler",
  "revive-dt-config",
@@ -4531,6 +4529,8 @@ dependencies = [
  "semver 1.0.26",
  "serde",
  "serde_json",
+ "temp-dir",
+ "tempfile",
  "tokio",
  "tracing",
  "tracing-appender",
@@ -5692,9 +5692,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 [[package]]
 name = "strum"
-version = "0.27.2"
+version = "0.27.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
+checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32"
 dependencies = [
  "strum_macros",
 ]
-1
@@ -48,7 +48,6 @@ serde_with = { version = "3.14.0" }
 sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
-strum = { version = "0.27.2", features = ["derive"] }
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
 thiserror = "2"
+3 -4
@@ -187,11 +187,10 @@ The above corpus file instructs the tool to look for all of the test cases conta
 The simplest command to run this tool is the following:
 
 ```bash
-RUST_LOG="info" cargo run --release -- execute-tests \
-    --follower geth \
+RUST_LOG="info" cargo run --release -- \
     --corpus path_to_your_corpus_file.json \
-    --working-directory path_to_a_temporary_directory_to_cache_things_in \
-    --concurrency.number-of-nodes 5 \
+    --workdir path_to_a_temporary_directory_to_cache_things_in \
+    --number-of-nodes 5 \
     > logs.log \
     2> output.log
 ```
+5 -14
@@ -3,28 +3,19 @@ use std::{
     path::Path,
 };
 
-use anyhow::{Context, Result};
+use anyhow::Result;
 
 /// This method clears the passed directory of all of the files and directories contained within
 /// without deleting the directory.
 pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
-    for entry in read_dir(path.as_ref())
-        .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
-    {
-        let entry = entry.with_context(|| {
-            format!(
-                "Failed to read an entry in directory: {}",
-                path.as_ref().display()
-            )
-        })?;
+    for entry in read_dir(path.as_ref())? {
+        let entry = entry?;
         let entry_path = entry.path();
         if entry_path.is_file() {
-            remove_file(&entry_path)
-                .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
+            remove_file(entry_path)?
         } else {
-            remove_dir_all(&entry_path)
-                .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
+            remove_dir_all(entry_path)?
         }
     }
     Ok(())
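With the contexts stripped, `clear_directory` now bubbles the raw `std::io::Error` up through `anyhow::Result`. A minimal usage sketch, assuming a hypothetical caller that wants to empty a cache directory between runs while keeping the directory itself in place:

```rust
use std::fs::{create_dir_all, write};
use std::path::Path;

// Hypothetical caller of the helper above (not part of the diff).
fn reset_cache(workdir: &Path) -> anyhow::Result<()> {
    let cache = workdir.join("cache");
    create_dir_all(&cache)?;
    write(cache.join("stale.json"), b"{}")?;
    clear_directory(&cache)?; // `cache/` still exists, but is now empty
    Ok(())
}
```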
+2 -5
@@ -1,7 +1,7 @@
 use std::ops::ControlFlow;
 use std::time::Duration;
 
-use anyhow::{Context as _, Result, anyhow};
+use anyhow::{Result, anyhow};
 
 const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
@@ -38,10 +38,7 @@ where
         ));
     }
 
-    match future()
-        .await
-        .context("Polled future returned an error during polling loop")?
-    {
+    match future().await? {
         ControlFlow::Continue(()) => {
             let next_wait_duration = match polling_wait_behavior {
                 PollingWaitBehavior::Constant(duration) => duration,
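The same simplification applies here: errors from the polled future now propagate with a plain `?`. As a standalone sketch of the `ControlFlow` retry pattern the loop is built on — the `poll_until` name and constant-wait behavior are stand-ins for illustration, not the crate's actual API:

```rust
use std::future::Future;
use std::ops::ControlFlow;
use std::time::Duration;

// Stand-in for the crate's polling helper: re-run `check` until it breaks,
// sleeping a constant duration between attempts and propagating errors.
async fn poll_until<F, Fut>(mut check: F, wait: Duration) -> anyhow::Result<()>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = anyhow::Result<ControlFlow<()>>>,
{
    loop {
        match check().await? {
            ControlFlow::Break(()) => return Ok(()),
            ControlFlow::Continue(()) => tokio::time::sleep(wait).await,
        }
    }
}
```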
@@ -1,21 +0,0 @@
-/// An iterator that could be either of two iterators.
-#[derive(Clone, Debug)]
-pub enum EitherIter<A, B> {
-    A(A),
-    B(B),
-}
-
-impl<A, B, T> Iterator for EitherIter<A, B>
-where
-    A: Iterator<Item = T>,
-    B: Iterator<Item = T>,
-{
-    type Item = T;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        match self {
-            EitherIter::A(iter) => iter.next(),
-            EitherIter::B(iter) => iter.next(),
-        }
-    }
-}
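For context, the deleted adapter let a function return one of two distinct iterator types from different branches without boxing; something like the following (hypothetical usage, not from the repository) compiled against it:

```rust
// The two branches produce different concrete types (Map vs. Range);
// EitherIter unified them under one return type.
fn numbers(double: bool) -> EitherIter<impl Iterator<Item = u32>, impl Iterator<Item = u32>> {
    if double {
        EitherIter::A((0u32..5).map(|n| n * 2))
    } else {
        EitherIter::B(0u32..5)
    }
}
```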
-2
@@ -1,5 +1,3 @@
-mod either_iter;
 mod files_with_extension_iterator;
 
-pub use either_iter::*;
 pub use files_with_extension_iterator::*;
+8 -14
@@ -3,7 +3,6 @@ use semver::Version;
 use serde::{Deserialize, Serialize};
 use std::fmt::Display;
 use std::str::FromStr;
-use std::sync::LazyLock;
 
 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -35,19 +34,14 @@ impl Display for Mode {
 impl Mode {
     /// Return all of the available mode combinations.
-    pub fn all() -> impl Iterator<Item = &'static Mode> {
-        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
-            ModePipeline::test_cases()
-                .flat_map(|pipeline| {
-                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
-                        pipeline,
-                        optimize_setting,
-                        version: None,
-                    })
-                })
-                .collect::<Vec<_>>()
-        });
-        ALL_MODES.iter()
+    pub fn all() -> impl Iterator<Item = Mode> {
+        ModePipeline::test_cases().flat_map(|pipeline| {
+            ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
+                pipeline,
+                optimize_setting,
+                version: None,
+            })
+        })
     }
 
     /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
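The net effect of this hunk: `Mode::all()` no longer memoizes into a `static` `Vec` behind a `LazyLock`, and yields owned `Mode` values instead of `&'static Mode`. A caller that previously held references can simply collect (sketch):

```rust
fn collect_modes() -> Vec<Mode> {
    // Owned values come straight from the iterator; no `&'static Mode` borrow involved.
    Mode::all().collect()
}
```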
+4
@@ -0,0 +1,4 @@
+use semver::Version;
+
+/// This is the first version of solc that supports the `--via-ir` flag / "viaIR" input JSON.
+pub const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
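A quick check of how the constant gates the pipeline, using `semver`'s ordering (assertions added here purely for illustration):

```rust
use semver::Version;

#[test]
fn via_ir_version_gate() {
    // 0.8.12 predates `--via-ir`; 0.8.13 is the first release that supports it.
    assert!(Version::new(0, 8, 12) < SOLC_VERSION_SUPPORTING_VIA_YUL_IR);
    assert!(Version::new(0, 8, 13) >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR);
}
```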
+56 -38
@@ -3,6 +3,8 @@
 //! - Polkadot revive resolc compiler
 //! - Polkadot revive Wasm compiler
 
+mod constants;
+
 use std::{
     collections::HashMap,
     hash::Hash,
@@ -11,14 +13,13 @@ use std::{
 use alloy::json_abi::JsonAbi;
 use alloy_primitives::Address;
-use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
 
 use revive_common::EVMVersion;
 use revive_dt_common::cached_fs::read_to_string;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
+use revive_dt_config::Arguments;
 
 // Re-export this as it's a part of the compiler interface.
 pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -28,38 +29,36 @@ pub mod revive_resolc;
 pub mod solc;
 
 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler: Sized {
-    /// Instantiates a new compiler object.
-    ///
-    /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
-    /// new compiler object. Certain implementations of this trait might choose to cache the
-    /// compiler objects and return the same ones over and over again.
-    fn new(
-        context: impl AsRef<SolcConfiguration>
-            + AsRef<ResolcConfiguration>
-            + AsRef<WorkingDirectoryConfiguration>,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> impl Future<Output = Result<Self>>;
-
-    /// Returns the version of the compiler.
-    fn version(&self) -> &Version;
-
-    /// Returns the path of the compiler executable.
-    fn path(&self) -> &Path;
+pub trait SolidityCompiler {
+    /// Extra options specific to the compiler.
+    type Options: Default + PartialEq + Eq + Hash;
 
     /// The low-level compiler interface.
-    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
-
-    /// Does the compiler support the provided mode and version settings.
-    fn supports_mode(
+    fn build(
         &self,
-        optimizer_setting: ModeOptimizerSetting,
+        input: CompilerInput,
+        additional_options: Self::Options,
+    ) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
+
+    fn new(solc_executable: PathBuf) -> Self;
+
+    fn get_compiler_executable(
+        config: &Arguments,
+        version: impl Into<VersionOrRequirement>,
+    ) -> impl Future<Output = anyhow::Result<PathBuf>>;
+
+    fn version(&self) -> impl Future<Output = anyhow::Result<Version>>;
+
+    /// Does the compiler support the provided mode and version settings?
+    fn supports_mode(
+        compiler_version: &Version,
+        optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool;
 }
 
 /// The generic compilation input configuration.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CompilerInput {
     pub pipeline: Option<ModePipeline>,
     pub optimization: Option<ModeOptimizerSetting>,
@@ -80,12 +79,21 @@ pub struct CompilerOutput {
 }
 
 /// A generic builder style interface for configuring the supported compiler options.
-#[derive(Default)]
-pub struct Compiler {
+pub struct Compiler<T: SolidityCompiler> {
     input: CompilerInput,
+    additional_options: T::Options,
 }
 
-impl Compiler {
+impl Default for Compiler<solc::Solc> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T> Compiler<T>
+where
+    T: SolidityCompiler,
+{
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
@@ -98,6 +106,7 @@ impl Compiler {
                 libraries: Default::default(),
                 revert_string_handling: Default::default(),
             },
+            additional_options: T::Options::default(),
         }
     }
@@ -126,11 +135,10 @@ impl Compiler {
         self
     }
 
-    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
-        self.input.sources.insert(
-            path.as_ref().to_path_buf(),
-            read_to_string(path.as_ref()).context("Failed to read the contract source")?,
-        );
+    pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        self.input
+            .sources
+            .insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?);
         Ok(self)
     }
@@ -156,6 +164,11 @@ impl Compiler {
         self
     }
 
+    pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
+        self.additional_options = options.into();
+        self
+    }
+
     pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
         callback(self)
     }
@@ -164,12 +177,17 @@ impl Compiler {
         callback(self)
     }
 
-    pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result<CompilerOutput> {
-        compiler.build(self.input).await
+    pub async fn try_build(
+        self,
+        compiler_path: impl AsRef<Path>,
+    ) -> anyhow::Result<CompilerOutput> {
+        T::new(compiler_path.as_ref().to_path_buf())
+            .build(self.input, self.additional_options)
+            .await
     }
 
-    pub fn input(&self) -> &CompilerInput {
-        &self.input
+    pub fn input(&self) -> CompilerInput {
+        self.input.clone()
     }
 }
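Putting the reworked builder together: the target compiler is now chosen with a type parameter, and the executable path is handed to `try_build`, which constructs the compiler via `T::new` internally. A sketch mirroring the updated integration tests further down (the source and binary paths are placeholders):

```rust
use revive_dt_compiler::{Compiler, solc::Solc};

async fn compile_one() -> anyhow::Result<()> {
    let _output = Compiler::<Solc>::new()
        .with_source("contracts/main.sol")?
        .try_build("/usr/local/bin/solc")
        .await?;
    Ok(())
}
```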
+130 -118
@@ -3,78 +3,39 @@
 use std::{
     path::PathBuf,
-    process::Stdio,
-    sync::{Arc, LazyLock},
+    process::{Command, Stdio},
+    sync::LazyLock,
 };
 
 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
+use revive_dt_config::Arguments;
 use revive_solc_json_interface::{
     SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
     SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
     SolcStandardJsonOutput,
 };
 
-use crate::{
-    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
-};
+use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
 use alloy::json_abi::JsonAbi;
-use anyhow::{Context as _, Result};
+use anyhow::Context;
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
+// TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
+// specified solc compiler. I believe that currently we completely ignore the specified solc binary
+// when invoking resolc which doesn't seem right if we're using solc as a compiler frontend.
+
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Resolc(Arc<ResolcInner>);
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct ResolcInner {
-    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
-    solc: Solc,
+#[derive(Debug)]
+pub struct Resolc {
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
 }
 
 impl SolidityCompiler for Resolc {
-    async fn new(
-        context: impl AsRef<SolcConfiguration>
-            + AsRef<ResolcConfiguration>
-            + AsRef<WorkingDirectoryConfiguration>,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> Result<Self> {
-        /// This is a cache of all of the resolc compiler objects. Since we do not currently
-        /// support multiple resolc compiler versions, our cache is just keyed by the solc
-        /// compiler and its version to the resolc compiler.
-        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
-
-        let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
-        let solc = Solc::new(&context, version)
-            .await
-            .context("Failed to create the solc compiler frontend for resolc")?;
-
-        Ok(COMPILERS_CACHE
-            .entry(solc.clone())
-            .or_insert_with(|| {
-                Self(Arc::new(ResolcInner {
-                    solc,
-                    resolc_path: resolc_configuration.path.clone(),
-                }))
-            })
-            .clone())
-    }
-
-    fn version(&self) -> &Version {
-        // We currently return the solc compiler version since we do not support multiple resolc
-        // compiler versions.
-        self.0.solc.version()
-    }
-
-    fn path(&self) -> &std::path::Path {
-        &self.0.resolc_path
-    }
+    type Options = Vec<String>;
 
     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -91,7 +52,8 @@ impl SolidityCompiler for Resolc {
         // resolc. So, we need to go back to this later once it's supported.
             revert_string_handling: _,
         }: CompilerInput,
-    ) -> Result<CompilerOutput> {
+        additional_options: Self::Options,
+    ) -> anyhow::Result<CompilerOutput> {
         if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
             anyhow::bail!(
                 "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
@@ -138,7 +100,7 @@
             },
         };
 
-        let mut command = AsyncCommand::new(self.path());
+        let mut command = AsyncCommand::new(&self.resolc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -157,28 +119,18 @@
                 .join(","),
             );
         }
-        let mut child = command
-            .spawn()
-            .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?;
+        let mut child = command.spawn()?;
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        let serialized_input = serde_json::to_vec(&input)
-            .context("Failed to serialize Standard JSON input for resolc")?;
-        stdin_pipe
-            .write_all(&serialized_input)
-            .await
-            .context("Failed to write Standard JSON to resolc stdin")?;
+        let serialized_input = serde_json::to_vec(&input)?;
+        stdin_pipe.write_all(&serialized_input).await?;
 
-        let output = child
-            .wait_with_output()
-            .await
-            .context("Failed while waiting for resolc process to finish")?;
+        let output = child.wait_with_output().await?;
         let stdout = output.stdout;
         let stderr = output.stderr;
 
         if !output.status.success() {
-            let json_in = serde_json::to_string_pretty(&input)
-                .context("Failed to pretty-print Standard JSON input for logging")?;
+            let json_in = serde_json::to_string_pretty(&input)?;
             let message = String::from_utf8_lossy(&stderr);
             tracing::error!(
                 status = %output.status,
@@ -189,14 +141,12 @@
             anyhow::bail!("Compilation failed with an error: {message}");
         }
 
-        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
-            .map_err(|e| {
-                anyhow::anyhow!(
-                    "failed to parse resolc JSON output: {e}\nstderr: {}",
-                    String::from_utf8_lossy(&stderr)
-                )
-            })
-            .context("Failed to parse resolc standard JSON output")?;
+        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
+            anyhow::anyhow!(
+                "failed to parse resolc JSON output: {e}\nstderr: {}",
+                String::from_utf8_lossy(&stderr)
+            )
+        })?;
 
         tracing::debug!(
             output = %serde_json::to_string(&parsed).unwrap(),
@@ -223,10 +173,7 @@
         let mut compiler_output = CompilerOutput::default();
         for (source_path, contracts) in contracts.into_iter() {
-            let src_for_msg = source_path.clone();
-            let source_path = PathBuf::from(source_path)
-                .canonicalize()
-                .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
+            let source_path = PathBuf::from(source_path).canonicalize()?;
             let map = compiler_output.contracts.entry(source_path).or_default();
 
             for (contract_name, contract_information) in contracts.into_iter() {
@@ -234,41 +181,23 @@
                     .evm
                     .and_then(|evm| evm.bytecode.clone())
                     .context("Unexpected - Contract compiled with resolc has no bytecode")?;
-                let abi = {
-                    let metadata = contract_information
-                        .metadata
-                        .as_ref()
-                        .context("No metadata found for the contract")?;
-                    let solc_metadata_str = match metadata {
-                        serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
-                        serde_json::Value::Object(metadata_object) => {
-                            let solc_metadata_value = metadata_object
-                                .get("solc_metadata")
-                                .context("Contract doesn't have a 'solc_metadata' field")?;
-                            solc_metadata_value
-                                .as_str()
-                                .context("The 'solc_metadata' field is not a string")?
-                        }
-                        serde_json::Value::Null
-                        | serde_json::Value::Bool(_)
-                        | serde_json::Value::Number(_)
-                        | serde_json::Value::Array(_) => {
-                            anyhow::bail!("Unsupported type of metadata {metadata:?}")
-                        }
-                    };
-                    let solc_metadata =
-                        serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
-                            "Failed to deserialize the solc_metadata as a serde_json generic value",
-                        )?;
-                    let output_value = solc_metadata
-                        .get("output")
-                        .context("solc_metadata doesn't have an output field")?;
-                    let abi_value = output_value
-                        .get("abi")
-                        .context("solc_metadata output doesn't contain an abi field")?;
-                    serde_json::from_value::<JsonAbi>(abi_value.clone())
-                        .context("ABI found in solc_metadata output is not valid ABI")?
-                };
+                let abi = contract_information
+                    .metadata
+                    .as_ref()
+                    .and_then(|metadata| metadata.as_object())
+                    .and_then(|metadata| metadata.get("solc_metadata"))
+                    .and_then(|solc_metadata| solc_metadata.as_str())
+                    .and_then(|metadata| serde_json::from_str::<serde_json::Value>(metadata).ok())
+                    .and_then(|metadata| {
+                        metadata.get("output").and_then(|output| {
+                            output
+                                .get("abi")
+                                .and_then(|abi| serde_json::from_value::<JsonAbi>(abi.clone()).ok())
+                        })
+                    })
+                    .context(
+                        "Unexpected - Failed to get the ABI for a contract compiled with resolc",
+                    )?;
                 map.insert(contract_name, (bytecode.object, abi));
             }
         }
@@ -276,11 +205,94 @@
         Ok(compiler_output)
     }
 
+    fn new(resolc_path: PathBuf) -> Self {
+        Resolc { resolc_path }
+    }
+
+    async fn get_compiler_executable(
+        config: &Arguments,
+        _version: impl Into<VersionOrRequirement>,
+    ) -> anyhow::Result<PathBuf> {
+        if !config.resolc.as_os_str().is_empty() {
+            return Ok(config.resolc.clone());
+        }
+        Ok(PathBuf::from("resolc"))
+    }
+
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache of the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object
+        /// is not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+
+        match VERSION_CACHE.entry(self.resolc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
+                let output = Command::new(self.resolc_path.as_path())
+                    .arg("--version")
+                    .stdout(Stdio::piped())
+                    .spawn()?
+                    .wait_with_output()?
+                    .stdout;
+                let output = String::from_utf8_lossy(&output);
+                let version_string = output
+                    .split("version ")
+                    .nth(1)
+                    .context("Version parsing failed")?
+                    .split("+")
+                    .next()
+                    .context("Version parsing failed")?;
+                let version = Version::parse(version_string)?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
     fn supports_mode(
-        &self,
-        optimize_setting: ModeOptimizerSetting,
+        _compiler_version: &Version,
+        _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
-        pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline)
+        // We only support the Y (IE compile via Yul IR) mode here, which also means that we can
+        // only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
+        // needs this to translate to LLVM IR and then RISCV.
+        // Note: the original implementation of this function looked like the following:
+        // ```
+        // pipeline == ModePipeline::ViaYulIR && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
+        // ```
+        // However, that implementation is sadly incorrect since the version that's passed into this
+        // function is not the version of solc but the version of resolc. This is despite the fact
+        // that resolc depends on Solc for the initial Yul codegen. Therefore, we have skipped the
+        // version check until we do a better integration between resolc and solc.
+        pipeline == ModePipeline::ViaYulIR
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
+        let compiler = Resolc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        let _ = version.expect("Failed to get version");
+    }
 }
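The `--version` parsing added above assumes output shaped roughly like `resolc version 0.1.0+commit...`. A worked example of the two-step split (the example string is assumed, not captured from a real binary):

```rust
#[test]
fn parse_resolc_version_string() {
    let stdout = "resolc version 0.1.0+commit.abc1234"; // assumed output shape
    let version_string = stdout
        .split("version ")
        .nth(1)
        .and_then(|rest| rest.split('+').next())
        .unwrap();
    assert_eq!(version_string, "0.1.0");
}
```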
+121 -96
@@ -3,18 +3,19 @@
 use std::{
     path::PathBuf,
-    process::Stdio,
-    sync::{Arc, LazyLock},
+    process::{Command, Stdio},
+    sync::LazyLock,
 };
 
 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
+use revive_dt_config::Arguments;
 use revive_dt_solc_binaries::download_solc;
 
+use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
 use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
-use anyhow::{Context as _, Result};
+use anyhow::Context;
 use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -25,64 +26,13 @@
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct Solc(Arc<SolcInner>);
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct SolcInner {
-    /// The path of the solidity compiler executable that this object uses.
+#[derive(Debug)]
+pub struct Solc {
     solc_path: PathBuf,
-    /// The version of the solidity compiler executable that this object uses.
-    solc_version: Version,
 }
 
 impl SolidityCompiler for Solc {
-    async fn new(
-        context: impl AsRef<SolcConfiguration>
-            + AsRef<ResolcConfiguration>
-            + AsRef<WorkingDirectoryConfiguration>,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> Result<Self> {
-        // This is a cache for the compiler objects so that whenever the same compiler version is
-        // requested the same object is returned. We do this as we do not want to keep cloning the
-        // compiler around.
-        static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
-            LazyLock::new(Default::default);
-
-        let working_directory_configuration =
-            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
-        let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
-
-        // We attempt to download the solc binary. Note the following: this call does the version
-        // resolution for us. Therefore, even if the download didn't proceed, this function will
-        // resolve the version requirement into a canonical version of the compiler. It's then up
-        // to us to either use the provided path or not.
-        let version = version
-            .into()
-            .unwrap_or_else(|| solc_configuration.version.clone().into());
-        let (version, path) =
-            download_solc(working_directory_configuration.as_path(), version, false)
-                .await
-                .context("Failed to download/get path to solc binary")?;
-
-        Ok(COMPILERS_CACHE
-            .entry((path.clone(), version.clone()))
-            .or_insert_with(|| {
-                Self(Arc::new(SolcInner {
-                    solc_path: path,
-                    solc_version: version,
-                }))
-            })
-            .clone())
-    }
-
-    fn version(&self) -> &Version {
-        &self.0.solc_version
-    }
-
-    fn path(&self) -> &std::path::Path {
-        &self.0.solc_path
-    }
+    type Options = ();
 
     #[tracing::instrument(level = "debug", ret)]
     async fn build(
@@ -97,12 +47,15 @@
             libraries,
             revert_string_handling,
         }: CompilerInput,
-    ) -> Result<CompilerOutput> {
+        _: Self::Options,
+    ) -> anyhow::Result<CompilerOutput> {
+        let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
+
         // Be careful to entirely omit the viaIR field if the compiler does not support it,
         // as it will error if you provide fields it does not know about. Because
         // `supports_mode` is called prior to instantiating a compiler, we should never
        // ask for something which is invalid.
-        let via_ir = match (pipeline, self.compiler_supports_yul()) {
+        let via_ir = match (pipeline, compiler_supports_via_ir) {
             (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
             (_pipeline, false) => None,
         };
@@ -162,7 +115,7 @@
             },
         };
 
-        let mut command = AsyncCommand::new(self.path());
+        let mut command = AsyncCommand::new(&self.solc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -181,25 +134,15 @@
                 .join(","),
             );
         }
-        let mut child = command
-            .spawn()
-            .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?;
+        let mut child = command.spawn()?;
         let stdin = child.stdin.as_mut().expect("should be piped");
-        let serialized_input = serde_json::to_vec(&input)
-            .context("Failed to serialize Standard JSON input for solc")?;
-        stdin
-            .write_all(&serialized_input)
-            .await
-            .context("Failed to write Standard JSON to solc stdin")?;
-        let output = child
-            .wait_with_output()
-            .await
-            .context("Failed while waiting for solc process to finish")?;
+        let serialized_input = serde_json::to_vec(&input)?;
+        stdin.write_all(&serialized_input).await?;
+        let output = child.wait_with_output().await?;
 
         if !output.status.success() {
-            let json_in = serde_json::to_string_pretty(&input)
-                .context("Failed to pretty-print Standard JSON input for logging")?;
+            let json_in = serde_json::to_string_pretty(&input)?;
             let message = String::from_utf8_lossy(&output.stderr);
             tracing::error!(
                 status = %output.status,
@@ -210,14 +153,12 @@
             anyhow::bail!("Compilation failed with an error: {message}");
         }
 
-        let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
-            .map_err(|e| {
-                anyhow::anyhow!(
-                    "failed to parse resolc JSON output: {e}\nstderr: {}",
-                    String::from_utf8_lossy(&output.stdout)
-                )
-            })
-            .context("Failed to parse solc standard JSON output")?;
+        let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout).map_err(|e| {
+            anyhow::anyhow!(
+                "failed to parse resolc JSON output: {e}\nstderr: {}",
+                String::from_utf8_lossy(&output.stdout)
+            )
+        })?;
 
         // Detecting if the compiler output contained errors and reporting them through logs and
         // errors instead of returning the compiler output that might contain errors.
@@ -237,12 +178,7 @@
         for (contract_path, contracts) in parsed.contracts {
             let map = compiler_output
                 .contracts
-                .entry(contract_path.canonicalize().with_context(|| {
-                    format!(
-                        "Failed to canonicalize contract path {}",
-                        contract_path.display()
-                    )
-                })?)
+                .entry(contract_path.canonicalize()?)
                 .or_default();
             for (contract_name, contract_info) in contracts.into_iter() {
                 let source_code = contract_info
@@ -263,21 +199,110 @@
         Ok(compiler_output)
     }
 
+    fn new(solc_path: PathBuf) -> Self {
+        Self { solc_path }
+    }
+
+    async fn get_compiler_executable(
+        config: &Arguments,
+        version: impl Into<VersionOrRequirement>,
+    ) -> anyhow::Result<PathBuf> {
+        let path = download_solc(config.directory(), version, config.wasm).await?;
+        Ok(path)
+    }
+
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache of the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object
+        /// is not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+
+        match VERSION_CACHE.entry(self.solc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
+                // The following is the parsing code for the version from the solc version strings
+                // which look like the following:
+                // ```
+                // solc, the solidity compiler commandline interface
+                // Version: 0.8.30+commit.73712a01.Darwin.appleclang
+                // ```
+                let child = Command::new(self.solc_path.as_path())
+                    .arg("--version")
+                    .stdout(Stdio::piped())
+                    .spawn()?;
+                let output = child.wait_with_output()?;
+                let output = String::from_utf8_lossy(&output.stdout);
+                let version_line = output
+                    .split("Version: ")
+                    .nth(1)
+                    .context("Version parsing failed")?;
+                let version_string = version_line
+                    .split("+")
+                    .next()
+                    .context("Version parsing failed")?;
+                let version = Version::parse(version_string)?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
     fn supports_mode(
-        &self,
+        compiler_version: &Version,
         _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
         // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
         // (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
         pipeline == ModePipeline::ViaEVMAssembly
-            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
+            || (pipeline == ModePipeline::ViaYulIR
+                && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
     }
 }
 
-impl Solc {
-    fn compiler_supports_yul(&self) -> bool {
-        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
-        self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
-    }
-}
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
+        let compiler = Solc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        assert_eq!(
+            version.expect("Failed to get version"),
+            Version::new(0, 7, 6)
+        )
+    }
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained1() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21))
+            .await
+            .unwrap();
+        let compiler = Solc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        assert_eq!(
+            version.expect("Failed to get version"),
+            Version::new(0, 4, 21)
+        )
    }
 }
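Same idea for solc, except the parser anchors on the `Version: ` line quoted in the comment above. A worked example using that documented shape:

```rust
#[test]
fn parse_solc_version_string() {
    let stdout = "solc, the solidity compiler commandline interface\n\
                  Version: 0.8.30+commit.73712a01.Darwin.appleclang";
    let version = stdout
        .split("Version: ")
        .nth(1)
        .and_then(|line| line.split('+').next())
        .unwrap();
    assert_eq!(version, "0.8.30");
}
```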
+9 -10
@@ -1,25 +1,24 @@
 use std::path::PathBuf;
 
-use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
-use revive_dt_config::ExecutionContext;
+use revive_dt_config::Arguments;
 use semver::Version;
 
 #[tokio::test]
 async fn contracts_can_be_compiled_with_solc() {
     // Arrange
-    let args = ExecutionContext::default();
-    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
+    let args = Arguments::default();
+    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
         .await
         .unwrap();
 
     // Act
-    let output = Compiler::new()
+    let output = Compiler::<Solc>::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(&solc)
+        .try_build(compiler_path)
         .await;
 
     // Assert
@@ -49,18 +48,18 @@
 #[tokio::test]
 async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
-    let args = ExecutionContext::default();
-    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
+    let args = Arguments::default();
+    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
         .await
         .unwrap();
 
     // Act
-    let output = Compiler::new()
+    let output = Compiler::<Resolc>::new()
         .with_source("./tests/assets/array_one_element/callable.sol")
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(&resolc)
+        .try_build(compiler_path)
         .await;
 
     // Assert
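Resolc's `Options` type is `Vec<String>` (per the trait change above), so compiler-specific flags can be threaded through the builder as well. A hypothetical sketch — the flag shown is made up for illustration:

```rust
use std::path::PathBuf;

use revive_dt_compiler::{Compiler, revive_resolc::Resolc};

async fn compile_with_flags(compiler_path: PathBuf) -> anyhow::Result<()> {
    let _output = Compiler::<Resolc>::new()
        .with_source("./tests/assets/array_one_element/main.sol")?
        // Hypothetical flag, purely illustrative; Resolc's Options is Vec<String>.
        .with_additional_options(vec!["--hypothetical-flag".to_string()])
        .try_build(compiler_path)
        .await?;
    Ok(())
}
```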
-3
@@ -10,13 +10,10 @@ rust-version.workspace = true
 
 [dependencies]
 alloy = { workspace = true }
-anyhow = { workspace = true }
 clap = { workspace = true }
 semver = { workspace = true }
 temp-dir = { workspace = true }
 serde = { workspace = true }
-serde_json = { workspace = true }
-strum = { workspace = true }
 
 [lints]
 workspace = true
+137 -479
@@ -2,561 +2,219 @@
 use std::{
     fmt::Display,
-    fs::read_to_string,
-    ops::Deref,
     path::{Path, PathBuf},
-    str::FromStr,
-    sync::{Arc, LazyLock, OnceLock},
-    time::Duration,
+    sync::LazyLock,
 };
 
-use alloy::{
-    genesis::Genesis,
-    hex::ToHexExt,
-    network::EthereumWallet,
-    primitives::{FixedBytes, U256},
-    signers::local::PrivateKeySigner,
-};
-use clap::{Parser, ValueEnum, ValueHint};
+use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
+use clap::{Parser, ValueEnum};
 use semver::Version;
-use serde::{Serialize, Serializer};
-use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
+use serde::{Deserialize, Serialize};
 use temp_dir::TempDir;
 
-#[derive(Clone, Debug, Parser, Serialize)]
+#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
 #[command(name = "retester")]
-pub enum Context {
-    /// Executes tests in the MatterLabs format differentially against a leader and a follower.
-    ExecuteTests(ExecutionContext),
-}
-
-impl Context {
-    pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
-        self.as_ref()
-    }
-
-    pub fn report_configuration(&self) -> &ReportConfiguration {
-        self.as_ref()
-    }
-}
-
-impl AsRef<WorkingDirectoryConfiguration> for Context {
-    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
-        match self {
-            Context::ExecuteTests(execution_context) => &execution_context.working_directory,
-        }
-    }
-}
-
-impl AsRef<ReportConfiguration> for Context {
-    fn as_ref(&self) -> &ReportConfiguration {
-        match self {
-            Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
-        }
-    }
-}
-
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct ExecutionContext {
-    /// The working directory that the program will use for all of the temporary artifacts needed at
-    /// runtime.
-    ///
-    /// If not specified, then a temporary directory will be created and used by the program for all
-    /// temporary artifacts.
-    #[clap(
-        short,
-        long,
-        default_value = "",
-        value_hint = ValueHint::DirPath,
-    )]
-    pub working_directory: WorkingDirectoryConfiguration,
-
-    /// The differential testing leader node implementation.
-    #[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
-    pub leader: TestingPlatform,
-
-    /// The differential testing follower node implementation.
-    #[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
-    pub follower: TestingPlatform,
+pub struct Arguments {
+    /// The `solc` version to use if the test didn't specify it explicitly.
+    #[arg(long = "solc", short, default_value = "0.8.29")]
+    pub solc: Version,
+
+    /// Use the Wasm compiler versions.
+    #[arg(long = "wasm")]
+    pub wasm: bool,
+
+    /// The path to the `resolc` executable to be tested.
+    ///
+    /// By default it uses the `resolc` binary found in `$PATH`.
+    ///
+    /// If `--wasm` is set, this should point to the resolc Wasm file.
+    #[arg(long = "resolc", short, default_value = "resolc")]
+    pub resolc: PathBuf,
 
     /// A list of test corpus JSON files to be tested.
     #[arg(long = "corpus", short)]
     pub corpus: Vec<PathBuf>,
 
-    /// Configuration parameters for the solc compiler.
-    #[clap(flatten, next_help_heading = "Solc Configuration")]
-    pub solc_configuration: SolcConfiguration,
-
-    /// Configuration parameters for the resolc compiler.
-    #[clap(flatten, next_help_heading = "Resolc Configuration")]
-    pub resolc_configuration: ResolcConfiguration,
-
-    /// Configuration parameters for the geth node.
-    #[clap(flatten, next_help_heading = "Geth Configuration")]
-    pub geth_configuration: GethConfiguration,
-
-    /// Configuration parameters for the Kitchensink.
-    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
-    pub kitchensink_configuration: KitchensinkConfiguration,
-
-    /// Configuration parameters for the Revive Dev Node.
-    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
-    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
-
-    /// Configuration parameters for the Eth Rpc.
-    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
-    pub eth_rpc_configuration: EthRpcConfiguration,
-
-    /// Configuration parameters for the genesis.
-    #[clap(flatten, next_help_heading = "Genesis Configuration")]
-    pub genesis_configuration: GenesisConfiguration,
-
-    /// Configuration parameters for the wallet.
-    #[clap(flatten, next_help_heading = "Wallet Configuration")]
-    pub wallet_configuration: WalletConfiguration,
-
-    /// Configuration parameters for concurrency.
-    #[clap(flatten, next_help_heading = "Concurrency Configuration")]
-    pub concurrency_configuration: ConcurrencyConfiguration,
-
-    /// Configuration parameters for the compilers and compilation.
-    #[clap(flatten, next_help_heading = "Compilation Configuration")]
-    pub compilation_configuration: CompilationConfiguration,
-
-    /// Configuration parameters for the report.
-    #[clap(flatten, next_help_heading = "Report Configuration")]
-    pub report_configuration: ReportConfiguration,
-}
-
-impl Default for ExecutionContext {
-    fn default() -> Self {
-        Self::parse_from(["execution-context"])
-    }
-}
-
-impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
-        &self.working_directory
-    }
-}
-
-impl AsRef<SolcConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &SolcConfiguration {
-        &self.solc_configuration
-    }
-}
-
-impl AsRef<ResolcConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &ResolcConfiguration {
-        &self.resolc_configuration
-    }
-}
-
-impl AsRef<GethConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &GethConfiguration {
-        &self.geth_configuration
-    }
-}
-
-impl AsRef<KitchensinkConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &KitchensinkConfiguration {
-        &self.kitchensink_configuration
-    }
-}
-
-impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
-        &self.revive_dev_node_configuration
-    }
-}
-
-impl AsRef<EthRpcConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &EthRpcConfiguration {
-        &self.eth_rpc_configuration
-    }
-}
-
-impl AsRef<GenesisConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &GenesisConfiguration {
-        &self.genesis_configuration
-    }
-}
-
-impl AsRef<WalletConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &WalletConfiguration {
-        &self.wallet_configuration
-    }
-}
-
-impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &ConcurrencyConfiguration {
-        &self.concurrency_configuration
-    }
-}
-
-impl AsRef<CompilationConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &CompilationConfiguration {
-        &self.compilation_configuration
-    }
-}
-
-impl AsRef<ReportConfiguration> for ExecutionContext {
-    fn as_ref(&self) -> &ReportConfiguration {
-        &self.report_configuration
-    }
-}
-
-/// A set of configuration parameters for Solc.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct SolcConfiguration {
-    /// Specifies the default version of the Solc compiler that should be used if there is no
-    /// override specified by one of the test cases.
-    #[clap(long = "solc.version", default_value = "0.8.29")]
-    pub version: Version,
-}
-
-/// A set of configuration parameters for Resolc.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct ResolcConfiguration {
-    /// Specifies the path of the resolc compiler to be used by the tool.
-    ///
-    /// If this is not specified, then the tool assumes that it should use the resolc binary that's
-    /// provided in the user's $PATH.
-    #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
-    pub path: PathBuf,
-}
-
-/// A set of configuration parameters for Geth.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct GethConfiguration {
-    /// Specifies the path of the geth node to be used by the tool.
-    ///
-    /// If this is not specified, then the tool assumes that it should use the geth binary that's
-    /// provided in the user's $PATH.
-    #[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
-    pub path: PathBuf,
-
-    /// The amount of time to wait upon startup before considering that the node timed out.
-    #[clap(
-        id = "geth.start-timeout-ms",
-        long = "geth.start-timeout-ms",
-        default_value = "5000",
-        value_parser = parse_duration
-    )]
-    pub start_timeout_ms: Duration,
-}
-
-/// A set of configuration parameters for Kitchensink.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct KitchensinkConfiguration {
-    /// Specifies the path of the kitchensink node to be used by the tool.
-    ///
-    /// If this is not specified, then the tool assumes that it should use the kitchensink binary
-    /// that's provided in the user's $PATH.
-    #[clap(
-        id = "kitchensink.path",
-        long = "kitchensink.path",
-        default_value = "substrate-node"
-    )]
-    pub path: PathBuf,
-
-    /// The amount of time to wait upon startup before considering that the node timed out.
-    #[clap(
-        id = "kitchensink.start-timeout-ms",
-        long = "kitchensink.start-timeout-ms",
-        default_value = "5000",
-        value_parser = parse_duration
-    )]
-    pub start_timeout_ms: Duration,
-
-    /// This configures the tool to use Kitchensink instead of using the revive-dev-node.
-    #[clap(long = "kitchensink.dont-use-dev-node")]
-    pub use_kitchensink: bool,
-}
-
-/// A set of configuration parameters for the revive dev node.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct ReviveDevNodeConfiguration {
-    /// Specifies the path of the revive dev node to be used by the tool.
-    ///
-    /// If this is not specified, then the tool assumes that it should use the revive dev node binary
-    /// that's provided in the user's $PATH.
-    #[clap(
-        id = "revive-dev-node.path",
-        long = "revive-dev-node.path",
-        default_value = "revive-dev-node"
-    )]
-    pub path: PathBuf,
-
-    /// The amount of time to wait upon startup before considering that the node timed out.
-    #[clap(
-        id = "revive-dev-node.start-timeout-ms",
-        long = "revive-dev-node.start-timeout-ms",
-        default_value = "5000",
-        value_parser = parse_duration
-    )]
-    pub start_timeout_ms: Duration,
-}
-
-/// A set of configuration parameters for the ETH RPC.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct EthRpcConfiguration {
-    /// Specifies the path of the ETH RPC to be used by the tool.
-    ///
-    /// If this is not specified, then the tool assumes that it should use the ETH RPC binary
-    /// that's provided in the user's $PATH.
-    #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
-    pub path: PathBuf,
-
-    /// The amount of time to wait upon startup before considering that the node timed out.
-    #[clap(
-        id = "eth-rpc.start-timeout-ms",
-        long = "eth-rpc.start-timeout-ms",
-        default_value = "5000",
-        value_parser = parse_duration
-    )]
-    pub start_timeout_ms: Duration,
-}
-
-/// A set of configuration parameters for the genesis.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct GenesisConfiguration {
-    /// Specifies the path of the genesis file to use for the nodes that are started.
-    ///
-    /// This is expected to be the path of a JSON geth genesis file.
-    #[clap(id = "genesis.path", long = "genesis.path")]
-    path: Option<PathBuf>,
-
-    /// The genesis object found at the provided path.
+    /// A place to store temporary artifacts during test execution.
+    ///
+    /// Creates a temporary dir if not specified.
+    #[arg(long = "workdir", short)]
+    pub working_directory: Option<PathBuf>,
+
+    /// Add a tempdir manually if `working_directory` was not given.
+    ///
+    /// We attach it here because [TempDir] prunes itself on drop.
     #[clap(skip)]
     #[serde(skip)]
-    genesis: OnceLock<Genesis>,
-}
-
-impl GenesisConfiguration {
-    pub fn genesis(&self) -> anyhow::Result<&Genesis> {
-        static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
-            let genesis = include_str!("../../../genesis.json");
-            serde_json::from_str(genesis).unwrap()
-        });
-
-        match self.genesis.get() {
-            Some(genesis) => Ok(genesis),
-            None => {
-                let genesis = match self.path.as_ref() {
-                    Some(genesis_path) => {
-                        let genesis_content = read_to_string(genesis_path)?;
-                        serde_json::from_str(genesis_content.as_str())?
-                    }
-                    None => DEFAULT_GENESIS.clone(),
-                };
-                Ok(self.genesis.get_or_init(|| genesis))
-            }
-        }
-    }
-}
-
-/// A set of configuration parameters for the wallet.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct WalletConfiguration {
-    /// The private key of the default signer.
-    #[clap(
-        long = "wallet.default-private-key",
+    pub temp_dir: Option<&'static TempDir>,
+
+    /// The path to the `geth` executable.
+    ///
+    /// By default it uses `geth` binary found in `$PATH`.
+    #[arg(short, long = "geth", default_value = "geth")]
+    pub geth: PathBuf,
+
+    /// The maximum time in milliseconds to wait for geth to start.
+    #[arg(long = "geth-start-timeout", default_value = "5000")]
+    pub geth_start_timeout: u64,
+
+    /// Configure nodes according to this genesis.json file.
+    #[arg(long = "genesis", default_value = "genesis.json")]
+    pub genesis_file: PathBuf,
+
+    /// The signing account private key.
+    #[arg(
+        short,
+        long = "account",
         default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
     )]
-    #[serde(serialize_with = "serialize_private_key")]
-    default_key: PrivateKeySigner,
+    pub account: String,
 
     /// This argument controls which private keys the nodes should have access to and be added to
     /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
     /// of the node.
-    #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
-    additional_keys: usize,
-
-    /// The wallet object that will be used.
-    #[clap(skip)]
-    #[serde(skip)]
-    wallet: OnceLock<Arc<EthereumWallet>>,
-}
-
-impl WalletConfiguration {
-    pub fn wallet(&self) -> Arc<EthereumWallet> {
-        self.wallet
-            .get_or_init(|| {
-                let mut wallet = EthereumWallet::new(self.default_key.clone());
-                for signer in (1..=self.additional_keys)
-                    .map(|id| U256::from(id))
-                    .map(|id| id.to_be_bytes::<32>())
-                    .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
-                {
-                    wallet.register_signer(signer);
-                }
-                Arc::new(wallet)
-            })
-            .clone()
-    }
-}
-
-fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-{
-    value.to_bytes().encode_hex().serialize(serializer)
-}
-
-/// A set of configuration for concurrency.
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct ConcurrencyConfiguration {
+    #[arg(long = "private-keys-count", default_value_t = 100_000)]
+    pub private_keys_to_add: usize,
+
+    /// The differential testing leader node implementation.
+    #[arg(short, long = "leader", default_value = "geth")]
+    pub leader: TestingPlatform,
+
+    /// The differential testing follower node implementation.
+    #[arg(short, long = "follower", default_value = "kitchensink")]
+    pub follower: TestingPlatform,
+
+    /// Only compile against this testing platform (doesn't execute the tests).
+    #[arg(long = "compile-only")]
+    pub compile_only: Option<TestingPlatform>,
+
     /// Determines the amount of nodes that will be spawned for each chain.
-    #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
+    #[arg(long, default_value = "1")]
     pub number_of_nodes: usize,
 
     /// Determines the amount of tokio worker threads that will be used.
     #[arg(
-        long = "concurrency.number-of-threads",
+        long,
         default_value_t = std::thread::available_parallelism()
             .map(|n| n.get())
             .unwrap_or(1)
     )]
     pub number_of_threads: usize,
 
-    /// Determines the amount of concurrent tasks that will be spawned to run tests.
-    ///
-    /// Defaults to 10 x the number of nodes.
-    #[arg(long = "concurrency.number-of-concurrent-tasks")]
-    number_concurrent_tasks: Option<usize>,
-
-    /// Determines if the concurrency limit should be ignored or not.
-    #[arg(long = "concurrency.ignore-concurrency-limit")]
-    ignore_concurrency_limit: bool,
-}
-
-impl ConcurrencyConfiguration {
-    pub fn concurrency_limit(&self) -> Option<usize> {
-        match self.ignore_concurrency_limit {
-            true => None,
-            false => Some(
-                self.number_concurrent_tasks
-                    .unwrap_or(20 * self.number_of_nodes),
-            ),
-        }
-    }
-}
-
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct CompilationConfiguration {
+    /// Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes.
+    #[arg(long)]
+    pub number_concurrent_tasks: Option<usize>,
+
+    /// Extract problems back to the test corpus.
+    #[arg(short, long = "extract-problems")]
+    pub extract_problems: bool,
+
+    /// The path to the `kitchensink` executable.
+    ///
+    /// By default it uses `substrate-node` binary found in `$PATH`.
+    #[arg(short, long = "kitchensink", default_value = "substrate-node")]
+    pub kitchensink: PathBuf,
+
+    /// The path to the `revive-dev-node` executable.
+    ///
+    /// By default it uses `revive-dev-node` binary found in `$PATH`.
+    #[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
+    pub revive_dev_node: PathBuf,
+
+    /// By default the tool uses the revive-dev-node when it's running differential tests against
+    /// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to
+    /// configure the tool to use kitchensink rather than the dev-node.
+    #[arg(long)]
+    pub use_kitchensink_not_dev_node: bool,
+
+    /// The path to the `eth_proxy` executable.
+    ///
+    /// By default it uses `eth-rpc` binary found in `$PATH`.
+    #[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
+    pub eth_proxy: PathBuf,
+
     /// Controls if the compilation cache should be invalidated or not.
-    #[arg(long = "compilation.invalidate-cache")]
+    #[arg(short, long)]
     pub invalidate_compilation_cache: bool,
-}
-
-#[derive(Clone, Debug, Parser, Serialize)]
-pub struct ReportConfiguration {
+
     /// Controls if the compiler input is included in the final report.
     #[clap(long = "report.include-compiler-input")]
-    pub include_compiler_input: bool,
+    pub report_include_compiler_input: bool,
 
     /// Controls if the compiler output is included in the final report.
     #[clap(long = "report.include-compiler-output")]
-    pub include_compiler_output: bool,
+    pub report_include_compiler_output: bool,
 }
/// Represents the working directory that the program uses. impl Arguments {
#[derive(Debug, Clone)] /// Return the configured working directory with the following precedence:
pub enum WorkingDirectoryConfiguration { /// 1. `self.working_directory` if it was provided.
/// A temporary directory is used as the working directory. This will be removed when dropped. /// 2. `self.temp_dir` if it it was provided
TemporaryDirectory(Arc<TempDir>), /// 3. Panic.
/// A directory with a path is used as the working directory. pub fn directory(&self) -> &Path {
Path(PathBuf), if let Some(path) = &self.working_directory {
} return path.as_path();
impl WorkingDirectoryConfiguration {
pub fn as_path(&self) -> &Path {
self.as_ref()
}
}
impl Deref for WorkingDirectoryConfiguration {
type Target = Path;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl AsRef<Path> for WorkingDirectoryConfiguration {
fn as_ref(&self) -> &Path {
match self {
WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
WorkingDirectoryConfiguration::Path(path) => path.as_path(),
} }
if let Some(temp_dir) = &self.temp_dir {
return temp_dir.path();
}
panic!("should have a workdir configured")
}
/// Return the number of concurrent tasks to run. This is provided via the
/// `--number-concurrent-tasks` argument, and otherwise defaults to --number-of-nodes * 20.
pub fn number_of_concurrent_tasks(&self) -> usize {
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes)
}
/// Try to parse `self.account` into a [PrivateKeySigner],
/// panicing on error.
pub fn wallet(&self) -> EthereumWallet {
let signer = self
.account
.parse::<PrivateKeySigner>()
.unwrap_or_else(|error| {
panic!("private key '{}' parsing error: {error}", self.account);
});
EthereumWallet::new(signer)
} }
} }
impl Default for WorkingDirectoryConfiguration { impl Default for Arguments {
fn default() -> Self { fn default() -> Self {
TempDir::new() static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
.map(Arc::new)
.map(Self::TemporaryDirectory)
.expect("Failed to create the temporary directory")
}
}
impl FromStr for WorkingDirectoryConfiguration { let default = Arguments::parse_from(["retester"]);
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> { Arguments {
match s { temp_dir: Some(&TEMP_DIR),
"" => Ok(Default::default()), ..default
_ => Ok(Self::Path(PathBuf::from(s))),
} }
} }
} }
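Editorial note: the two `Arguments` helpers on the new side encode small but easy-to-miss rules, namely that an explicit working directory beats the implicit temp dir, and that the task budget defaults to 20 concurrent tasks per spawned node. A dependency-free sketch of the same logic, using simplified stand-in types rather than the crate's real ones:

```rust
use std::path::{Path, PathBuf};

/// Simplified stand-ins for the fields used by `directory` and
/// `number_of_concurrent_tasks` above (names are illustrative).
struct Config {
    working_directory: Option<PathBuf>,
    temp_dir: Option<PathBuf>,
    number_concurrent_tasks: Option<usize>,
    number_of_nodes: usize,
}

impl Config {
    /// An explicit working directory wins over the implicit temp dir.
    fn directory(&self) -> &Path {
        self.working_directory
            .as_deref()
            .or(self.temp_dir.as_deref())
            .expect("should have a workdir configured")
    }

    /// The task budget defaults to 20 concurrent tasks per spawned node.
    fn number_of_concurrent_tasks(&self) -> usize {
        self.number_concurrent_tasks
            .unwrap_or(20 * self.number_of_nodes)
    }
}

fn main() {
    let config = Config {
        working_directory: None,
        temp_dir: Some(std::env::temp_dir()),
        number_concurrent_tasks: None,
        number_of_nodes: 5,
    };
    assert_eq!(config.number_of_concurrent_tasks(), 100);
    println!("workdir: {}", config.directory().display());
}
```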
-impl Display for WorkingDirectoryConfiguration {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        Display::fmt(&self.as_path().display(), f)
-    }
-}
-
-impl Serialize for WorkingDirectoryConfiguration {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        self.as_path().serialize(serializer)
-    }
-}
-
-fn parse_duration(s: &str) -> anyhow::Result<Duration> {
-    u64::from_str(s)
-        .map(Duration::from_millis)
-        .map_err(Into::into)
-}
-
 /// The Solidity compatible node implementation.
 ///
 /// This describes the solutions to be tested against on a high level.
 #[derive(
-    Clone,
-    Copy,
-    Debug,
-    PartialEq,
-    Eq,
-    PartialOrd,
-    Ord,
-    Hash,
-    Serialize,
-    ValueEnum,
-    EnumString,
-    Display,
-    AsRefStr,
-    IntoStaticStr,
+    Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize,
 )]
-#[strum(serialize_all = "kebab-case")]
+#[clap(rename_all = "lower")]
 pub enum TestingPlatform {
     /// The go-ethereum reference full node EVM implementation.
     Geth,
     /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
     Kitchensink,
 }
+
+impl Display for TestingPlatform {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Geth => f.write_str("geth"),
+            Self::Kitchensink => f.write_str("revive"),
+        }
+    }
+}
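Worth flagging in review: dropping the strum derives changes the user-visible platform name. The old kebab-case `Display` derive printed `kitchensink`, while the hand-written impl on the new side prints `revive`. A self-contained sketch of the new behaviour:

```rust
use std::fmt;

#[derive(Clone, Copy, Debug)]
enum TestingPlatform {
    Geth,
    Kitchensink,
}

// Hand-rolled replacement for the derived strum Display. The old
// `#[strum(serialize_all = "kebab-case")]` form would have printed
// "kitchensink"; this prints "revive" instead.
impl fmt::Display for TestingPlatform {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Geth => f.write_str("geth"),
            Self::Kitchensink => f.write_str("revive"),
        }
    }
}

fn main() {
    assert_eq!(TestingPlatform::Geth.to_string(), "geth");
    assert_eq!(TestingPlatform::Kitchensink.to_string(), "revive");
}
```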
+3
@@ -28,6 +28,7 @@ cacache = { workspace = true }
 clap = { workspace = true }
 futures = { workspace = true }
 indexmap = { workspace = true }
+once_cell = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
 tracing-appender = { workspace = true }
@@ -35,6 +36,8 @@ tracing-subscriber = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
+temp-dir = { workspace = true }
+tempfile = { workspace = true }
 
 [lints]
 workspace = true
+135 -150
@@ -2,7 +2,6 @@
 //! be reused between runs.
 
 use std::{
-    borrow::Cow,
     collections::HashMap,
     path::{Path, PathBuf},
     sync::Arc,
@@ -10,13 +9,13 @@ use std::{
 use futures::FutureExt;
 use revive_dt_common::iterators::FilesWithExtensionIterator;
-use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_config::TestingPlatform;
+use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler};
+use revive_dt_config::Arguments;
 use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
 
 use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
-use anyhow::{Context as _, Error, Result};
-use revive_dt_report::ExecutionSpecificReporter;
+use anyhow::{Error, Result};
+use once_cell::sync::Lazy;
 use semver::Version;
 use serde::{Deserialize, Serialize};
 use tokio::sync::{Mutex, RwLock};
@@ -24,29 +23,15 @@ use tracing::{Instrument, debug, debug_span, instrument};
 
 use crate::Platform;
 
-pub struct CachedCompiler<'a> {
-    /// The cache that stores the compiled contracts.
-    artifacts_cache: ArtifactsCache,
-    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
-    /// come in for the same contract we never compile all of them and only compile it once and all
-    /// other tasks that request this same compilation concurrently get the cached version.
-    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
-}
-
-impl<'a> CachedCompiler<'a> {
+pub struct CachedCompiler(ArtifactsCache);
+
+impl CachedCompiler {
     pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
         let mut cache = ArtifactsCache::new(path);
 
         if invalidate_cache {
-            cache = cache
-                .with_invalidated_cache()
-                .await
-                .context("Failed to invalidate compilation cache directory")?;
+            cache = cache.with_invalidated_cache().await?;
         }
 
-        Ok(Self {
-            artifacts_cache: cache,
-            cache_key_lock: Default::default(),
-        })
+        Ok(Self(cache))
     }
 
     /// Compiles or gets the compilation artifacts from the cache.
@@ -55,7 +40,7 @@ impl<'a> CachedCompiler<'a> {
         level = "debug",
         skip_all,
         fields(
-            metadata_file_path = %metadata_file_path.display(),
+            metadata_file_path = %metadata_file_path.as_ref().display(),
             %mode,
             platform = P::config_id().to_string()
         ),
@@ -63,33 +48,70 @@ impl<'a> CachedCompiler<'a> {
     )]
     pub async fn compile_contracts<P: Platform>(
         &self,
-        metadata: &'a Metadata,
-        metadata_file_path: &'a Path,
-        mode: Cow<'a, Mode>,
+        metadata: &Metadata,
+        metadata_file_path: impl AsRef<Path>,
+        mode: &Mode,
+        config: &Arguments,
         deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
-        compiler: &P::Compiler,
-        reporter: &ExecutionSpecificReporter,
-    ) -> Result<CompilerOutput> {
+        compilation_success_report_callback: impl Fn(
+            Version,
+            PathBuf,
+            bool,
+            Option<CompilerInput>,
+            CompilerOutput,
+        ) + Clone,
+        compilation_failure_report_callback: impl Fn(
+            Option<Version>,
+            Option<PathBuf>,
+            Option<CompilerInput>,
+            String,
+        ),
+    ) -> Result<(CompilerOutput, Version)> {
+        static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
+            Lazy::new(Default::default);
+
+        let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone());
+        let compiler_path = <P::Compiler as SolidityCompiler>::get_compiler_executable(
+            config,
+            compiler_version_or_requirement,
+        )
+        .await
+        .inspect_err(|err| {
+            compilation_failure_report_callback(None, None, None, err.to_string())
+        })?;
+
+        let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
+            .version()
+            .await
+            .inspect_err(|err| {
+                compilation_failure_report_callback(
+                    None,
+                    Some(compiler_path.clone()),
+                    None,
+                    err.to_string(),
+                )
+            })?;
+
         let cache_key = CacheKey {
-            platform_key: P::config_id(),
-            compiler_version: compiler.version().clone(),
-            metadata_file_path,
+            platform_key: P::config_id().to_string(),
+            compiler_version: compiler_version.clone(),
+            metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
             solc_mode: mode.clone(),
         };
 
         let compilation_callback = || {
+            let compiler_path = compiler_path.clone();
+            let compiler_version = compiler_version.clone();
+            let compilation_success_report_callback = compilation_success_report_callback.clone();
             async move {
                 compile_contracts::<P>(
-                    metadata
-                        .directory()
-                        .context("Failed to get metadata directory while preparing compilation")?,
-                    metadata
-                        .files_to_compile()
-                        .context("Failed to enumerate files to compile from metadata")?,
-                    &mode,
+                    metadata.directory()?,
+                    compiler_path,
+                    compiler_version,
+                    metadata.files_to_compile()?,
+                    mode,
                     deployed_libraries,
-                    compiler,
-                    reporter,
+                    compilation_success_report_callback,
+                    compilation_failure_report_callback,
                 )
                 .map(|compilation_result| compilation_result.map(CacheValue::new))
                 .await
@@ -109,10 +131,7 @@ impl<'a> CachedCompiler<'a> {
             Some(_) => {
                 debug!("Deployed libraries defined, recompilation must take place");
                 debug!("Cache miss");
-                compilation_callback()
-                    .await
-                    .context("Compilation callback for deployed libraries failed")?
-                    .compiler_output
+                compilation_callback().await?.compiler_output
             }
             // If no deployed libraries are specified then we can follow the cached flow and attempt
            // to lookup the compilation artifacts in the cache.
@@ -122,15 +141,12 @@ impl<'a> CachedCompiler<'a> {
                 // Lock this specific cache key such that we do not get inconsistent state. We want
                 // that when multiple cases come in asking for the compilation artifacts then they
                 // don't all trigger a compilation if there's a cache miss. Hence, the lock here.
-                let read_guard = self.cache_key_lock.read().await;
+                let read_guard = CACHE_KEY_LOCK.read().await;
                 let mutex = match read_guard.get(&cache_key).cloned() {
-                    Some(value) => {
-                        drop(read_guard);
-                        value
-                    }
+                    Some(value) => value,
                     None => {
                         drop(read_guard);
-                        self.cache_key_lock
+                        CACHE_KEY_LOCK
                             .write()
                             .await
                             .entry(cache_key.clone())
@@ -140,59 +156,54 @@ impl<'a> CachedCompiler<'a> {
                 };
                 let _guard = mutex.lock().await;
 
-                match self.artifacts_cache.get(&cache_key).await {
+                match self.0.get(&cache_key).await {
                     Some(cache_value) => {
-                        if deployed_libraries.is_some() {
-                            reporter
-                                .report_post_link_contracts_compilation_succeeded_event(
-                                    compiler.version().clone(),
-                                    compiler.path(),
-                                    true,
-                                    None,
-                                    cache_value.compiler_output.clone(),
-                                )
-                                .expect("Can't happen");
-                        } else {
-                            reporter
-                                .report_pre_link_contracts_compilation_succeeded_event(
-                                    compiler.version().clone(),
-                                    compiler.path(),
-                                    true,
-                                    None,
-                                    cache_value.compiler_output.clone(),
-                                )
-                                .expect("Can't happen");
-                        }
+                        compilation_success_report_callback(
+                            compiler_version.clone(),
+                            compiler_path,
+                            true,
+                            None,
+                            cache_value.compiler_output.clone(),
+                        );
                         cache_value.compiler_output
                     }
-                    None => {
-                        compilation_callback()
-                            .await
-                            .context("Compilation callback failed (cache miss path)")?
-                            .compiler_output
-                    }
+                    None => compilation_callback().await?.compiler_output,
                 }
             }
         };
 
-        Ok(compiled_contracts)
+        Ok((compiled_contracts, compiler_version))
     }
 }
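The `CACHE_KEY_LOCK` dance above is a per-key double-checked lock: take the read lock for the fast path, upgrade to the write lock only to create a missing mutex, then serialize the actual compilation on that mutex so concurrent requests for the same contract compile it once. A reduced sketch of the pattern, assuming tokio with the `sync`, `rt` and `macros` features and simplifying the key to a `String`:

```rust
use std::{collections::HashMap, sync::Arc};

use tokio::sync::{Mutex, RwLock};

/// Double-checked per-key locking as used by `compile_contracts` above,
/// reduced to a sketch.
async fn key_mutex(
    locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
    key: &str,
) -> Arc<Mutex<()>> {
    // Fast path: most calls find an existing mutex under the read lock.
    if let Some(mutex) = locks.read().await.get(key).cloned() {
        return mutex;
    }
    // Slow path: upgrade to the write lock; `entry` tolerates a racing insert.
    locks
        .write()
        .await
        .entry(key.to_owned())
        .or_default()
        .clone()
}

#[tokio::main]
async fn main() {
    let locks = RwLock::new(HashMap::new());
    let mutex = key_mutex(&locks, "geth|0.8.26|tests/a.json").await;
    let _guard = mutex.lock().await;
    // Compile here; concurrent tasks asking for the same key now wait.
}
```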
+#[allow(clippy::too_many_arguments)]
 async fn compile_contracts<P: Platform>(
     metadata_directory: impl AsRef<Path>,
+    compiler_path: impl AsRef<Path>,
+    compiler_version: Version,
     mut files_to_compile: impl Iterator<Item = PathBuf>,
     mode: &Mode,
     deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
-    compiler: &P::Compiler,
-    reporter: &ExecutionSpecificReporter,
+    compilation_success_report_callback: impl Fn(
+        Version,
+        PathBuf,
+        bool,
+        Option<CompilerInput>,
+        CompilerOutput,
+    ),
+    compilation_failure_report_callback: impl Fn(
+        Option<Version>,
+        Option<PathBuf>,
+        Option<CompilerInput>,
+        String,
+    ),
 ) -> Result<CompilerOutput> {
     let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
         .with_allowed_extension("sol")
         .with_use_cached_fs(true)
         .collect::<Vec<_>>();
 
-    let compilation = Compiler::new()
+    let compiler = Compiler::<P::Compiler>::new()
         .with_allow_path(metadata_directory)
         // Handling the modes
         .with_optimization(mode.optimize_setting)
@@ -200,6 +211,14 @@ async fn compile_contracts<P: Platform>(
         // Adding the contract sources to the compiler.
         .try_then(|compiler| {
             files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
+        })
+        .inspect_err(|err| {
+            compilation_failure_report_callback(
+                Some(compiler_version.clone()),
+                Some(compiler_path.as_ref().to_path_buf()),
+                None,
+                err.to_string(),
+            )
         })?
         // Adding the deployed libraries to the compiler.
         .then(|compiler| {
@@ -217,55 +236,26 @@ async fn compile_contracts<P: Platform>(
             })
         });
 
-    let input = compilation.input().clone();
-    let output = compilation.try_build(compiler).await;
-
-    match (output.as_ref(), deployed_libraries.is_some()) {
-        (Ok(output), true) => {
-            reporter
-                .report_post_link_contracts_compilation_succeeded_event(
-                    compiler.version().clone(),
-                    compiler.path(),
-                    false,
-                    input,
-                    output.clone(),
-                )
-                .expect("Can't happen");
-        }
-        (Ok(output), false) => {
-            reporter
-                .report_pre_link_contracts_compilation_succeeded_event(
-                    compiler.version().clone(),
-                    compiler.path(),
-                    false,
-                    input,
-                    output.clone(),
-                )
-                .expect("Can't happen");
-        }
-        (Err(err), true) => {
-            reporter
-                .report_post_link_contracts_compilation_failed_event(
-                    compiler.version().clone(),
-                    compiler.path().to_path_buf(),
-                    input,
-                    format!("{err:#}"),
-                )
-                .expect("Can't happen");
-        }
-        (Err(err), false) => {
-            reporter
-                .report_pre_link_contracts_compilation_failed_event(
-                    compiler.version().clone(),
-                    compiler.path().to_path_buf(),
-                    input,
-                    format!("{err:#}"),
-                )
-                .expect("Can't happen");
-        }
-    }
-
-    output
+    let compiler_input = compiler.input();
+    let compiler_output = compiler
+        .try_build(compiler_path.as_ref())
+        .await
+        .inspect_err(|err| {
+            compilation_failure_report_callback(
+                Some(compiler_version.clone()),
+                Some(compiler_path.as_ref().to_path_buf()),
+                Some(compiler_input.clone()),
+                err.to_string(),
+            )
+        })?;
+
+    compilation_success_report_callback(
+        compiler_version,
+        compiler_path.as_ref().to_path_buf(),
+        false,
+        Some(compiler_input),
+        compiler_output.clone(),
+    );
+
+    Ok(compiler_output)
 }
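The free function now reports through plain success/failure callbacks and `Result::inspect_err` rather than a reporter handle, which removes the four-way match over `(Ok/Err, pre/post-link)`. The shape, boiled down to a dependency-free sketch with hypothetical names:

```rust
// Report failures as a side effect, propagate the error, and only report
// success once the whole pipeline has produced an output.
fn compile(
    source: &str,
    on_success: impl Fn(&str),
    on_failure: impl Fn(String),
) -> Result<String, String> {
    let output = do_compile(source)
        // `inspect_err` runs the callback without consuming the Result,
        // so `?` can still propagate the original error afterwards.
        .inspect_err(|err| on_failure(err.clone()))?;
    on_success(&output);
    Ok(output)
}

fn do_compile(source: &str) -> Result<String, String> {
    if source.is_empty() {
        Err("empty source".to_owned())
    } else {
        Ok(format!("compiled: {source}"))
    }
}

fn main() {
    let _ = compile(
        "contract C {}",
        |output| println!("ok: {output}"),
        |error| eprintln!("failed: {error}"),
    );
}
```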
 struct ArtifactsCache {
@@ -283,24 +273,19 @@ impl ArtifactsCache {
     pub async fn with_invalidated_cache(self) -> Result<Self> {
         cacache::clear(self.path.as_path())
             .await
-            .map_err(Into::<Error>::into)
-            .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
+            .map_err(Into::<Error>::into)?;
         Ok(self)
     }
 
     #[instrument(level = "debug", skip_all, err)]
-    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
-        let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
-        let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
-        cacache::write(self.path.as_path(), key.encode_hex(), value)
-            .await
-            .with_context(|| {
-                format!("Failed to write cache entry under {}", self.path.display())
-            })?;
+    pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
+        let key = bson::to_vec(key)?;
+        let value = bson::to_vec(value)?;
+        cacache::write(self.path.as_path(), key.encode_hex(), value).await?;
         Ok(())
     }
 
-    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
+    pub async fn get(&self, key: &CacheKey) -> Option<CacheValue> {
         let key = bson::to_vec(key).ok()?;
         let value = cacache::read(self.path.as_path(), key.encode_hex())
             .await
@@ -312,7 +297,7 @@ impl ArtifactsCache {
     #[instrument(level = "debug", skip_all, err)]
     pub async fn get_or_insert_with(
         &self,
-        key: &CacheKey<'_>,
+        key: &CacheKey,
         callback: impl AsyncFnOnce() -> Result<CacheValue>,
     ) -> Result<CacheValue> {
         match self.get(key).await {
@@ -330,20 +315,20 @@ impl ArtifactsCache {
     }
 }
 
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
-struct CacheKey<'a> {
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+struct CacheKey {
     /// The platform name that this artifact was compiled for. For example, this could be EVM or
     /// PVM.
-    platform_key: &'a TestingPlatform,
+    platform_key: String,
     /// The version of the compiler that was used to compile the artifacts.
     compiler_version: Version,
     /// The path of the metadata file that the compilation artifacts are for.
-    metadata_file_path: &'a Path,
+    metadata_file_path: PathBuf,
     /// The mode that the compilation artifacts were compiled with.
-    solc_mode: Cow<'a, Mode>,
+    solc_mode: Mode,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
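For the cacache entry name, both sides serialize the `CacheKey` with `bson::to_vec` and hex-encode the bytes via alloy's `ToHexExt`, so any structurally equal key maps to the same entry. A sketch of the same idea that swaps in `serde_json` and a hand-rolled hex helper to stay dependency-light (assumes the `serde` derive feature; the reduced key below is hypothetical):

```rust
use serde::Serialize;

/// Hypothetical, reduced cache key; the real one also carries the compiler
/// version and the solc mode.
#[derive(Serialize)]
struct CacheKey<'a> {
    platform_key: &'a str,
    metadata_file_path: &'a str,
}

/// Lowercase hex encoding, standing in for alloy's `ToHexExt`.
fn hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() -> Result<(), serde_json::Error> {
    let key = CacheKey {
        platform_key: "geth",
        metadata_file_path: "tests/a.json",
    };
    // The real code uses bson::to_vec; serde_json stands in here.
    let entry_name = hex(&serde_json::to_vec(&key)?);
    println!("cacache entry name: {entry_name}");
    Ok(())
}
```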
+21 -42
@@ -18,7 +18,7 @@ use alloy::{
     primitives::Address,
     rpc::types::{TransactionRequest, trace::geth::DiffMode},
 };
-use anyhow::Context as _;
+use anyhow::Context;
 use futures::TryStreamExt;
 use indexmap::IndexMap;
 use revive_dt_format::traits::{ResolutionContext, ResolverApi};
@@ -86,22 +86,18 @@ where
     ) -> anyhow::Result<StepOutput> {
         match step {
             Step::FunctionCall(input) => {
-                let (receipt, geth_trace, diff_mode) = self
-                    .handle_input(metadata, input, node)
-                    .await
-                    .context("Failed to handle function call step")?;
+                let (receipt, geth_trace, diff_mode) =
+                    self.handle_input(metadata, input, node).await?;
                 Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
             }
             Step::BalanceAssertion(balance_assertion) => {
                 self.handle_balance_assertion(metadata, balance_assertion, node)
-                    .await
-                    .context("Failed to handle balance assertion step")?;
+                    .await?;
                 Ok(StepOutput::BalanceAssertion)
             }
             Step::StorageEmptyAssertion(storage_empty) => {
                 self.handle_storage_empty(metadata, storage_empty, node)
-                    .await
-                    .context("Failed to handle storage empty assertion step")?;
+                    .await?;
                 Ok(StepOutput::StorageEmptyAssertion)
             }
         }
@@ -117,23 +113,18 @@ where
     ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
         let deployment_receipts = self
             .handle_input_contract_deployment(metadata, input, node)
-            .await
-            .context("Failed during contract deployment phase of input handling")?;
+            .await?;
         let execution_receipt = self
             .handle_input_execution(input, deployment_receipts, node)
-            .await
-            .context("Failed during transaction execution phase of input handling")?;
+            .await?;
         let tracing_result = self
             .handle_input_call_frame_tracing(&execution_receipt, node)
-            .await
-            .context("Failed during callframe tracing phase of input handling")?;
-        self.handle_input_variable_assignment(input, &tracing_result)
-            .context("Failed to assign variables from callframe output")?;
+            .await?;
+        self.handle_input_variable_assignment(input, &tracing_result)?;
         let (_, (geth_trace, diff_mode)) = try_join!(
             self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
             self.handle_input_diff(&execution_receipt, node)
-        )
-        .context("Failed while evaluating expectations and diffs in parallel")?;
+        )?;
         Ok((execution_receipt, geth_trace, diff_mode))
     }
@@ -145,11 +136,9 @@ where
         node: &T::Blockchain,
     ) -> anyhow::Result<()> {
         self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
-            .await
-            .context("Failed to deploy contract for balance assertion")?;
+            .await?;
         self.handle_balance_assertion_execution(balance_assertion, node)
-            .await
-            .context("Failed to execute balance assertion")?;
+            .await?;
         Ok(())
     }
@@ -161,11 +150,9 @@ where
         node: &T::Blockchain,
     ) -> anyhow::Result<()> {
         self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
-            .await
-            .context("Failed to deploy contract for storage empty assertion")?;
+            .await?;
         self.handle_storage_empty_assertion_execution(storage_empty, node)
-            .await
-            .context("Failed to execute storage empty assertion")?;
+            .await?;
         Ok(())
     }
@@ -204,8 +191,7 @@ where
                 value,
                 node,
             )
-            .await
-            .context("Failed to get or deploy contract instance during input execution")?
+            .await?
             {
                 receipts.insert(instance.clone(), receipt);
             }
@@ -227,7 +213,7 @@ where
             // lookup the transaction receipt in this case and continue on.
             Method::Deployer => deployment_receipts
                 .remove(&input.instance)
-                .context("Failed to find deployment receipt for constructor call"),
+                .context("Failed to find deployment receipt"),
             Method::Fallback | Method::FunctionName(_) => {
                 let tx = match input
                     .legacy_transaction(node, self.default_resolution_context())
@@ -399,8 +385,7 @@ where
         let actual = &tracing_result.output.as_ref().unwrap_or_default();
         if !expected
             .is_equivalent(actual, resolver, resolution_context)
-            .await
-            .context("Failed to resolve calldata equivalence for return data assertion")?
+            .await?
         {
             tracing::error!(
                 ?execution_receipt,
@@ -463,8 +448,7 @@ where
                 let expected = Calldata::new_compound([expected]);
                 if !expected
                     .is_equivalent(&actual.0, resolver, resolution_context)
-                    .await
-                    .context("Failed to resolve event topic equivalence")?
+                    .await?
                 {
                     tracing::error!(
                         event_idx,
@@ -484,8 +468,7 @@ where
                 let actual = &actual_event.data().data;
                 if !expected
                     .is_equivalent(&actual.0, resolver, resolution_context)
-                    .await
-                    .context("Failed to resolve event value equivalence")?
+                    .await?
                 {
                     tracing::error!(
                         event_idx,
@@ -518,12 +501,8 @@ where
         let trace = node
             .trace_transaction(execution_receipt, trace_options)
-            .await
-            .context("Failed to obtain geth prestate tracer output")?;
-        let diff = node
-            .state_diff(execution_receipt)
-            .await
-            .context("Failed to obtain state diff for transaction")?;
+            .await?;
+        let diff = node.state_diff(execution_receipt).await?;
         Ok((trace, diff))
     }
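In `handle_input` above, the expectation checks and the prestate diff run concurrently, and the first error cancels the pair; `try_join!` is what provides that behaviour. A minimal sketch, assuming the `anyhow` crate and tokio's `rt`/`macros` features:

```rust
async fn check_expectations() -> anyhow::Result<()> {
    // Stand-in for handle_input_expectations.
    Ok(())
}

async fn fetch_state_diff() -> anyhow::Result<String> {
    // Stand-in for handle_input_diff.
    Ok("diff".to_owned())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Both futures are polled concurrently; an Err from either side makes
    // the whole expression return that error immediately.
    let ((), diff) = tokio::try_join!(check_expectations(), fetch_state_diff())?;
    println!("both succeeded: {diff}");
    Ok(())
}
```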
+5 -5
@@ -19,7 +19,7 @@ pub trait Platform {
     type Compiler: SolidityCompiler;
 
     /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
-    fn config_id() -> &'static TestingPlatform;
+    fn config_id() -> TestingPlatform;
 }
 
 #[derive(Default)]
@@ -29,8 +29,8 @@ impl Platform for Geth {
     type Blockchain = geth::GethNode;
     type Compiler = solc::Solc;
 
-    fn config_id() -> &'static TestingPlatform {
-        &TestingPlatform::Geth
+    fn config_id() -> TestingPlatform {
+        TestingPlatform::Geth
     }
 }
 
@@ -41,7 +41,7 @@ impl Platform for Kitchensink {
     type Blockchain = KitchensinkNode;
     type Compiler = revive_resolc::Resolc;
 
-    fn config_id() -> &'static TestingPlatform {
-        &TestingPlatform::Kitchensink
+    fn config_id() -> TestingPlatform {
+        TestingPlatform::Kitchensink
     }
 }
+538 -362
File diff suppressed because it is too large
+1 -1
@@ -64,7 +64,7 @@ impl Case {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().cloned().collect(),
+            None => Mode::all().collect(),
         }
     }
 }
+15 -17
@@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize};
 use tracing::{debug, info};
 
 use crate::metadata::{Metadata, MetadataFile};
-use anyhow::Context as _;
 
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
 #[serde(untagged)]
@@ -21,24 +20,23 @@ impl Corpus {
     pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
         let mut corpus = File::open(file_path.as_ref())
             .map_err(anyhow::Error::from)
-            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
-            .with_context(|| {
-                format!(
-                    "Failed to open and deserialize corpus file at {}",
-                    file_path.as_ref().display()
-                )
-            })?;
-
-        let corpus_directory = file_path
-            .as_ref()
-            .canonicalize()
-            .context("Failed to canonicalize the path to the corpus file")?
-            .parent()
-            .context("Corpus file has no parent")?
-            .to_path_buf();
+            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?;
 
         for path in corpus.paths_iter_mut() {
-            *path = corpus_directory.join(path.as_path())
+            *path = file_path
+                .as_ref()
+                .parent()
+                .ok_or_else(|| {
+                    anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display())
+                })?
+                .canonicalize()
+                .map_err(|error| {
+                    anyhow::anyhow!(
+                        "Failed to canonicalize path to corpus '{}': {error}",
+                        path.display()
+                    )
+                })?
+                .join(path.as_path())
         }
 
         Ok(corpus)
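Note the subtle difference between the two sides above: the old code canonicalizes the corpus file once up front and joins every relative test path against its parent, while the new code redoes the parent lookup and canonicalization inside the loop. A sketch of the once-up-front flavour, assuming the `anyhow` crate (the paths in `main` are hypothetical):

```rust
use std::path::{Path, PathBuf};

/// Resolve a corpus-relative test path: canonicalize the corpus file itself,
/// then join the relative path against its parent directory.
fn resolve(corpus_file: &Path, relative: &Path) -> anyhow::Result<PathBuf> {
    let corpus_directory = corpus_file
        .canonicalize()?
        .parent()
        .ok_or_else(|| anyhow::anyhow!("corpus file has no parent"))?
        .to_path_buf();
    Ok(corpus_directory.join(relative))
}

fn main() -> anyhow::Result<()> {
    let resolved = resolve(Path::new("Cargo.toml"), Path::new("tests/storage.json"))?;
    println!("{}", resolved.display());
    Ok(())
}
```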
+15 -39
@@ -8,7 +8,7 @@ use alloy::{
     rpc::types::TransactionRequest,
 };
 use alloy_primitives::{FixedBytes, utils::parse_units};
-use anyhow::Context as _;
+use anyhow::Context;
 use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
 use semver::VersionReq;
 use serde::{Deserialize, Serialize};
@@ -268,11 +268,7 @@ impl Input {
     ) -> anyhow::Result<Bytes> {
         match self.method {
             Method::Deployer | Method::Fallback => {
-                let calldata = self
-                    .calldata
-                    .calldata(resolver, context)
-                    .await
-                    .context("Failed to produce calldata for deployer/fallback method")?;
+                let calldata = self.calldata.calldata(resolver, context).await?;
                 Ok(calldata.into())
             }
@@ -287,15 +283,14 @@ impl Input {
                 // Overloads are handled by providing the full function signature in the "function
                 // name".
                 // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
-                let selector =
-                    if function_name.contains('(') && function_name.contains(')') {
-                        Function::parse(function_name)
+                let selector = if function_name.contains('(') && function_name.contains(')') {
+                    Function::parse(function_name)
                         .context(
                             "Failed to parse the provided function name into a function signature",
                         )?
                         .selector()
                 } else {
                     abi.functions()
                         .find(|function| function.signature().starts_with(function_name))
                         .ok_or_else(|| {
                             anyhow::anyhow!(
@@ -303,13 +298,9 @@ impl Input {
                                 function_name,
                                 &self.instance
                             )
-                        })
-                        .with_context(|| format!(
-                            "Failed to resolve function selector for {:?} on instance {:?}",
-                            function_name, &self.instance
-                        ))?
+                        })?
                         .selector()
                 };
 
                 // Allocating a vector that we will be using for the calldata. The vector size will be:
                 // 4 bytes for the function selector.
@@ -321,8 +312,7 @@ impl Input {
                 calldata.extend(selector.0);
                 self.calldata
                     .calldata_into_slice(&mut calldata, resolver, context)
-                    .await
-                    .context("Failed to append encoded argument to calldata buffer")?;
+                    .await?;
 
                 Ok(calldata.into())
             }
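The allocation comment above describes the classic ABI call layout: 4 selector bytes followed by one 32-byte big-endian word per argument. A simplified sketch of that layout which skips dynamic types and real keccak-derived selectors (the constant below is the well-known `transfer(address,uint256)` selector, used purely as an example):

```rust
/// Encode a static-argument call: selector, then one 32-byte word per value.
fn encode_call(selector: [u8; 4], args: &[u64]) -> Vec<u8> {
    let mut calldata = Vec::with_capacity(4 + 32 * args.len());
    calldata.extend_from_slice(&selector);
    for arg in args {
        // Left-pad each argument into a big-endian 32-byte word.
        let mut word = [0u8; 32];
        word[24..].copy_from_slice(&arg.to_be_bytes());
        calldata.extend_from_slice(&word);
    }
    calldata
}

fn main() {
    let calldata = encode_call([0xa9, 0x05, 0x9c, 0xbb], &[1, 2]);
    assert_eq!(calldata.len(), 4 + 32 * 2);
    // The last byte of the first word carries the value 1.
    assert_eq!(calldata[4 + 31], 1);
}
```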
@@ -335,10 +325,7 @@ impl Input {
         resolver: &impl ResolverApi,
         context: ResolutionContext<'_>,
     ) -> anyhow::Result<TransactionRequest> {
-        let input_data = self
-            .encoded_input(resolver, context)
-            .await
-            .context("Failed to encode input bytes for transaction request")?;
+        let input_data = self.encoded_input(resolver, context).await?;
         let transaction_request = TransactionRequest::default().from(self.caller).value(
             self.value
                 .map(|value| value.into_inner())
@@ -450,8 +437,7 @@ impl Calldata {
                 })
                 .buffered(0xFF)
                 .try_collect::<Vec<_>>()
-                .await
-                .context("Failed to resolve one or more calldata arguments")?;
+                .await?;
             buffer.extend(resolved.into_iter().flatten());
         }
@@ -492,10 +478,7 @@ impl Calldata {
                     std::borrow::Cow::Borrowed(other)
                 };
-                let this = this
-                    .resolve(resolver, context)
-                    .await
-                    .context("Failed to resolve calldata item during equivalence check")?;
+                let this = this.resolve(resolver, context).await?;
                 let other = U256::from_be_slice(&other);
                 Ok(this == other)
             })
@@ -681,24 +664,17 @@ impl<T: AsRef<str>> CalldataToken<T> {
             let current_block_number = match context.tip_block_number() {
                 Some(block_number) => *block_number,
-                None => resolver.last_block_number().await.context(
-                    "Failed to query last block number while resolving $BLOCK_HASH",
-                )?,
+                None => resolver.last_block_number().await?,
             };
             let desired_block_number = current_block_number.saturating_sub(offset);
-            let block_hash = resolver
-                .block_hash(desired_block_number.into())
-                .await
-                .context("Failed to resolve block hash for desired block number")?;
+            let block_hash = resolver.block_hash(desired_block_number.into()).await?;
             Ok(U256::from_be_bytes(block_hash.0))
         } else if item == Self::BLOCK_NUMBER_VARIABLE {
             let current_block_number = match context.tip_block_number() {
                 Some(block_number) => *block_number,
-                None => resolver.last_block_number().await.context(
-                    "Failed to query last block number while resolving $BLOCK_NUMBER",
-                )?,
+                None => resolver.last_block_number().await?,
             };
             Ok(U256::from(current_block_number))
         } else if item == Self::BLOCK_TIMESTAMP_VARIABLE {
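The `$BLOCK_HASH` variable with an offset resolves against the tip block number minus the offset, saturating at zero, and the same tip lookup backs `$BLOCK_NUMBER`. The arithmetic in isolation:

```rust
/// Which block a `$BLOCK_HASH:offset` token should resolve against.
fn desired_block(tip_block_number: u64, offset: u64) -> u64 {
    tip_block_number.saturating_sub(offset)
}

fn main() {
    assert_eq!(desired_block(100, 3), 97);
    // An offset larger than the chain height saturates to the genesis block
    // instead of underflowing.
    assert_eq!(desired_block(2, 10), 0);
}
```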
+2 -10
@@ -99,7 +99,7 @@ impl Metadata {
     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
-            None => Mode::all().cloned().collect(),
+            None => Mode::all().collect(),
         }
     }
@@ -132,15 +132,7 @@ impl Metadata {
         ) in contracts
         {
             let alias = alias.clone();
-            let absolute_path = directory
-                .join(contract_source_path)
-                .canonicalize()
-                .map_err(|error| {
-                    anyhow::anyhow!(
-                        "Failed to canonicalize contract source path '{}': {error}",
-                        directory.join(contract_source_path).display()
-                    )
-                })?;
+            let absolute_path = directory.join(contract_source_path).canonicalize()?;
             let contract_ident = contract_ident.clone();
             sources.insert(
+26 -20
@@ -1,6 +1,4 @@
-use anyhow::Context as _;
 use regex::Regex;
-use revive_dt_common::iterators::EitherIter;
 use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
 use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
@@ -46,34 +44,21 @@ impl FromStr for ParsedMode {
         };
 
         let pipeline = match caps.name("pipeline") {
-            Some(m) => Some(
-                ModePipeline::from_str(m.as_str())
-                    .context("Failed to parse mode pipeline from string")?,
-            ),
+            Some(m) => Some(ModePipeline::from_str(m.as_str())?),
             None => None,
         };
 
         let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
 
         let optimize_setting = match caps.name("optimize_setting") {
-            Some(m) => Some(
-                ModeOptimizerSetting::from_str(m.as_str())
-                    .context("Failed to parse optimizer setting from string")?,
-            ),
+            Some(m) => Some(ModeOptimizerSetting::from_str(m.as_str())?),
             None => None,
         };
 
         let version = match caps.name("version") {
-            Some(m) => Some(
-                semver::VersionReq::parse(m.as_str())
-                    .map_err(|e| {
-                        anyhow::anyhow!(
-                            "Cannot parse the version requirement '{}': {e}",
-                            m.as_str()
-                        )
-                    })
-                    .context("Failed to parse semver requirement from mode string")?,
-            ),
+            Some(m) => Some(semver::VersionReq::parse(m.as_str()).map_err(|e| {
+                anyhow::anyhow!("Cannot parse the version requirement '{}': {e}", m.as_str())
+            })?),
             None => None,
         };
@@ -177,6 +162,27 @@ impl ParsedMode {
     }
 }
 
+/// An iterator that could be either of two iterators.
+#[derive(Clone, Debug)]
+enum EitherIter<A, B> {
+    A(A),
+    B(B),
+}
+
+impl<A, B> Iterator for EitherIter<A, B>
+where
+    A: Iterator,
+    B: Iterator<Item = A::Item>,
+{
+    type Item = A::Item;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self {
+            EitherIter::A(iter) => iter.next(),
+            EitherIter::B(iter) => iter.next(),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
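Moving `EitherIter` in-crate keeps a useful trick local: the two arms of a conditional can return different concrete iterator types, and the enum unifies them behind one `impl Iterator` without boxing. A compact usage demo, re-declaring the type so the example stands alone:

```rust
enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B> Iterator for EitherIter<A, B>
where
    A: Iterator,
    B: Iterator<Item = A::Item>,
{
    type Item = A::Item;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}

// Without the enum this would not compile: `Filter<Range<u32>, _>` and
// `Range<u32>` are different types, so the arms could not unify.
fn evens_or_all(only_evens: bool) -> impl Iterator<Item = u32> {
    if only_evens {
        EitherIter::A((0..10).filter(|n| n % 2 == 0))
    } else {
        EitherIter::B(0..10)
    }
}

fn main() {
    assert_eq!(evens_or_all(true).count(), 5);
    assert_eq!(evens_or_all(false).count(), 10);
}
```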
+109 -149
@@ -17,7 +17,9 @@ use alloy::{
     eips::BlockNumberOrTag,
     genesis::{Genesis, GenesisAccount},
     network::{Ethereum, EthereumWallet, NetworkWallet},
-    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
+    primitives::{
+        Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
+    },
     providers::{
         Provider, ProviderBuilder,
         ext::DebugApi,
@@ -27,8 +29,9 @@ use alloy::{
         EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
         trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
     },
+    signers::local::PrivateKeySigner,
 };
-use anyhow::Context as _;
+use anyhow::Context;
 use revive_common::EVMVersion;
 use tracing::{Instrument, instrument};
@@ -36,7 +39,7 @@ use revive_dt_common::{
     fs::clear_directory,
     futures::{PollingWaitBehavior, poll},
 };
-use revive_dt_config::*;
+use revive_dt_config::Arguments;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;
@@ -61,7 +64,7 @@ pub struct GethNode {
     geth: PathBuf,
     id: u32,
     handle: Option<Child>,
-    start_timeout: Duration,
+    start_timeout: u64,
     wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
     chain_id_filler: ChainIdFiller,
@@ -94,15 +97,14 @@ impl GethNode {
     /// Create the node directory and call `geth init` to configure the genesis.
     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
+    fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
         let _ = clear_directory(&self.base_directory);
         let _ = clear_directory(&self.logs_directory);
-        create_dir_all(&self.base_directory)
-            .context("Failed to create base directory for geth node")?;
-        create_dir_all(&self.logs_directory)
-            .context("Failed to create logs directory for geth node")?;
+        create_dir_all(&self.base_directory)?;
+        create_dir_all(&self.logs_directory)?;
 
+        let mut genesis = serde_json::from_str::<Genesis>(&genesis)?;
         for signer_address in
             <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
         {
@@ -114,11 +116,7 @@ impl GethNode {
                 .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
         }
         let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
-        serde_json::to_writer(
-            File::create(&genesis_path).context("Failed to create geth genesis file")?,
-            &genesis,
-        )
-        .context("Failed to serialize geth genesis JSON to file")?;
+        serde_json::to_writer(File::create(&genesis_path)?, &genesis)?;
 
         let mut child = Command::new(&self.geth)
             .arg("--state.scheme")
@@ -129,22 +127,16 @@ impl GethNode {
             .arg(genesis_path)
             .stderr(Stdio::piped())
             .stdout(Stdio::null())
-            .spawn()
-            .context("Failed to spawn geth --init process")?;
+            .spawn()?;
 
         let mut stderr = String::new();
         child
             .stderr
             .take()
             .expect("should be piped")
-            .read_to_string(&mut stderr)
-            .context("Failed to read geth --init stderr")?;
+            .read_to_string(&mut stderr)?;
 
-        if !child
-            .wait()
-            .context("Failed waiting for geth --init process to finish")?
-            .success()
-        {
+        if !child.wait()?.success() {
             anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
         }
@@ -169,11 +161,8 @@ impl GethNode {
         let stdout_logs_file = open_options
             .clone()
-            .open(self.geth_stdout_log_file_path())
-            .context("Failed to open geth stdout logs file")?;
-        let stderr_logs_file = open_options
-            .open(self.geth_stderr_log_file_path())
-            .context("Failed to open geth stderr logs file")?;
+            .open(self.geth_stdout_log_file_path())?;
+        let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
 
         self.handle = Command::new(&self.geth)
             .arg("--dev")
             .arg("--datadir")
@@ -193,24 +182,14 @@ impl GethNode {
             .arg("full")
             .arg("--gcmode")
             .arg("archive")
-            .stderr(
-                stderr_logs_file
-                    .try_clone()
-                    .context("Failed to clone geth stderr log file handle")?,
-            )
-            .stdout(
-                stdout_logs_file
-                    .try_clone()
-                    .context("Failed to clone geth stdout log file handle")?,
-            )
-            .spawn()
-            .context("Failed to spawn geth node process")?
+            .stderr(stderr_logs_file.try_clone()?)
+            .stdout(stdout_logs_file.try_clone()?)
+            .spawn()?
             .into();
 
         if let Err(error) = self.wait_ready() {
             tracing::error!(?error, "Failed to start geth, shutting down gracefully");
-            self.shutdown()
-                .context("Failed to gracefully shutdown after geth start error")?;
+            self.shutdown()?;
             return Err(error);
         }
@@ -232,10 +211,9 @@ impl GethNode {
             .write(false)
             .append(false)
             .truncate(false)
-            .open(self.geth_stderr_log_file_path())
-            .context("Failed to open geth stderr logs file for readiness check")?;
+            .open(self.geth_stderr_log_file_path())?;
 
-        let maximum_wait_time = self.start_timeout;
+        let maximum_wait_time = Duration::from_millis(self.start_timeout);
         let mut stderr = BufReader::new(logs_file).lines();
         let mut lines = vec![];
         loop {
@@ -251,7 +229,7 @@ impl GethNode {
             if Instant::now().duration_since(start_time) > maximum_wait_time {
                 anyhow::bail!(
                     "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
-                    self.start_timeout.as_millis(),
+                    self.start_timeout,
                     lines.join("\n")
                 );
            }
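`wait_ready` tails the node's stderr log until a readiness marker appears or `start_timeout` elapses, keeping the lines read so far for the error message. A reduced sketch over any `BufRead` source, assuming the `anyhow` crate; the marker string and the single-pass read are illustrative, since the real loop re-polls the growing log file:

```rust
use std::io::{BufRead, BufReader};
use std::time::{Duration, Instant};

/// Scan a log stream for a readiness marker, failing after `timeout` and
/// including everything read so far in the error.
fn wait_ready(log: impl BufRead, timeout: Duration) -> anyhow::Result<()> {
    let start = Instant::now();
    let mut lines_seen = Vec::new();
    for line in log.lines() {
        let line = line?;
        lines_seen.push(line.clone());
        if line.contains("IPC endpoint opened") {
            return Ok(());
        }
        if start.elapsed() > timeout {
            anyhow::bail!(
                "timeout after {}ms; log so far:\n{}",
                timeout.as_millis(),
                lines_seen.join("\n")
            );
        }
    }
    anyhow::bail!("log ended before the node became ready")
}

fn main() -> anyhow::Result<()> {
    let fake_log = BufReader::new("booting...\nIPC endpoint opened\n".as_bytes());
    wait_ready(fake_log, Duration::from_millis(500))
}
```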
@@ -299,18 +277,11 @@ impl EthereumNode for GethNode {
&self, &self,
transaction: TransactionRequest, transaction: TransactionRequest,
) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> { ) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
let provider = self let provider = self.provider().await?;
.provider()
.await
.context("Failed to create provider for transaction submission")?;
let pending_transaction = provider let pending_transaction = provider.send_transaction(transaction).await.inspect_err(
.send_transaction(transaction) |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
.await )?;
.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash(); let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used // The following is a fix for the "transaction indexing is in progress" error that we used
@@ -364,11 +335,7 @@ impl EthereumNode for GethNode {
transaction: &TransactionReceipt, transaction: &TransactionReceipt,
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> { ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let provider = Arc::new( let provider = Arc::new(self.provider().await?);
self.provider()
.await
.context("Failed to create provider for tracing")?,
);
poll( poll(
Self::TRACE_POLLING_DURATION, Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)), PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -404,10 +371,8 @@ impl EthereumNode for GethNode {
}); });
match self match self
.trace_transaction(transaction, trace_options) .trace_transaction(transaction, trace_options)
.await .await?
.context("Failed to trace transaction for prestate diff")? .try_into_pre_state_frame()?
.try_into_pre_state_frame()
.context("Failed to convert trace into pre-state frame")?
{ {
PreStateFrame::Diff(diff) => Ok(diff), PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"), _ => anyhow::bail!("expected a diff mode trace"),
@@ -417,8 +382,7 @@ impl EthereumNode for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> { async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_balance(address) .get_balance(address)
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -431,8 +395,7 @@ impl EthereumNode for GethNode {
keys: Vec<StorageKey>, keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> { ) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_proof(address, keys) .get_proof(address, keys)
.latest() .latest()
.await .await
@@ -444,8 +407,7 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> { async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_chain_id() .get_chain_id()
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -454,8 +416,7 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> { async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_transaction_receipt(*tx_hash) .get_transaction_receipt(*tx_hash)
.await? .await?
.context("Failed to get the transaction receipt") .context("Failed to get the transaction receipt")
@@ -465,48 +426,40 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> { async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.gas_limit as _) .map(|block| block.header.gas_limit as _)
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> { async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.beneficiary) .map(|block| block.header.beneficiary)
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> { async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0)) .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> { async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.and_then(|block| { .and_then(|block| {
block block
.header .header
@@ -518,32 +471,27 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> { async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.hash) .map(|block| block.header.hash)
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> { async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the geth block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.timestamp) .map(|block| block.header.timestamp)
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> { async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Geth provider")?
.get_block_number() .get_block_number()
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -551,40 +499,30 @@ impl ResolverApi for GethNode {
} }
impl Node for GethNode { impl Node for GethNode {
fn new( fn new(config: &Arguments) -> Self {
context: impl AsRef<WorkingDirectoryConfiguration> let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string()); let base_directory = geth_directory.join(id.to_string());
let wallet = wallet_configuration.wallet(); let mut wallet = config.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY), data_directory: base_directory.join(Self::DATA_DIRECTORY),
logs_directory: base_directory.join(Self::LOGS_DIRECTORY), logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
base_directory, base_directory,
geth: geth_configuration.path.clone(), geth: config.geth.clone(),
id, id,
handle: None, handle: None,
start_timeout: geth_configuration.start_timeout_ms, start_timeout: config.geth_start_timeout,
wallet: wallet.clone(), wallet: Arc::new(wallet),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
// We know that we only need to store 2 files so we can specify that when creating // We know that we only need to store 2 files so we can specify that when creating
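The signer loop in the new `GethNode::new` derives throwaway dev accounts from small integers; a standalone sketch of that derivation using the alloy types imported in this file (`dev_signers` is a hypothetical helper name):

```rust
use alloy::primitives::{FixedBytes, U256};
use alloy::signers::local::PrivateKeySigner;

// Private key `i` is the 32-byte big-endian encoding of the integer i, which
// yields deterministic, reproducible dev accounts. Key zero is invalid,
// hence the range starts at 1.
fn dev_signers(count: u64) -> Vec<PrivateKeySigner> {
    (1..=count)
        .map(U256::from)
        .map(|id| id.to_be_bytes::<32>())
        .map(|bytes| PrivateKeySigner::from_bytes(&FixedBytes(bytes)).expect("non-zero key"))
        .collect()
}
```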
@@ -626,7 +564,7 @@ impl Node for GethNode {
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?; self.init(genesis)?.spawn_process()?;
Ok(()) Ok(())
} }
@@ -638,10 +576,8 @@ impl Node for GethNode {
.stdin(Stdio::null()) .stdin(Stdio::null())
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.stderr(Stdio::null()) .stderr(Stdio::null())
.spawn() .spawn()?
.context("Failed to spawn geth --version process")? .wait_with_output()?
.wait_with_output()
.context("Failed to wait for geth --version output")?
.stdout; .stdout;
Ok(String::from_utf8_lossy(&output).into()) Ok(String::from_utf8_lossy(&output).into())
} }
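The simplified `version` above boils down to a reusable subprocess probe; a sketch with a hypothetical helper name:

```rust
use std::process::{Command, Stdio};

// Run `<binary> --version`, capture only stdout, and tolerate non-UTF-8
// bytes via from_utf8_lossy rather than failing on them.
fn binary_version(binary: &std::path::Path) -> anyhow::Result<String> {
    let output = Command::new(binary)
        .arg("--version")
        .stdin(Stdio::null())
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .spawn()?
        .wait_with_output()?
        .stdout;
    Ok(String::from_utf8_lossy(&output).into())
}
```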
@@ -667,25 +603,49 @@ impl Drop for GethNode {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use revive_dt_config::Arguments;
use temp_dir::TempDir;
use crate::{GENESIS_JSON, Node};
use super::*; use super::*;
fn test_config() -> ExecutionContext { fn test_config() -> (Arguments, TempDir) {
ExecutionContext::default() let mut config = Arguments::default();
let temp_dir = TempDir::new().unwrap();
config.working_directory = temp_dir.path().to_path_buf().into();
(config, temp_dir)
} }
fn new_node() -> (ExecutionContext, GethNode) { fn new_node() -> (GethNode, TempDir) {
let context = test_config(); let (args, temp_dir) = test_config();
let mut node = GethNode::new(&context); let mut node = GethNode::new(&args);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(GENESIS_JSON.to_owned())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(context, node) (node, temp_dir)
}
#[test]
fn init_works() {
GethNode::new(&test_config().0)
.init(GENESIS_JSON.to_string())
.unwrap();
}
#[test]
fn spawn_works() {
GethNode::new(&test_config().0)
.spawn(GENESIS_JSON.to_string())
.unwrap();
} }
#[test] #[test]
fn version_works() { fn version_works() {
let version = GethNode::new(&test_config()).version().unwrap(); let version = GethNode::new(&test_config().0).version().unwrap();
assert!( assert!(
version.starts_with("geth version"), version.starts_with("geth version"),
"expected version string, got: '{version}'" "expected version string, got: '{version}'"
@@ -695,7 +655,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_chain_id_from_node() { async fn can_get_chain_id_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let chain_id = node.chain_id().await; let chain_id = node.chain_id().await;
@@ -708,7 +668,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_gas_limit_from_node() { async fn can_get_gas_limit_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -721,7 +681,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_coinbase_from_node() { async fn can_get_coinbase_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -734,7 +694,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_difficulty_from_node() { async fn can_get_block_difficulty_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -747,7 +707,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_hash_from_node() { async fn can_get_block_hash_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -759,7 +719,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_timestamp_from_node() { async fn can_get_block_timestamp_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -771,7 +731,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_number_from_node() { async fn can_get_block_number_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let (node, _temp_dir) = new_node();
// Act // Act
let block_number = node.last_block_number().await; let block_number = node.last_block_number().await;
+111 -166
View File
@@ -19,8 +19,8 @@ use alloy::{
TransactionBuilderError, UnbuiltTransactionError, TransactionBuilderError, UnbuiltTransactionError,
}, },
primitives::{ primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey, Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes,
TxHash, U256, StorageKey, TxHash, U256,
}, },
providers::{ providers::{
Provider, ProviderBuilder, Provider, ProviderBuilder,
@@ -32,8 +32,9 @@ use alloy::{
eth::{Block, Header, Transaction}, eth::{Block, Header, Transaction},
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
}, },
signers::local::PrivateKeySigner,
}; };
use anyhow::Context as _; use anyhow::Context;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory; use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
@@ -42,7 +43,7 @@ use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec; use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32; use sp_runtime::AccountId32;
use revive_dt_config::*; use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -91,14 +92,12 @@ impl KitchensinkNode {
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
create_dir_all(&self.base_directory) create_dir_all(&self.base_directory)?;
.context("Failed to create base directory for kitchensink node")?; create_dir_all(&self.logs_directory)?;
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for kitchensink node")?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
@@ -127,10 +126,8 @@ impl KitchensinkNode {
); );
} }
let content = String::from_utf8(output.stdout) let content = String::from_utf8(output.stdout)?;
.context("Failed to decode substrate export-chain-spec output as UTF-8")?; let mut chainspec_json: JsonValue = serde_json::from_str(&content)?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?;
let existing_chainspec_balances = let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
@@ -152,6 +149,7 @@ impl KitchensinkNode {
}) })
.collect(); .collect();
let mut eth_balances = { let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)?;
for signer_address in for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet) <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{ {
@@ -162,8 +160,7 @@ impl KitchensinkNode {
.entry(signer_address) .entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
} }
self.extract_balance_from_genesis_file(&genesis) self.extract_balance_from_genesis_file(&genesis)?
.context("Failed to extract balances from EVM genesis JSON")?
}; };
merged_balances.append(&mut eth_balances); merged_balances.append(&mut eth_balances);
@@ -171,11 +168,9 @@ impl KitchensinkNode {
json!(merged_balances); json!(merged_balances);
serde_json::to_writer_pretty( serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path) std::fs::File::create(&template_chainspec_path)?,
.context("Failed to create kitchensink template chainspec file")?,
&chainspec_json, &chainspec_json,
) )?;
.context("Failed to write kitchensink template chainspec JSON")?;
Ok(self) Ok(self)
} }
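The heart of `init` is the chainspec patch; a reduced sketch, with the JSON path taken from the code above and a placeholder type standing in for the merged balance list:

```rust
use serde_json::{Value, json};

// Overwrite the balances array in the exported chainspec with the merged
// substrate + EVM genesis balances.
fn patch_balances(chainspec: &mut Value, merged_balances: Vec<(String, u128)>) {
    chainspec["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
        json!(merged_balances);
}
```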
@@ -201,12 +196,10 @@ impl KitchensinkNode {
// Start Substrate node // Start Substrate node
let kitchensink_stdout_logs_file = open_options let kitchensink_stdout_logs_file = open_options
.clone() .clone()
.open(self.kitchensink_stdout_log_file_path()) .open(self.kitchensink_stdout_log_file_path())?;
.context("Failed to open kitchensink stdout logs file")?;
let kitchensink_stderr_logs_file = open_options let kitchensink_stderr_logs_file = open_options
.clone() .clone()
.open(self.kitchensink_stderr_log_file_path()) .open(self.kitchensink_stderr_log_file_path())?;
.context("Failed to open kitchensink stderr logs file")?;
let node_binary_path = if self.use_kitchensink_not_dev_node { let node_binary_path = if self.use_kitchensink_not_dev_node {
self.substrate_binary.as_path() self.substrate_binary.as_path()
} else { } else {
@@ -230,18 +223,9 @@ impl KitchensinkNode {
.arg("--rpc-max-connections") .arg("--rpc-max-connections")
.arg(u32::MAX.to_string()) .arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout( .stdout(kitchensink_stdout_logs_file.try_clone()?)
kitchensink_stdout_logs_file .stderr(kitchensink_stderr_logs_file.try_clone()?)
.try_clone() .spawn()?
.context("Failed to clone kitchensink stdout log file handle")?,
)
.stderr(
kitchensink_stderr_logs_file
.try_clone()
.context("Failed to clone kitchensink stderr log file handle")?,
)
.spawn()
.context("Failed to spawn substrate node process")?
.into(); .into();
// Give the node a moment to boot // Give the node a moment to boot
@@ -250,18 +234,14 @@ impl KitchensinkNode {
Self::SUBSTRATE_READY_MARKER, Self::SUBSTRATE_READY_MARKER,
Duration::from_secs(60), Duration::from_secs(60),
) { ) {
self.shutdown() self.shutdown()?;
.context("Failed to gracefully shutdown after substrate start error")?;
return Err(error); return Err(error);
}; };
let eth_proxy_stdout_logs_file = open_options let eth_proxy_stdout_logs_file = open_options
.clone() .clone()
.open(self.proxy_stdout_log_file_path()) .open(self.proxy_stdout_log_file_path())?;
.context("Failed to open eth-proxy stdout logs file")?; let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?;
let eth_proxy_stderr_logs_file = open_options
.open(self.proxy_stderr_log_file_path())
.context("Failed to open eth-proxy stderr logs file")?;
self.process_proxy = Command::new(&self.eth_proxy_binary) self.process_proxy = Command::new(&self.eth_proxy_binary)
.arg("--dev") .arg("--dev")
.arg("--rpc-port") .arg("--rpc-port")
@@ -271,18 +251,9 @@ impl KitchensinkNode {
.arg("--rpc-max-connections") .arg("--rpc-max-connections")
.arg(u32::MAX.to_string()) .arg(u32::MAX.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV) .env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout( .stdout(eth_proxy_stdout_logs_file.try_clone()?)
eth_proxy_stdout_logs_file .stderr(eth_proxy_stderr_logs_file.try_clone()?)
.try_clone() .spawn()?
.context("Failed to clone eth-proxy stdout log file handle")?,
)
.stderr(
eth_proxy_stderr_logs_file
.try_clone()
.context("Failed to clone eth-proxy stderr log file handle")?,
)
.spawn()
.context("Failed to spawn eth-proxy process")?
.into(); .into();
if let Err(error) = Self::wait_ready( if let Err(error) = Self::wait_ready(
@@ -290,8 +261,7 @@ impl KitchensinkNode {
Self::ETH_PROXY_READY_MARKER, Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(60), Duration::from_secs(60),
) { ) {
self.shutdown() self.shutdown()?;
.context("Failed to gracefully shutdown after eth-proxy start error")?;
return Err(error); return Err(error);
}; };
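Both the substrate node and the eth-proxy are spawned with the same log-redirection shape; a sketch with placeholder paths:

```rust
use std::fs::OpenOptions;
use std::process::{Child, Command};

// Open append-mode log files and hand them to the child process; cloning
// the stdout handle keeps the original available for later flushing.
fn spawn_with_logs(
    binary: &std::path::Path,
    stdout_log: &std::path::Path,
    stderr_log: &std::path::Path,
) -> anyhow::Result<Child> {
    let stdout = OpenOptions::new().create(true).append(true).open(stdout_log)?;
    let stderr = OpenOptions::new().create(true).append(true).open(stderr_log)?;
    Ok(Command::new(binary)
        .stdout(stdout.try_clone()?)
        .stderr(stderr)
        .spawn()?)
}
```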
@@ -416,14 +386,11 @@ impl EthereumNode for KitchensinkNode {
) -> anyhow::Result<TransactionReceipt> { ) -> anyhow::Result<TransactionReceipt> {
let receipt = self let receipt = self
.provider() .provider()
.await .await?
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction) .send_transaction(transaction)
.await .await?
.context("Failed to submit transaction to kitchensink proxy")?
.get_receipt() .get_receipt()
.await .await?;
.context("Failed to fetch transaction receipt from kitchensink proxy")?;
Ok(receipt) Ok(receipt)
} }
@@ -433,12 +400,11 @@ impl EthereumNode for KitchensinkNode {
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> { ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let tx_hash = transaction.transaction_hash; let tx_hash = transaction.transaction_hash;
self.provider() Ok(self
.await .provider()
.context("Failed to create provider for debug tracing")? .await?
.debug_trace_transaction(tx_hash, trace_options) .debug_trace_transaction(tx_hash, trace_options)
.await .await?)
.context("Failed to obtain debug trace from kitchensink proxy")
} }
async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> { async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
@@ -459,8 +425,7 @@ impl EthereumNode for KitchensinkNode {
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> { async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_balance(address) .get_balance(address)
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -472,8 +437,7 @@ impl EthereumNode for KitchensinkNode {
keys: Vec<StorageKey>, keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> { ) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_proof(address, keys) .get_proof(address, keys)
.latest() .latest()
.await .await
@@ -484,8 +448,7 @@ impl EthereumNode for KitchensinkNode {
impl ResolverApi for KitchensinkNode { impl ResolverApi for KitchensinkNode {
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> { async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_chain_id() .get_chain_id()
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -493,8 +456,7 @@ impl ResolverApi for KitchensinkNode {
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> { async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_transaction_receipt(*tx_hash) .get_transaction_receipt(*tx_hash)
.await? .await?
.context("Failed to get the transaction receipt") .context("Failed to get the transaction receipt")
@@ -503,45 +465,37 @@ impl ResolverApi for KitchensinkNode {
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> { async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.gas_limit as _) .map(|block| block.header.gas_limit as _)
} }
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> { async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.beneficiary) .map(|block| block.header.beneficiary)
} }
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> { async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0)) .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
} }
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> { async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.and_then(|block| { .and_then(|block| {
block block
.header .header
@@ -552,30 +506,25 @@ impl ResolverApi for KitchensinkNode {
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> { async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.hash) .map(|block| block.header.hash)
} }
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> { async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number) .get_block_by_number(number)
.await .await?
.context("Failed to get the kitchensink block")? .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.timestamp) .map(|block| block.header.timestamp)
} }
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> { async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider() self.provider()
.await .await?
.context("Failed to get the Kitchensink provider")?
.get_block_number() .get_block_number()
.await .await
.map_err(Into::into) .map_err(Into::into)
@@ -583,47 +532,35 @@ impl ResolverApi for KitchensinkNode {
} }
impl Node for KitchensinkNode { impl Node for KitchensinkNode {
fn new( fn new(config: &Arguments) -> Self {
context: impl AsRef<WorkingDirectoryConfiguration> let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY);
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let kitchensink_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = kitchensink_directory.join(id.to_string()); let base_directory = kitchensink_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let wallet = wallet_configuration.wallet(); let mut wallet = config.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
id, id,
substrate_binary: kitchensink_configuration.path.clone(), substrate_binary: config.kitchensink.clone(),
dev_node_binary: dev_node_configuration.path.clone(), dev_node_binary: config.revive_dev_node.clone(),
eth_proxy_binary: eth_rpc_configuration.path.clone(), eth_proxy_binary: config.eth_proxy.clone(),
rpc_url: String::new(), rpc_url: String::new(),
base_directory, base_directory,
logs_directory, logs_directory,
process_substrate: None, process_substrate: None,
process_proxy: None, process_proxy: None,
wallet: wallet.clone(), wallet: Arc::new(wallet),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink, use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node,
// We know that we only need to store 4 files so we can specify that when creating // We know that we only need to store 4 files so we can specify that when creating
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4), logs_file_to_flush: Vec::with_capacity(4),
@@ -664,8 +601,8 @@ impl Node for KitchensinkNode {
Ok(()) Ok(())
} }
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process() self.init(&genesis)?.spawn_process()
} }
fn version(&self) -> anyhow::Result<String> { fn version(&self) -> anyhow::Result<String> {
@@ -674,10 +611,8 @@ impl Node for KitchensinkNode {
.stdin(Stdio::null()) .stdin(Stdio::null())
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.stderr(Stdio::null()) .stderr(Stdio::null())
.spawn() .spawn()?
.context("Failed to spawn kitchensink --version")? .wait_with_output()?
.wait_with_output()
.context("Failed to wait for kitchensink --version")?
.stdout; .stdout;
Ok(String::from_utf8_lossy(&output).into()) Ok(String::from_utf8_lossy(&output).into())
} }
@@ -1130,20 +1065,25 @@ impl BlockHeader for KitchenSinkHeader {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use alloy::rpc::types::TransactionRequest; use alloy::rpc::types::TransactionRequest;
use revive_dt_config::Arguments;
use std::path::PathBuf;
use std::sync::{LazyLock, Mutex}; use std::sync::{LazyLock, Mutex};
use std::fs; use std::fs;
use super::*; use super::*;
use crate::Node; use crate::{GENESIS_JSON, Node};
fn test_config() -> ExecutionContext { fn test_config() -> Arguments {
let mut context = ExecutionContext::default(); Arguments {
context.kitchensink_configuration.use_kitchensink = true; kitchensink: PathBuf::from("substrate-node"),
context eth_proxy: PathBuf::from("eth-rpc"),
use_kitchensink_not_dev_node: true,
..Default::default()
}
} }
fn new_node() -> (ExecutionContext, KitchensinkNode) { fn new_node() -> (KitchensinkNode, Arguments) {
// Note: When we run the tests in the CI we found that if they're all // Note: When we run the tests in the CI we found that if they're all
// run in parallel then the CI is unable to start all of the nodes in // time and their start-up times out. Therefore, we want all of the
// time and their start-up times out. Therefore, we want all of the // nodes to go through this mutex when starting up and we only allow a
@@ -1162,36 +1102,32 @@ mod tests {
static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
let _guard = NODE_START_MUTEX.lock().unwrap(); let _guard = NODE_START_MUTEX.lock().unwrap();
let context = test_config(); let args = test_config();
let mut node = KitchensinkNode::new(&context); let mut node = KitchensinkNode::new(&args);
node.init(context.genesis_configuration.genesis().unwrap().clone()) node.init(GENESIS_JSON)
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(context, node) (node, args)
} }
/// A shared node that multiple tests can use. It starts up once. /// A shared node that multiple tests can use. It starts up once.
fn shared_node() -> &'static KitchensinkNode { fn shared_node() -> &'static KitchensinkNode {
static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| { static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| {
let (context, node) = new_node(); let (node, args) = new_node();
(context, node) (node, args)
}); });
&NODE.1 &NODE.0
} }
#[tokio::test] #[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() { async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange // Arrange
let (context, node) = new_node(); let (node, args) = new_node();
let provider = node.provider().await.expect("Failed to create provider"); let provider = node.provider().await.expect("Failed to create provider");
let account_address = context let account_address = args.wallet().default_signer().address();
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -1225,9 +1161,7 @@ mod tests {
let mut dummy_node = KitchensinkNode::new(&test_config()); let mut dummy_node = KitchensinkNode::new(&test_config());
// Call `init()` // Call `init()`
dummy_node dummy_node.init(genesis_content).expect("init failed");
.init(serde_json::from_str(genesis_content).unwrap())
.expect("init failed");
// Check that the patched chainspec file was generated // Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node let final_chainspec_path = dummy_node
@@ -1338,9 +1272,19 @@ mod tests {
} }
#[test] #[test]
fn version_works() { fn spawn_works() {
let node = shared_node(); let config = test_config();
let mut node = KitchensinkNode::new(&config);
node.spawn(GENESIS_JSON.to_string()).unwrap();
}
#[test]
fn version_works() {
let config = test_config();
let node = KitchensinkNode::new(&config);
let version = node.version().unwrap(); let version = node.version().unwrap();
assert!( assert!(
@@ -1351,8 +1295,9 @@ mod tests {
#[test] #[test]
fn eth_rpc_version_works() { fn eth_rpc_version_works() {
let node = shared_node(); let config = test_config();
let node = KitchensinkNode::new(&config);
let version = node.eth_rpc_version().unwrap(); let version = node.eth_rpc_version().unwrap();
assert!( assert!(
+6 -14
View File
@@ -1,8 +1,7 @@
//! This crate implements the testing nodes. //! This crate implements the testing nodes.
use alloy::genesis::Genesis;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_config::*; use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
pub mod common; pub mod common;
@@ -11,20 +10,13 @@ pub mod geth;
pub mod kitchensink; pub mod kitchensink;
pub mod pool; pub mod pool;
/// The default genesis configuration.
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
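`include_str!` resolves relative to this source file and bakes the genesis JSON into the binary as a `&'static str`, so spawning needs no filesystem access at run time; a sketch against the trait defined below (`spawn_with_default_genesis` is a hypothetical helper):

```rust
fn spawn_with_default_genesis<T: Node>(node: &mut T) -> anyhow::Result<()> {
    // GENESIS_JSON is embedded at compile time.
    node.spawn(GENESIS_JSON.to_string())
}
```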
/// An abstract interface for testing nodes. /// An abstract interface for testing nodes.
pub trait Node: EthereumNode { pub trait Node: EthereumNode {
/// Create a new uninitialized instance. /// Create a new uninitialized instance.
fn new( fn new(config: &Arguments) -> Self;
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self;
/// Returns the identifier of the node. /// Returns the identifier of the node.
fn id(&self) -> usize; fn id(&self) -> usize;
@@ -32,7 +24,7 @@ pub trait Node: EthereumNode {
/// Spawns a node configured according to the genesis json. /// Spawns a node configured according to the genesis json.
/// ///
/// Blocks until it's ready to accept transactions. /// Blocks until it's ready to accept transactions.
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>; fn spawn(&mut self, genesis: String) -> anyhow::Result<()>;
/// Prune the node instance and related data. /// Prune the node instance and related data.
/// ///
+17 -48
View File
@@ -5,13 +5,10 @@ use std::{
thread, thread,
}; };
use alloy::genesis::Genesis; use revive_dt_common::cached_fs::read_to_string;
use anyhow::Context as _;
use revive_dt_config::{ use anyhow::Context;
ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration, use revive_dt_config::Arguments;
KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
WorkingDirectoryConfiguration,
};
use tracing::info; use tracing::info;
use crate::Node; use crate::Node;
@@ -28,31 +25,18 @@ where
T: Node + Send + 'static, T: Node + Send + 'static,
{ {
/// Create a new Pool. This will start as many nodes as there are workers in `config`. /// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new( pub fn new(config: &Arguments) -> anyhow::Result<Self> {
context: impl AsRef<WorkingDirectoryConfiguration> let nodes = config.number_of_nodes;
+ AsRef<ConcurrencyConfiguration> let genesis = read_to_string(&config.genesis_file).context(format!(
+ AsRef<GenesisConfiguration> "can not read genesis file: {}",
+ AsRef<WalletConfiguration> config.genesis_file.display()
+ AsRef<GethConfiguration> ))?;
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Send
+ Sync
+ Clone
+ 'static,
) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;
let genesis = genesis_configuration.genesis()?;
let mut handles = Vec::with_capacity(nodes); let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes { for _ in 0..nodes {
let context = context.clone(); let config = config.clone();
let genesis = genesis.clone(); let genesis = genesis.clone();
handles.push(thread::spawn(move || spawn_node::<T>(context, genesis))); handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis)));
} }
let mut nodes = Vec::with_capacity(nodes); let mut nodes = Vec::with_capacity(nodes);
@@ -60,10 +44,8 @@ where
nodes.push( nodes.push(
handle handle
.join() .join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))?
.context("Failed to join node spawn thread")? .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?,
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
.context("Node failed to spawn")?,
); );
} }
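The fan-out/fan-in in `Pool::new` is easy to get wrong around panics; a standalone sketch of the same shape, with a hypothetical `run_all` helper:

```rust
use std::thread;

// Spawn one OS thread per job, then join them in order; a panicked thread
// surfaces as an anyhow error instead of propagating the panic.
fn run_all<T: Send + 'static>(
    jobs: Vec<Box<dyn FnOnce() -> anyhow::Result<T> + Send>>,
) -> anyhow::Result<Vec<T>> {
    let handles: Vec<_> = jobs.into_iter().map(|job| thread::spawn(job)).collect();
    let mut results = Vec::with_capacity(handles.len());
    for handle in handles {
        let joined = handle
            .join()
            .map_err(|error| anyhow::anyhow!("worker thread panicked: {error:?}"))?;
        results.push(joined?);
    }
    Ok(results)
}
```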
@@ -80,27 +62,14 @@ where
} }
} }
fn spawn_node<T: Node + Send>( fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
context: impl AsRef<WorkingDirectoryConfiguration> let mut node = T::new(args);
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone
+ 'static,
genesis: Genesis,
) -> anyhow::Result<T> {
let mut node = T::new(context);
info!( info!(
id = node.id(), id = node.id(),
connection_string = node.connection_string(), connection_string = node.connection_string(),
"Spawning node" "Spawning node"
); );
node.spawn(genesis) node.spawn(genesis)?;
.context("Failed to spawn node process")?;
info!( info!(
id = node.id(), id = node.id(),
connection_string = node.connection_string(), connection_string = node.connection_string(),
+45 -55
View File
@@ -9,10 +9,10 @@ use std::{
}; };
use alloy_primitives::Address; use alloy_primitives::Address;
use anyhow::{Context as _, Result}; use anyhow::Result;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Context, TestingPlatform}; use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance}; use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version; use semver::Version;
use serde::Serialize; use serde::Serialize;
@@ -36,11 +36,11 @@ pub struct ReportAggregator {
} }
impl ReportAggregator { impl ReportAggregator {
pub fn new(context: Context) -> Self { pub fn new(config: Arguments) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>(); let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024); let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self { Self {
report: Report::new(context), report: Report::new(config),
remaining_cases: Default::default(), remaining_cases: Default::default(),
runner_tx: Some(runner_tx), runner_tx: Some(runner_tx),
runner_rx, runner_rx,
@@ -113,35 +113,19 @@ impl ReportAggregator {
debug!("Report aggregation completed"); debug!("Report aggregation completed");
let file_name = { let file_name = {
let current_timestamp = SystemTime::now() let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();
.duration_since(UNIX_EPOCH)
.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
.as_secs();
let mut file_name = current_timestamp.to_string(); let mut file_name = current_timestamp.to_string();
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_path = self let file_path = self.report.config.directory().join(file_name);
.report
.context
.working_directory_configuration()
.as_path()
.join(file_name);
let file = OpenOptions::new() let file = OpenOptions::new()
.create(true) .create(true)
.write(true) .write(true)
.truncate(true) .truncate(true)
.read(false) .read(false)
.open(&file_path) .open(file_path)?;
.with_context(|| { serde_json::to_writer_pretty(file, &self.report)?;
format!(
"Failed to open report file for writing: {}",
file_path.display()
)
})?;
serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
Ok(()) Ok(())
} }
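The report emission above reduces to a small routine; a sketch with hypothetical names:

```rust
use std::fs::OpenOptions;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

// Name the file after the current UNIX timestamp and pretty-print the
// serialized report into it.
fn write_report<T: serde::Serialize>(directory: &Path, report: &T) -> anyhow::Result<()> {
    let secs = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();
    let file = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(directory.join(format!("{secs}.json")))?;
    serde_json::to_writer_pretty(file, report)?;
    Ok(())
}
```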
@@ -287,16 +271,8 @@ impl ReportAggregator {
&mut self, &mut self,
event: PreLinkContractsCompilationSucceededEvent, event: PreLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.config.report_include_compiler_input;
.report let include_output = self.report.config.report_include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -324,16 +300,8 @@ impl ReportAggregator {
&mut self, &mut self,
event: PostLinkContractsCompilationSucceededEvent, event: PostLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.config.report_include_compiler_input;
.report let include_output = self.report.config.report_include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -361,13 +329,21 @@ impl ReportAggregator {
&mut self, &mut self,
event: PreLinkContractsCompilationFailedEvent, event: PreLinkContractsCompilationFailedEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure { execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason, reason: event.reason,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input: event.compiler_input, compiler_input,
}); });
} }
@@ -375,13 +351,21 @@ impl ReportAggregator {
&mut self, &mut self,
event: PostLinkContractsCompilationFailedEvent, event: PostLinkContractsCompilationFailedEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input {
event.compiler_input
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Failure { execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason, reason: event.reason,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input: event.compiler_input, compiler_input,
}); });
} }
@@ -427,8 +411,12 @@ impl ReportAggregator {
#[serde_as] #[serde_as]
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct Report { pub struct Report {
/// The context that the tool was started up with. /// The configuration that the tool was started up with.
pub context: Context, pub config: Arguments,
/// The platform of the leader chain.
pub leader_platform: TestingPlatform,
/// The platform of the follower chain.
pub follower_platform: TestingPlatform,
/// The list of corpus files that the tool found. /// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>, pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool. /// The list of metadata files that were found by the tool.
@@ -440,9 +428,11 @@ pub struct Report {
} }
impl Report { impl Report {
pub fn new(context: Context) -> Self { pub fn new(config: Arguments) -> Self {
Self { Self {
context, leader_platform: config.leader,
follower_platform: config.follower,
config,
corpora: Default::default(), corpora: Default::default(),
metadata_files: Default::default(), metadata_files: Default::default(),
test_case_information: Default::default(), test_case_information: Default::default(),
@@ -532,12 +522,12 @@ pub enum CompilationStatus {
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI configuration and if the contracts were not
/// the compiler was invoked. /// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the /// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI contexts. /// CLI configurations.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>, compiler_output: Option<CompilerOutput>,
}, },
@@ -552,8 +542,8 @@ pub enum CompilationStatus {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI configuration and if the contracts were not
/// the compiler was invoked. /// cached and the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
}, },
+1 -3
View File
@@ -4,7 +4,6 @@
use std::{collections::BTreeMap, path::PathBuf, sync::Arc}; use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use alloy_primitives::Address; use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput}; use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::TestingPlatform; use revive_dt_config::TestingPlatform;
@@ -631,8 +630,7 @@ define_event! {
impl RunnerEventReporter { impl RunnerEventReporter {
pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> { pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>(); let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
self.report_subscribe_to_events_event(tx) self.report_subscribe_to_events_event(tx)?;
.context("Failed to send subscribe request to reporter task")?;
rx.await.map_err(Into::into) rx.await.map_err(Into::into)
} }
} }
+11 -49
View File
@@ -9,11 +9,9 @@ use std::{
sync::LazyLock, sync::LazyLock,
}; };
use semver::Version;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use crate::download::SolcDownloader; use crate::download::SolcDownloader;
use anyhow::Context as _;
pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default); pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
@@ -21,7 +19,7 @@ pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new
pub(crate) async fn get_or_download( pub(crate) async fn get_or_download(
working_directory: &Path, working_directory: &Path,
downloader: &SolcDownloader, downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> { ) -> anyhow::Result<PathBuf> {
let target_directory = working_directory let target_directory = working_directory
.join(SOLC_CACHE_DIRECTORY) .join(SOLC_CACHE_DIRECTORY)
.join(downloader.version.to_string()); .join(downloader.version.to_string());
@@ -30,26 +28,14 @@ pub(crate) async fn get_or_download(
let mut cache = SOLC_CACHER.lock().await; let mut cache = SOLC_CACHER.lock().await;
if cache.contains(&target_file) { if cache.contains(&target_file) {
tracing::debug!("using cached solc: {}", target_file.display()); tracing::debug!("using cached solc: {}", target_file.display());
return Ok((downloader.version.clone(), target_file)); return Ok(target_file);
} }
create_dir_all(&target_directory).with_context(|| { create_dir_all(target_directory)?;
format!( download_to_file(&target_file, downloader).await?;
"Failed to create solc cache directory: {}",
target_directory.display()
)
})?;
download_to_file(&target_file, downloader)
.await
.with_context(|| {
format!(
"Failed to write downloaded solc to {}",
target_file.display()
)
})?;
cache.insert(target_file.clone()); cache.insert(target_file.clone());
Ok((downloader.version.clone(), target_file)) Ok(target_file)
} }
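The `SOLC_CACHER` guard above carries the whole concurrency story here; a reduced sketch of the claim-before-download idea, with hypothetical names:

```rust
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::LazyLock;
use tokio::sync::Mutex;

// A process-wide set of already-downloaded paths behind an async mutex, so
// concurrent compilations don't race to fetch the same solc binary.
static DOWNLOADED: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

async fn already_downloaded(path: PathBuf) -> bool {
    // `insert` returns false when the path was already present.
    !DOWNLOADED.lock().await.insert(path)
}
```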
async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> { async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
@@ -59,26 +45,14 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
#[cfg(unix)] #[cfg(unix)]
{ {
let mut permissions = file let mut permissions = file.metadata()?.permissions();
.metadata()
.with_context(|| format!("Failed to read metadata for {}", path.display()))?
.permissions();
permissions.set_mode(permissions.mode() | 0o111); permissions.set_mode(permissions.mode() | 0o111);
file.set_permissions(permissions).with_context(|| { file.set_permissions(permissions)?;
format!("Failed to set executable permissions on {}", path.display())
})?;
} }
let mut file = BufWriter::new(file); let mut file = BufWriter::new(file);
file.write_all( file.write_all(&downloader.download().await?)?;
&downloader file.flush()?;
.download()
.await
.context("Failed to download solc binary bytes")?,
)
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
file.flush()
.with_context(|| format!("Failed to flush file {}", path.display()))?;
drop(file); drop(file);
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
@@ -89,20 +63,8 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
.stderr(std::process::Stdio::null()) .stderr(std::process::Stdio::null())
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.spawn() .spawn()?
.with_context(|| { .wait()?;
format!(
"Failed to spawn xattr to remove quarantine attribute on {}",
path.display()
)
})?
.wait()
.with_context(|| {
format!(
"Failed waiting for xattr operation to complete on {}",
path.display()
)
})?;
Ok(()) Ok(())
} }
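The unix-only permission fix in `download_to_file` is worth isolating; a sketch:

```rust
#[cfg(unix)]
fn make_executable(file: &std::fs::File) -> std::io::Result<()> {
    use std::os::unix::fs::PermissionsExt;

    // OR the execute bits (0o111) into the current mode so the freshly
    // downloaded solc binary can actually be run.
    let mut permissions = file.metadata()?.permissions();
    permissions.set_mode(permissions.mode() | 0o111);
    file.set_permissions(permissions)
}
```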
+5 -27
View File
@@ -11,7 +11,6 @@ use semver::Version;
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use crate::list::List; use crate::list::List;
use anyhow::Context as _;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> = pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default); LazyLock::new(Default::default);
@@ -31,12 +30,7 @@ impl List {
return Ok(list.clone()); return Ok(list.clone());
} }
let body: List = reqwest::get(url) let body: List = reqwest::get(url).await?.json().await?;
.await
.with_context(|| format!("Failed to GET solc list from {url}"))?
.json()
.await
.with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
LIST_CACHE.lock().unwrap().insert(url, body.clone()); LIST_CACHE.lock().unwrap().insert(url, body.clone());
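Putting the cache check and the fetch above together, the memoization reads as one small function (a sketch assuming the `LIST_CACHE` map declared in this file):

```rust
// Return the cached list if this URL has been fetched before; otherwise GET
// and deserialize it, then remember the result for later callers.
async fn fetch_list(url: &'static str) -> anyhow::Result<List> {
    if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
        return Ok(list.clone());
    }
    let body: List = reqwest::get(url).await?.json().await?;
    LIST_CACHE.lock().unwrap().insert(url, body.clone());
    Ok(body)
}
```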
@@ -74,8 +68,7 @@ impl SolcDownloader {
}), }),
VersionOrRequirement::Requirement(requirement) => { VersionOrRequirement::Requirement(requirement) => {
let Some(version) = List::download(list) let Some(version) = List::download(list)
.await .await?
.with_context(|| format!("Failed to download solc builds list from {list}"))?
.builds .builds
.into_iter() .into_iter()
.map(|build| build.version) .map(|build| build.version)
@@ -114,20 +107,11 @@ impl SolcDownloader {
/// Errors out if the download fails or the digest of the downloaded file /// Errors out if the download fails or the digest of the downloaded file
/// mismatches the expected digest from the release [List]. /// mismatches the expected digest from the release [List].
pub async fn download(&self) -> anyhow::Result<Vec<u8>> { pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
let builds = List::download(self.list) let builds = List::download(self.list).await?.builds;
.await
.with_context(|| format!("Failed to download solc builds list from {}", self.list))?
.builds;
let build = builds let build = builds
.iter() .iter()
.find(|build| build.version == self.version) .find(|build| build.version == self.version)
.ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds list", self.version)) .ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds list", self.version))?;
.with_context(|| {
format!(
"Requested solc version {} was not found in builds list fetched from {}",
self.version, self.list
)
})?;
let path = build.path.clone(); let path = build.path.clone();
let expected_digest = build let expected_digest = build
@@ -137,13 +121,7 @@ impl SolcDownloader {
.to_string(); .to_string();
let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display()); let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
let file = reqwest::get(&url) let file = reqwest::get(url).await?.bytes().await?.to_vec();
.await
.with_context(|| format!("Failed to GET solc binary from {url}"))?
.bytes()
.await
.with_context(|| format!("Failed to read solc binary bytes from {url}"))?
.to_vec();
if hex::encode(Sha256::digest(&file)) != expected_digest { if hex::encode(Sha256::digest(&file)) != expected_digest {
anyhow::bail!("sha256 mismatch for solc version {}", self.version); anyhow::bail!("sha256 mismatch for solc version {}", self.version);
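The integrity check at the end is the part worth copying elsewhere; a sketch using the sha2 and hex crates already imported in this file:

```rust
use sha2::{Digest, Sha256};

// Hash the downloaded bytes and compare the hex digest against the one
// published in the release list before trusting the binary.
fn verify_digest(bytes: &[u8], expected_hex: &str) -> anyhow::Result<()> {
    if hex::encode(Sha256::digest(bytes)) != expected_hex {
        anyhow::bail!("sha256 digest mismatch");
    }
    Ok(())
}
```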
+2 -5
View File
@@ -5,12 +5,10 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use anyhow::Context as _;
use cache::get_or_download; use cache::get_or_download;
use download::SolcDownloader; use download::SolcDownloader;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use semver::Version;
pub mod cache; pub mod cache;
pub mod download; pub mod download;
@@ -25,7 +23,7 @@ pub async fn download_solc(
cache_directory: &Path, cache_directory: &Path,
version: impl Into<VersionOrRequirement>, version: impl Into<VersionOrRequirement>,
wasm: bool, wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> { ) -> anyhow::Result<PathBuf> {
let downloader = if wasm { let downloader = if wasm {
SolcDownloader::wasm(version).await SolcDownloader::wasm(version).await
} else if cfg!(target_os = "linux") { } else if cfg!(target_os = "linux") {
@@ -36,8 +34,7 @@ pub async fn download_solc(
SolcDownloader::windows(version).await SolcDownloader::windows(version).await
} else { } else {
unimplemented!() unimplemented!()
} }?;
.context("Failed to initialize the Solc Downloader")?;
get_or_download(cache_directory, &downloader).await get_or_download(cache_directory, &downloader).await
} }
-102
View File
@@ -1,102 +0,0 @@
#!/bin/bash
# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"
# Optional positional argument: path to polkadot-sdk directory
POLKADOT_SDK_DIR="${1:-}"
# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""
# Check if test repo already exists
if [ -d "$TEST_REPO_DIR" ]; then
echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}"
cd "$TEST_REPO_DIR"
git pull
cd ..
else
echo -e "${GREEN}Cloning test repository...${NC}"
git clone "$TEST_REPO_URL"
fi
# If polkadot-sdk path is provided, verify and use binaries from there; build if needed
if [ -n "$POLKADOT_SDK_DIR" ]; then
if [ ! -d "$POLKADOT_SDK_DIR" ]; then
echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}"
exit 1
fi
POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR")
echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}"
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
fi
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
if [ ! -x "$bin" ]; then
echo -e "${RED}Expected binary not found after build: $bin${NC}"
exit 1
fi
done
else
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi
# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "$ABSOLUTE_PATH"
}
EOF
echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"
echo -e "${GREEN}Starting differential tests...${NC}"
echo "This may take a while..."
echo ""
# Run the tool
RUST_LOG="error" cargo run --release -- execute-tests \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 5 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \
2> output.log
echo -e "${GREEN}=== Test run completed! ===${NC}"