Compare commits

...

16 Commits

Author SHA1 Message Date
James Wilson b6e0701ec9 Remove another unused dep 2025-08-27 15:29:00 +01:00
James Wilson f99b3dbc3c Improve error message 2025-08-27 13:16:55 +01:00
James Wilson 0e79594bb2 Remove semver from report crate 2025-08-27 13:15:24 +01:00
James Wilson 5c75228496 Pass solc version into compiler rather than returning from compilation. Allows for better reporting 2025-08-27 13:13:55 +01:00
James Wilson 8229675ba8 re-add context bits lost in merge conflicts 2025-08-27 10:47:35 +01:00
James Wilson dd94b12b34 Return solc_version in CompilerOutput 2025-08-27 10:45:22 +01:00
James Wilson 4af9f6695d Merge branch 'main' into jsdw-use-mode-solc-version 2025-08-27 09:48:58 +01:00
Omar 8b1afc36a3 Better errors in report (#155) 2025-08-26 15:35:19 +00:00
James Wilson 4d8adb553c Merge branch 'main' into jsdw-use-mode-solc-version
Note: some reporting in cached_compiler.rs is commented out and needs handling
2025-08-26 11:34:32 +01:00
Omar 60328cd493 Add a Quick Run Script (#152)
* Add a quick run script

* Add more context to errors

* Fix the issue with corpus directory canonicalization

* Update the quick run script

* Edit the runner script

* Support specifying the path of the polkadot sdk
2025-08-25 21:03:28 +00:00
Omar eb264fcc7b feature/fix abi finding resolc (#154)
* Configure kitchensink to use devnode by default

* Update the kitchensink tests

* Fix the logic for finding the ABI in resolc

* Edit how CLI reporter prints
2025-08-25 20:47:29 +00:00
Omar 84b139d3b4 Configure kitchensink to use devnode by default (#153)
* Configure kitchensink to use devnode by default

* Update the kitchensink tests
2025-08-25 15:46:06 +00:00
James Wilson bdd2eab194 Tidyup 2025-08-21 12:45:36 +01:00
James Wilson eb754bc9e8 remove temp_dir; tempfile has all we need 2025-08-21 12:36:23 +01:00
James Wilson 0e5e57e703 Merge branch 'main' into jsdw-use-mode-solc-version 2025-08-21 12:26:05 +01:00
James Wilson 491a9a32b2 Use the solc version required in tests rather than one on PATH 2025-08-21 12:23:54 +01:00
34 changed files with 1195 additions and 772 deletions
+2
@@ -9,3 +9,5 @@ node_modules
*.log
profile.json.gz
resolc-compiler-tests
workdir
Generated
+3 -10
@@ -4492,6 +4492,7 @@ dependencies = [
"semver 1.0.26",
"serde",
"serde_json",
"tempfile",
"tokio",
"tracing",
]
@@ -4504,7 +4505,7 @@ dependencies = [
"clap",
"semver 1.0.26",
"serde",
"temp-dir",
"tempfile",
]
[[package]]
@@ -4529,7 +4530,6 @@ dependencies = [
"semver 1.0.26",
"serde",
"serde_json",
"temp-dir",
"tempfile",
"tokio",
"tracing",
@@ -4571,7 +4571,7 @@ dependencies = [
"serde_json",
"sp-core",
"sp-runtime",
"temp-dir",
"tempfile",
"tokio",
"tracing",
]
@@ -4596,7 +4596,6 @@ dependencies = [
"revive-dt-compiler",
"revive-dt-config",
"revive-dt-format",
"semver 1.0.26",
"serde",
"serde_json",
"serde_with",
@@ -5818,12 +5817,6 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "temp-dir"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83176759e9416cf81ee66cb6508dbfe9c96f20b8b56265a39917551c23c70964"
[[package]]
name = "tempfile"
version = "3.20.0"
-1
@@ -48,7 +48,6 @@ serde_with = { version = "3.14.0" }
sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
thiserror = "2"
tokio = { version = "1.47.0", default-features = false, features = [
+14 -5
@@ -3,19 +3,28 @@ use std::{
path::Path,
};
use anyhow::Result;
use anyhow::{Context, Result};
/// This function clears the given directory of all files and directories contained
/// within it, without deleting the directory itself.
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
for entry in read_dir(path.as_ref())? {
let entry = entry?;
for entry in read_dir(path.as_ref())
.with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
{
let entry = entry.with_context(|| {
format!(
"Failed to read an entry in directory: {}",
path.as_ref().display()
)
})?;
let entry_path = entry.path();
if entry_path.is_file() {
remove_file(entry_path)?
remove_file(&entry_path)
.with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
} else {
remove_dir_all(entry_path)?
remove_dir_all(&entry_path)
.with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
}
}
Ok(())
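For illustration, a minimal usage sketch of clear_directory (not part of the diff; it assumes the tempfile crate, which this PR adds as a dependency):

use std::fs::{File, create_dir_all, read_dir};

fn demo() -> anyhow::Result<()> {
    // Create a throwaway directory holding one file and one subdirectory.
    let dir = tempfile::tempdir()?;
    create_dir_all(dir.path().join("nested"))?;
    File::create(dir.path().join("file.txt"))?;
    // Empty the directory without removing the directory itself.
    clear_directory(dir.path())?;
    assert_eq!(read_dir(dir.path())?.count(), 0);
    Ok(())
}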
+5 -2
@@ -1,7 +1,7 @@
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Result, anyhow};
use anyhow::{Context as _, Result, anyhow};
const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
@@ -38,7 +38,10 @@ where
));
}
match future().await? {
match future()
.await
.context("Polled future returned an error during polling loop")?
{
ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration,
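For context, a self-contained sketch of the polling pattern shown above; the function name and signature here are hypothetical, not the crate's actual API:

use std::{future::Future, ops::ControlFlow, time::Duration};

use anyhow::Context as _;

// Hypothetical illustration: poll `future` until it returns Break, doubling
// the wait after each Continue and capping it at the 60s maximum.
async fn poll_until<F, Fut, T>(mut future: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = anyhow::Result<ControlFlow<T>>>,
{
    let mut wait = Duration::from_millis(100);
    loop {
        match future()
            .await
            .context("Polled future returned an error during polling loop")?
        {
            ControlFlow::Break(value) => return Ok(value),
            ControlFlow::Continue(()) => {
                tokio::time::sleep(wait).await;
                wait = (wait * 2).min(Duration::from_secs(60));
            }
        }
    }
}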
@@ -6,6 +6,42 @@ pub enum VersionOrRequirement {
Requirement(VersionReq),
}
impl VersionOrRequirement {
/// A helper function to convert a [`semver::Version`] into a [`semver::VersionReq`].
pub fn version_to_requirement(version: &Version) -> VersionReq {
// Ignoring "build" metadata in the version, we can turn
// it into a requirement which is an exact match for the
// given version and nothing else:
VersionReq {
comparators: vec![semver::Comparator {
op: semver::Op::Exact,
major: version.major,
minor: Some(version.minor),
patch: Some(version.patch),
pre: version.pre.clone(),
}],
}
}
}
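As an illustration of the conversion (a sketch, not part of the diff; it assumes the file's existing semver imports):

#[test]
fn exact_requirement_matches_only_that_version() {
    // The resulting requirement matches exactly the given version and nothing else.
    let req = VersionOrRequirement::version_to_requirement(&Version::new(0, 8, 13));
    assert!(req.matches(&Version::new(0, 8, 13)));
    assert!(!req.matches(&Version::new(0, 8, 14)));
    assert_eq!(req, VersionReq::parse("=0.8.13").unwrap());
}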
impl serde::Serialize for VersionOrRequirement {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
VersionOrRequirement::Version(v) => serializer.serialize_str(&v.to_string()),
VersionOrRequirement::Requirement(r) => serializer.serialize_str(&r.to_string()),
}
}
}
impl Default for VersionOrRequirement {
fn default() -> Self {
VersionOrRequirement::Requirement(VersionReq::STAR)
}
}
impl From<Version> for VersionOrRequirement {
fn from(value: Version) -> Self {
Self::Version(value)
@@ -18,24 +54,45 @@ impl From<VersionReq> for VersionOrRequirement {
}
}
impl From<VersionOrRequirement> for VersionReq {
fn from(value: VersionOrRequirement) -> Self {
match value {
VersionOrRequirement::Version(version) => {
VersionOrRequirement::version_to_requirement(&version)
}
VersionOrRequirement::Requirement(version_req) => version_req,
}
}
}
impl TryFrom<VersionOrRequirement> for Version {
type Error = anyhow::Error;
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
let VersionOrRequirement::Version(version) = value else {
anyhow::bail!("Version or requirement was not a version");
};
Ok(version)
}
}
impl TryFrom<VersionOrRequirement> for VersionReq {
type Error = anyhow::Error;
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
let VersionOrRequirement::Requirement(requirement) = value else {
anyhow::bail!("Version or requirement was not a requirement");
};
Ok(requirement)
match value {
VersionOrRequirement::Version(version) => Ok(version),
VersionOrRequirement::Requirement(mut version_req) => {
if version_req.comparators.len() != 1 {
anyhow::bail!(
"The version requirement in VersionOrRequirement is not a single exact version"
);
}
let c = version_req.comparators.pop().unwrap();
let (semver::Op::Exact, Some(minor), Some(patch)) = (c.op, c.minor, c.patch) else {
anyhow::bail!(
"The version requirement in VersionOrRequirement is not an exact version"
);
};
Ok(Version {
major: c.major,
minor,
patch,
pre: c.pre,
build: Default::default(),
})
}
}
}
}
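A round-trip sketch (illustrative, not part of the diff) tying the conversions together:

#[test]
fn version_round_trips_through_exact_requirement() {
    // Version -> exact VersionReq -> back to Version; build metadata is dropped.
    let v = Version::new(1, 2, 3);
    let req: VersionReq = VersionOrRequirement::from(v.clone()).into();
    let back = Version::try_from(VersionOrRequirement::from(req)).unwrap();
    assert_eq!(back, v);
}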
+3
@@ -26,5 +26,8 @@ serde_json = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }
[lints]
workspace = true
-4
@@ -1,4 +0,0 @@
use semver::Version;
/// This is the first version of solc that supports the `--via-ir` flag / "viaIR" input JSON.
pub const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
+22 -25
@@ -3,7 +3,7 @@
//! - Polkadot revive resolc compiler
//! - Polkadot revive Wasm compiler
mod constants;
mod utils;
use std::{
collections::HashMap,
@@ -13,17 +13,19 @@ use std::{
use alloy::json_abi::JsonAbi;
use alloy_primitives::Address;
use semver::Version;
use anyhow::Context;
use serde::{Deserialize, Serialize};
use revive_common::EVMVersion;
use revive_dt_common::cached_fs::read_to_string;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments;
// Re-export this as it's a part of the compiler interface.
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
// Expose functionality for instantiating a SolcCompiler.
pub use utils::{SolcCompiler, solc_compiler};
pub mod revive_js;
pub mod revive_resolc;
pub mod solc;
@@ -40,28 +42,19 @@ pub trait SolidityCompiler {
additional_options: Self::Options,
) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
fn new(solc_executable: PathBuf) -> Self;
fn get_compiler_executable(
config: &Arguments,
version: impl Into<VersionOrRequirement>,
) -> impl Future<Output = anyhow::Result<PathBuf>>;
fn version(&self) -> impl Future<Output = anyhow::Result<Version>>;
/// Instantiate a new compiler.
fn new(config: &Arguments) -> Self;
/// Does the compiler support the provided mode and version settings?
fn supports_mode(
compiler_version: &Version,
optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool;
fn supports_mode(optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool;
}
/// The generic compilation input configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Serialize)]
pub struct CompilerInput {
pub pipeline: Option<ModePipeline>,
pub optimization: Option<ModeOptimizerSetting>,
pub solc: Option<SolcCompiler>,
pub evm_version: Option<EVMVersion>,
pub allow_paths: Vec<PathBuf>,
pub base_path: Option<PathBuf>,
@@ -99,6 +92,7 @@ where
input: CompilerInput {
pipeline: Default::default(),
optimization: Default::default(),
solc: Default::default(),
evm_version: Default::default(),
allow_paths: Default::default(),
base_path: Default::default(),
@@ -110,6 +104,11 @@ where
}
}
pub fn with_solc(mut self, value: impl Into<Option<SolcCompiler>>) -> Self {
self.input.solc = value.into();
self
}
pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
self.input.optimization = value.into();
self
@@ -136,9 +135,10 @@ where
}
pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
self.input
.sources
.insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?);
self.input.sources.insert(
path.as_ref().to_path_buf(),
read_to_string(path.as_ref()).context("Failed to read the contract source")?,
);
Ok(self)
}
@@ -177,11 +177,8 @@ where
callback(self)
}
pub async fn try_build(
self,
compiler_path: impl AsRef<Path>,
) -> anyhow::Result<CompilerOutput> {
T::new(compiler_path.as_ref().to_path_buf())
pub async fn try_build(self, config: &Arguments) -> anyhow::Result<CompilerOutput> {
T::new(config)
.build(self.input, self.additional_options)
.await
}
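Putting the new interface together, a sketch of the builder flow after this change (the source path and fallback version are illustrative; it mirrors the updated integration tests later in this diff):

use revive_dt_compiler::{Compiler, ModePipeline, solc::Solc, solc_compiler};
use revive_dt_config::Arguments;
use semver::Version;

async fn demo(args: &Arguments) -> anyhow::Result<()> {
    // Resolve a suitable solc up front (new in this PR), then hand it to the
    // compiler builder instead of passing a compiler path to try_build.
    let solc = solc_compiler(
        args.directory(),
        &Version::new(0, 8, 30), // illustrative fallback version
        None,                    // no caller-supplied requirement
        ModePipeline::ViaYulIR,
    )
    .await?;
    let _output = Compiler::<Solc>::new()
        .with_solc(solc)
        .with_source("./contracts/main.sol")? // illustrative path
        .try_build(args)
        .await?;
    Ok(())
}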
+73 -125
@@ -1,14 +1,8 @@
//! Implements the [SolidityCompiler] trait with `resolc` for
//! compiling contracts to PolkaVM (PVM) bytecode.
use std::{
path::PathBuf,
process::{Command, Stdio},
sync::LazyLock,
};
use std::{path::PathBuf, process::Stdio};
use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments;
use revive_solc_json_interface::{
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
@@ -23,10 +17,6 @@ use anyhow::Context;
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
// TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
// specified solc compiler. I believe that currently we completely ignore the specified solc binary
// when invoking resolc which doesn't seem right if we're using solc as a compiler frontend.
/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
#[derive(Debug)]
pub struct Resolc {
@@ -43,6 +33,7 @@ impl SolidityCompiler for Resolc {
CompilerInput {
pipeline,
optimization,
solc,
evm_version,
allow_paths,
base_path,
@@ -60,6 +51,7 @@ impl SolidityCompiler for Resolc {
);
}
let solc = solc.ok_or_else(|| anyhow::anyhow!("solc compiler not provided to resolc."))?;
let input = SolcStandardJsonInput {
language: SolcStandardJsonInputLanguage::Solidity,
sources: sources
@@ -105,6 +97,8 @@ impl SolidityCompiler for Resolc {
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.arg("--solc")
.arg(&solc.path)
.arg("--standard-json");
if let Some(ref base_path) = base_path {
@@ -119,18 +113,28 @@ impl SolidityCompiler for Resolc {
.join(","),
);
}
let mut child = command.spawn()?;
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?;
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
let serialized_input = serde_json::to_vec(&input)?;
stdin_pipe.write_all(&serialized_input).await?;
let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for resolc")?;
stdin_pipe
.write_all(&serialized_input)
.await
.context("Failed to write Standard JSON to resolc stdin")?;
let output = child.wait_with_output().await?;
let output = child
.wait_with_output()
.await
.context("Failed while waiting for resolc process to finish")?;
let stdout = output.stdout;
let stderr = output.stderr;
if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input)?;
let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&stderr);
tracing::error!(
status = %output.status,
@@ -141,12 +145,14 @@ impl SolidityCompiler for Resolc {
anyhow::bail!("Compilation failed with an error: {message}");
}
let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr)
)
})?;
let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
.map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr)
)
})
.context("Failed to parse resolc standard JSON output")?;
tracing::debug!(
output = %serde_json::to_string(&parsed).unwrap(),
@@ -173,7 +179,10 @@ impl SolidityCompiler for Resolc {
let mut compiler_output = CompilerOutput::default();
for (source_path, contracts) in contracts.into_iter() {
let source_path = PathBuf::from(source_path).canonicalize()?;
let source_path = PathBuf::from(&source_path)
.canonicalize()
.with_context(|| format!("Failed to canonicalize path {source_path}"))?;
let map = compiler_output.contracts.entry(source_path).or_default();
for (contract_name, contract_information) in contracts.into_iter() {
@@ -181,23 +190,41 @@ impl SolidityCompiler for Resolc {
.evm
.and_then(|evm| evm.bytecode.clone())
.context("Unexpected - Contract compiled with resolc has no bytecode")?;
let abi = contract_information
.metadata
.as_ref()
.and_then(|metadata| metadata.as_object())
.and_then(|metadata| metadata.get("solc_metadata"))
.and_then(|solc_metadata| solc_metadata.as_str())
.and_then(|metadata| serde_json::from_str::<serde_json::Value>(metadata).ok())
.and_then(|metadata| {
metadata.get("output").and_then(|output| {
output
.get("abi")
.and_then(|abi| serde_json::from_value::<JsonAbi>(abi.clone()).ok())
})
})
.context(
"Unexpected - Failed to get the ABI for a contract compiled with resolc",
)?;
let abi = {
let metadata = contract_information
.metadata
.as_ref()
.context("No metadata found for the contract")?;
let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object
.get("solc_metadata")
.context("Contract doesn't have a 'solc_metadata' field")?;
solc_metadata_value
.as_str()
.context("The 'solc_metadata' field is not a string")?
}
serde_json::Value::Null
| serde_json::Value::Bool(_)
| serde_json::Value::Number(_)
| serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}")
}
};
let solc_metadata =
serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
"Failed to deserialize the solc_metadata as a serde_json generic value",
)?;
let output_value = solc_metadata
.get("output")
.context("solc_metadata doesn't have an output field")?;
let abi_value = output_value
.get("abi")
.context("solc_metadata output doesn't contain an abi field")?;
serde_json::from_value::<JsonAbi>(abi_value.clone())
.context("ABI found in solc_metadata output is not valid ABI")?
};
map.insert(contract_name, (bytecode.object, abi));
}
}
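For reference, a sketch of the metadata shape this extraction logic expects (field names taken from the code above; the exact payload resolc emits may carry more fields):

#[test]
fn abi_can_be_extracted_from_solc_metadata_shape() {
    // `metadata` is either a JSON string or an object holding a
    // "solc_metadata" string; that string embeds `output.abi`.
    let metadata = serde_json::json!({
        "solc_metadata": r#"{"output":{"abi":[]}}"#
    });
    let solc_metadata_str = metadata["solc_metadata"].as_str().unwrap();
    let solc_metadata: serde_json::Value =
        serde_json::from_str(solc_metadata_str).unwrap();
    assert!(solc_metadata["output"]["abi"].is_array());
}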
@@ -205,94 +232,15 @@ impl SolidityCompiler for Resolc {
Ok(compiler_output)
}
fn new(resolc_path: PathBuf) -> Self {
Resolc { resolc_path }
}
async fn get_compiler_executable(
config: &Arguments,
_version: impl Into<VersionOrRequirement>,
) -> anyhow::Result<PathBuf> {
if !config.resolc.as_os_str().is_empty() {
return Ok(config.resolc.clone());
}
Ok(PathBuf::from("resolc"))
}
async fn version(&self) -> anyhow::Result<semver::Version> {
/// This is a cache of the path of the compiler to the version number of the compiler. We
/// choose to cache the version in this way rather than through a field on the struct since
/// compiler objects are being created all the time from the path and the compiler object is
/// not reused over time.
static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
match VERSION_CACHE.entry(self.resolc_path.clone()) {
dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
dashmap::Entry::Vacant(vacant_entry) => {
let output = Command::new(self.resolc_path.as_path())
.arg("--version")
.stdout(Stdio::piped())
.spawn()?
.wait_with_output()?
.stdout;
let output = String::from_utf8_lossy(&output);
let version_string = output
.split("version ")
.nth(1)
.context("Version parsing failed")?
.split("+")
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
vacant_entry.insert(version.clone());
Ok(version)
}
fn new(config: &Arguments) -> Self {
Resolc {
resolc_path: config.resolc.clone(),
}
}
fn supports_mode(
_compiler_version: &Version,
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
// We only support the Y (IE compile via Yul IR) mode here, which also means that we can
// only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
// needs this to translate to LLVM IR and then RISCV.
// Note: the original implementation of this function looked like the following:
// ```
// pipeline == ModePipeline::ViaYulIR && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
// ```
// However, that implementation is sadly incorrect since the version that's passed into this
// function is not the version of solc but the version of resolc. This is despite the fact
// that resolc depends on Solc for the initial Yul codegen. Therefore, we have skipped the
// version check until we do a better integrations between resolc and solc.
fn supports_mode(_optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool {
// We only support the Y (i.e. compile via Yul IR) mode here. We must always compile
// via Yul IR as resolc needs this to translate to LLVM IR and then RISC-V.
pipeline == ModePipeline::ViaYulIR
}
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn compiler_version_can_be_obtained() {
// Arrange
let args = Arguments::default();
let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
.await
.unwrap();
let compiler = Resolc::new(path);
// Act
let version = compiler.version().await;
// Assert
let _ = version.expect("Failed to get version");
}
}
+42 -132
@@ -1,20 +1,8 @@
//! Implements the [SolidityCompiler] trait with solc for
//! compiling contracts to EVM bytecode.
use std::{
path::PathBuf,
process::{Command, Stdio},
sync::LazyLock,
};
use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments;
use revive_dt_solc_binaries::download_solc;
use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
use super::utils;
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use anyhow::Context;
use foundry_compilers_artifacts::{
output_selection::{
@@ -23,13 +11,12 @@ use foundry_compilers_artifacts::{
solc::CompilerOutput as SolcOutput,
solc::*,
};
use semver::Version;
use revive_dt_config::Arguments;
use std::process::Stdio;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
#[derive(Debug)]
pub struct Solc {
solc_path: PathBuf,
}
pub struct Solc {}
impl SolidityCompiler for Solc {
type Options = ();
@@ -40,6 +27,7 @@ impl SolidityCompiler for Solc {
CompilerInput {
pipeline,
optimization,
solc,
evm_version,
allow_paths,
base_path,
@@ -49,7 +37,9 @@ impl SolidityCompiler for Solc {
}: CompilerInput,
_: Self::Options,
) -> anyhow::Result<CompilerOutput> {
let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
let solc = solc.ok_or_else(|| anyhow::anyhow!("solc compiler not provided."))?;
let compiler_supports_via_ir =
utils::solc_versions_supporting_yul_ir().matches(&solc.version);
// Be careful to entirely omit the viaIR field if the compiler does not support it,
// as it will error if you provide fields it does not know about. Because
@@ -115,7 +105,7 @@ impl SolidityCompiler for Solc {
},
};
let mut command = AsyncCommand::new(&self.solc_path);
let mut command = AsyncCommand::new(&solc.path);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
@@ -134,15 +124,25 @@ impl SolidityCompiler for Solc {
.join(","),
);
}
let mut child = command.spawn()?;
let mut child = command
.spawn()
.with_context(|| format!("Failed to spawn solc at {}", solc.path.display()))?;
let stdin = child.stdin.as_mut().expect("should be piped");
let serialized_input = serde_json::to_vec(&input)?;
stdin.write_all(&serialized_input).await?;
let output = child.wait_with_output().await?;
let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for solc")?;
stdin
.write_all(&serialized_input)
.await
.context("Failed to write Standard JSON to solc stdin")?;
let output = child
.wait_with_output()
.await
.context("Failed while waiting for solc process to finish")?;
if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input)?;
let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&output.stderr);
tracing::error!(
status = %output.status,
@@ -153,12 +153,14 @@ impl SolidityCompiler for Solc {
anyhow::bail!("Compilation failed with an error: {message}");
}
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout).map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&output.stdout)
)
})?;
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
.map_err(|e| {
anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&output.stdout)
)
})
.context("Failed to parse solc standard JSON output")?;
// Detecting if the compiler output contained errors and reporting them through logs and
// errors instead of returning the compiler output that might contain errors.
@@ -178,7 +180,12 @@ impl SolidityCompiler for Solc {
for (contract_path, contracts) in parsed.contracts {
let map = compiler_output
.contracts
.entry(contract_path.canonicalize()?)
.entry(contract_path.canonicalize().with_context(|| {
format!(
"Failed to canonicalize contract path {}",
contract_path.display()
)
})?)
.or_default();
for (contract_name, contract_info) in contracts.into_iter() {
let source_code = contract_info
@@ -199,110 +206,13 @@ impl SolidityCompiler for Solc {
Ok(compiler_output)
}
fn new(solc_path: PathBuf) -> Self {
Self { solc_path }
fn new(_config: &Arguments) -> Self {
Self {}
}
async fn get_compiler_executable(
config: &Arguments,
version: impl Into<VersionOrRequirement>,
) -> anyhow::Result<PathBuf> {
let path = download_solc(config.directory(), version, config.wasm).await?;
Ok(path)
}
async fn version(&self) -> anyhow::Result<semver::Version> {
/// This is a cache of the path of the compiler to the version number of the compiler. We
/// choose to cache the version in this way rather than through a field on the struct since
/// compiler objects are being created all the time from the path and the compiler object is
/// not reused over time.
static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
match VERSION_CACHE.entry(self.solc_path.clone()) {
dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
dashmap::Entry::Vacant(vacant_entry) => {
// The following is the parsing code for the version from the solc version strings
// which look like the following:
// ```
// solc, the solidity compiler commandline interface
// Version: 0.8.30+commit.73712a01.Darwin.appleclang
// ```
let child = Command::new(self.solc_path.as_path())
.arg("--version")
.stdout(Stdio::piped())
.spawn()?;
let output = child.wait_with_output()?;
let output = String::from_utf8_lossy(&output.stdout);
let version_line = output
.split("Version: ")
.nth(1)
.context("Version parsing failed")?;
let version_string = version_line
.split("+")
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
vacant_entry.insert(version.clone());
Ok(version)
}
}
}
fn supports_mode(
compiler_version: &Version,
_optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline,
) -> bool {
fn supports_mode(_optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline) -> bool {
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
// (i.e. no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
pipeline == ModePipeline::ViaEVMAssembly
|| (pipeline == ModePipeline::ViaYulIR
&& compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
}
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn compiler_version_can_be_obtained() {
// Arrange
let args = Arguments::default();
let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
.await
.unwrap();
let compiler = Solc::new(path);
// Act
let version = compiler.version().await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 7, 6)
)
}
#[tokio::test]
async fn compiler_version_can_be_obtained1() {
// Arrange
let args = Arguments::default();
let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21))
.await
.unwrap();
let compiler = Solc::new(path);
// Act
let version = compiler.version().await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 4, 21)
)
pipeline == ModePipeline::ViaEVMAssembly || pipeline == ModePipeline::ViaYulIR
}
}
+159
@@ -0,0 +1,159 @@
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
process::{Command, Stdio},
sync::LazyLock,
};
use anyhow::Context;
use dashmap::DashMap;
use revive_dt_common::types::{ModePipeline, VersionOrRequirement};
use semver::{Version, VersionReq};
/// Return the path and version of a suitable `solc` compiler given the requirements provided.
///
/// This caches any compiler binaries/paths that are downloaded as a result of calling this.
pub async fn solc_compiler(
cache_directory: &Path,
fallback_version: &Version,
required_version: Option<&VersionReq>,
pipeline: ModePipeline,
) -> anyhow::Result<SolcCompiler> {
// Require Yul compatible solc, or any if we don't care about compiling via Yul.
let mut version_req = if pipeline == ModePipeline::ViaYulIR {
solc_versions_supporting_yul_ir()
} else {
VersionReq::STAR
};
// Take into account the version requirements passed in, too.
if let Some(other_version_req) = required_version {
version_req
.comparators
.extend(other_version_req.comparators.iter().cloned());
}
// If no requirements yet then fall back to the fallback version.
let version_req = if version_req == VersionReq::STAR {
VersionOrRequirement::version_to_requirement(fallback_version)
} else {
version_req
};
// Download (or pull from cache) a suitable solc compiler given this.
let solc_path =
revive_dt_solc_binaries::download_solc(cache_directory, version_req, false).await?;
let solc_version = solc_version(&solc_path).await?;
Ok(SolcCompiler {
version: solc_version,
path: solc_path,
})
}
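A minimal usage sketch of this helper (the cache directory and versions here are illustrative):

use std::path::Path;

async fn demo() -> anyhow::Result<SolcCompiler> {
    solc_compiler(
        Path::new("/tmp/solc-cache"),
        &Version::new(0, 8, 30), // fallback when nothing else constrains the version
        None,                    // no caller-supplied requirement
        ModePipeline::ViaYulIR,  // constrains the pick to a Yul-IR-capable solc
    )
    .await
}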
/// A `solc` compiler, returned from [`solc_compiler`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SolcCompiler {
/// Version of the compiler.
pub version: Version,
/// Path to the compiler executable.
pub path: PathBuf,
}
/// Fetch the solc version given a path to the executable
async fn solc_version(solc_path: &Path) -> anyhow::Result<semver::Version> {
/// This is a cache from compiler path to compiler version. We cache by path rather than
/// in any longer-lived state since versions are looked up repeatedly for the same
/// binaries and nothing else persists across those lookups.
static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
match VERSION_CACHE.entry(solc_path.to_path_buf()) {
dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
dashmap::Entry::Vacant(vacant_entry) => {
// The following is the parsing code for the version from the solc version strings
// which look like the following:
// ```
// solc, the solidity compiler commandline interface
// Version: 0.8.30+commit.73712a01.Darwin.appleclang
// ```
let child = Command::new(solc_path)
.arg("--version")
.stdout(Stdio::piped())
.spawn()?;
let output = child.wait_with_output()?;
let output = String::from_utf8_lossy(&output.stdout);
let version_line = output
.split("Version: ")
.nth(1)
.context("Version parsing failed")?;
let version_string = version_line
.split("+")
.next()
.context("Version parsing failed")?;
let version = Version::parse(version_string)?;
vacant_entry.insert(version.clone());
Ok(version)
}
}
}
/// This returns the solc versions which support Yul IR.
pub fn solc_versions_supporting_yul_ir() -> VersionReq {
use semver::{Comparator, Op, Prerelease, VersionReq};
VersionReq {
comparators: vec![Comparator {
op: Op::GreaterEq,
major: 0,
minor: Some(8),
patch: Some(13),
pre: Prerelease::EMPTY,
}],
}
}
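A quick sanity check of the requirement (an illustrative sketch, not part of the diff):

#[test]
fn yul_ir_requirement_starts_at_0_8_13() {
    let req = solc_versions_supporting_yul_ir();
    assert!(req.matches(&Version::new(0, 8, 13)));
    assert!(!req.matches(&Version::new(0, 8, 12)));
}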
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn compiler_version_can_be_obtained() {
// Arrange
let temp_dir = tempfile::tempdir().expect("can create tempdir");
let solc_path =
revive_dt_solc_binaries::download_solc(temp_dir.path(), Version::new(0, 7, 6), false)
.await
.expect("can download solc");
// Act
let version = solc_version(&solc_path).await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 7, 6)
)
}
#[tokio::test]
async fn compiler_version_can_be_obtained1() {
// Arrange
let temp_dir = tempfile::tempdir().expect("can create tempdir");
let solc_path =
revive_dt_solc_binaries::download_solc(temp_dir.path(), Version::new(0, 4, 21), false)
.await
.expect("can download solc");
// Act
let version = solc_version(&solc_path).await;
// Assert
assert_eq!(
version.expect("Failed to get version"),
Version::new(0, 4, 21)
)
}
}
+3 -10
@@ -1,16 +1,12 @@
use std::path::PathBuf;
use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::Arguments;
use semver::Version;
#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
// Arrange
let args = Arguments::default();
let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
.await
.unwrap();
// Act
let output = Compiler::<Solc>::new()
@@ -18,7 +14,7 @@ async fn contracts_can_be_compiled_with_solc() {
.unwrap()
.with_source("./tests/assets/array_one_element/main.sol")
.unwrap()
.try_build(compiler_path)
.try_build(&args)
.await;
// Assert
@@ -49,9 +45,6 @@ async fn contracts_can_be_compiled_with_solc() {
async fn contracts_can_be_compiled_with_resolc() {
// Arrange
let args = Arguments::default();
let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
.await
.unwrap();
// Act
let output = Compiler::<Resolc>::new()
@@ -59,7 +52,7 @@ async fn contracts_can_be_compiled_with_resolc() {
.unwrap()
.with_source("./tests/assets/array_one_element/main.sol")
.unwrap()
.try_build(compiler_path)
.try_build(&args)
.await;
// Assert
+1 -1
@@ -12,7 +12,7 @@ rust-version.workspace = true
alloy = { workspace = true }
clap = { workspace = true }
semver = { workspace = true }
temp-dir = { workspace = true }
tempfile = { workspace = true }
serde = { workspace = true }
[lints]
+13 -1
@@ -10,7 +10,7 @@ use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
use clap::{Parser, ValueEnum};
use semver::Version;
use serde::{Deserialize, Serialize};
use temp_dir::TempDir;
use tempfile::TempDir;
#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
#[command(name = "retester")]
@@ -115,6 +115,18 @@ pub struct Arguments {
#[arg(short, long = "kitchensink", default_value = "substrate-node")]
pub kitchensink: PathBuf,
/// The path to the `revive-dev-node` executable.
///
/// By default it uses `revive-dev-node` binary found in `$PATH`.
#[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
pub revive_dev_node: PathBuf,
/// By default the tool uses the revive-dev-node when it's running differential tests against
/// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to
/// configure the tool to use kitchensink rather than the dev-node.
#[arg(long)]
pub use_kitchensink_not_dev_node: bool,
/// The path to the `eth_proxy` executable.
///
/// By default it uses `eth-rpc` binary found in `$PATH`.
-1
@@ -36,7 +36,6 @@ tracing-subscriber = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
temp-dir = { workspace = true }
tempfile = { workspace = true }
[lints]
+54 -92
@@ -9,12 +9,12 @@ use std::{
use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolcCompiler};
use revive_dt_config::Arguments;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
use anyhow::{Error, Result};
use anyhow::{Context as _, Error, Result};
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
@@ -29,7 +29,10 @@ impl CachedCompiler {
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
let mut cache = ArtifactsCache::new(path);
if invalidate_cache {
cache = cache.with_invalidated_cache().await?;
cache = cache
.with_invalidated_cache()
.await
.context("Failed to invalidate compilation cache directory")?;
}
Ok(Self(cache))
}
@@ -50,64 +53,38 @@ impl CachedCompiler {
&self,
metadata: &Metadata,
metadata_file_path: impl AsRef<Path>,
solc: SolcCompiler,
mode: &Mode,
config: &Arguments,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compilation_success_report_callback: impl Fn(
Version,
PathBuf,
bool,
Option<CompilerInput>,
CompilerOutput,
) + Clone,
compilation_failure_report_callback: impl Fn(
Option<Version>,
Option<PathBuf>,
Option<CompilerInput>,
String,
),
) -> Result<(CompilerOutput, Version)> {
compilation_success_report_callback: impl Fn(bool, Option<CompilerInput>, CompilerOutput)
+ Clone,
compilation_failure_report_callback: impl Fn(Option<CompilerInput>, String),
) -> Result<CompilerOutput> {
static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
Lazy::new(Default::default);
let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone());
let compiler_path = <P::Compiler as SolidityCompiler>::get_compiler_executable(
config,
compiler_version_or_requirement,
)
.await
.inspect_err(|err| {
compilation_failure_report_callback(None, None, None, err.to_string())
})?;
let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
.version()
.await
.inspect_err(|err| {
compilation_failure_report_callback(
None,
Some(compiler_path.clone()),
None,
err.to_string(),
)
})?;
let cache_key = CacheKey {
platform_key: P::config_id().to_string(),
compiler_version: compiler_version.clone(),
compiler_version: solc.version.clone(),
metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
solc_mode: mode.clone(),
};
let compilation_callback = || {
let compiler_path = compiler_path.clone();
let compiler_version = compiler_version.clone();
// let compiler_path = compiler_path.clone();
// let compiler_version = compiler_version.clone();
let compilation_success_report_callback = compilation_success_report_callback.clone();
async move {
compile_contracts::<P>(
metadata.directory()?,
compiler_path,
compiler_version,
metadata.files_to_compile()?,
metadata
.directory()
.context("Failed to get metadata directory while preparing compilation")?,
metadata
.files_to_compile()
.context("Failed to enumerate files to compile from metadata")?,
config,
solc,
mode,
deployed_libraries,
compilation_success_report_callback,
@@ -131,7 +108,10 @@ impl CachedCompiler {
Some(_) => {
debug!("Deployed libraries defined, recompilation must take place");
debug!("Cache miss");
compilation_callback().await?.compiler_output
compilation_callback()
.await
.context("Compilation callback for deployed libraries failed")?
.compiler_output
}
// If no deployed libraries are specified then we can follow the cached flow and attempt
// to lookup the compilation artifacts in the cache.
@@ -159,44 +139,36 @@ impl CachedCompiler {
match self.0.get(&cache_key).await {
Some(cache_value) => {
compilation_success_report_callback(
compiler_version.clone(),
compiler_path,
true,
None,
cache_value.compiler_output.clone(),
);
cache_value.compiler_output
}
None => compilation_callback().await?.compiler_output,
None => {
compilation_callback()
.await
.context("Compilation callback failed (cache miss path)")?
.compiler_output
}
}
}
};
Ok((compiled_contracts, compiler_version))
Ok(compiled_contracts)
}
}
#[allow(clippy::too_many_arguments)]
async fn compile_contracts<P: Platform>(
metadata_directory: impl AsRef<Path>,
compiler_path: impl AsRef<Path>,
compiler_version: Version,
mut files_to_compile: impl Iterator<Item = PathBuf>,
config: &Arguments,
solc: SolcCompiler,
mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compilation_success_report_callback: impl Fn(
Version,
PathBuf,
bool,
Option<CompilerInput>,
CompilerOutput,
),
compilation_failure_report_callback: impl Fn(
Option<Version>,
Option<PathBuf>,
Option<CompilerInput>,
String,
),
compilation_success_report_callback: impl Fn(bool, Option<CompilerInput>, CompilerOutput),
compilation_failure_report_callback: impl Fn(Option<CompilerInput>, String),
) -> Result<CompilerOutput> {
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol")
@@ -204,6 +176,7 @@ async fn compile_contracts<P: Platform>(
.collect::<Vec<_>>();
let compiler = Compiler::<P::Compiler>::new()
.with_solc(solc)
.with_allow_path(metadata_directory)
// Handling the modes
.with_optimization(mode.optimize_setting)
@@ -212,14 +185,7 @@ async fn compile_contracts<P: Platform>(
.try_then(|compiler| {
files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
})
.inspect_err(|err| {
compilation_failure_report_callback(
Some(compiler_version.clone()),
Some(compiler_path.as_ref().to_path_buf()),
None,
err.to_string(),
)
})?
.inspect_err(|err| compilation_failure_report_callback(None, format!("{err:#}")))?
// Adding the deployed libraries to the compiler.
.then(|compiler| {
deployed_libraries
@@ -238,23 +204,14 @@ async fn compile_contracts<P: Platform>(
let compiler_input = compiler.input();
let compiler_output = compiler
.try_build(compiler_path.as_ref())
.try_build(config)
.await
.inspect_err(|err| {
compilation_failure_report_callback(
Some(compiler_version.clone()),
Some(compiler_path.as_ref().to_path_buf()),
Some(compiler_input.clone()),
err.to_string(),
)
})?;
compilation_success_report_callback(
compiler_version,
compiler_path.as_ref().to_path_buf(),
false,
Some(compiler_input),
compiler_output.clone(),
);
compilation_failure_report_callback(Some(compiler_input.clone()), format!("{err:#}"))
})
.context("Failed to configure compiler with sources and options")?;
compilation_success_report_callback(false, Some(compiler_input), compiler_output.clone());
Ok(compiler_output)
}
@@ -273,15 +230,20 @@ impl ArtifactsCache {
pub async fn with_invalidated_cache(self) -> Result<Self> {
cacache::clear(self.path.as_path())
.await
.map_err(Into::<Error>::into)?;
.map_err(Into::<Error>::into)
.with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
Ok(self)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
let key = bson::to_vec(key)?;
let value = bson::to_vec(value)?;
cacache::write(self.path.as_path(), key.encode_hex(), value).await?;
let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
cacache::write(self.path.as_path(), key.encode_hex(), value)
.await
.with_context(|| {
format!("Failed to write cache entry under {}", self.path.display())
})?;
Ok(())
}
+41 -20
@@ -86,18 +86,22 @@ where
) -> anyhow::Result<StepOutput> {
match step {
Step::FunctionCall(input) => {
let (receipt, geth_trace, diff_mode) =
self.handle_input(metadata, input, node).await?;
let (receipt, geth_trace, diff_mode) = self
.handle_input(metadata, input, node)
.await
.context("Failed to handle function call step")?;
Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
}
Step::BalanceAssertion(balance_assertion) => {
self.handle_balance_assertion(metadata, balance_assertion, node)
.await?;
.await
.context("Failed to handle balance assertion step")?;
Ok(StepOutput::BalanceAssertion)
}
Step::StorageEmptyAssertion(storage_empty) => {
self.handle_storage_empty(metadata, storage_empty, node)
.await?;
.await
.context("Failed to handle storage empty assertion step")?;
Ok(StepOutput::StorageEmptyAssertion)
}
}
@@ -113,18 +117,23 @@ where
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
let deployment_receipts = self
.handle_input_contract_deployment(metadata, input, node)
.await?;
.await
.context("Failed during contract deployment phase of input handling")?;
let execution_receipt = self
.handle_input_execution(input, deployment_receipts, node)
.await?;
.await
.context("Failed during transaction execution phase of input handling")?;
let tracing_result = self
.handle_input_call_frame_tracing(&execution_receipt, node)
.await?;
self.handle_input_variable_assignment(input, &tracing_result)?;
.await
.context("Failed during callframe tracing phase of input handling")?;
self.handle_input_variable_assignment(input, &tracing_result)
.context("Failed to assign variables from callframe output")?;
let (_, (geth_trace, diff_mode)) = try_join!(
self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
self.handle_input_diff(&execution_receipt, node)
)?;
)
.context("Failed while evaluating expectations and diffs in parallel")?;
Ok((execution_receipt, geth_trace, diff_mode))
}
@@ -136,9 +145,11 @@ where
node: &T::Blockchain,
) -> anyhow::Result<()> {
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
.await?;
.await
.context("Failed to deploy contract for balance assertion")?;
self.handle_balance_assertion_execution(balance_assertion, node)
.await?;
.await
.context("Failed to execute balance assertion")?;
Ok(())
}
@@ -150,9 +161,11 @@ where
node: &T::Blockchain,
) -> anyhow::Result<()> {
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
.await?;
.await
.context("Failed to deploy contract for storage empty assertion")?;
self.handle_storage_empty_assertion_execution(storage_empty, node)
.await?;
.await
.context("Failed to execute storage empty assertion")?;
Ok(())
}
@@ -191,7 +204,8 @@ where
value,
node,
)
.await?
.await
.context("Failed to get or deploy contract instance during input execution")?
{
receipts.insert(instance.clone(), receipt);
}
@@ -213,7 +227,7 @@ where
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&input.instance)
.context("Failed to find deployment receipt"),
.context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => {
let tx = match input
.legacy_transaction(node, self.default_resolution_context())
@@ -385,7 +399,8 @@ where
let actual = &tracing_result.output.as_ref().unwrap_or_default();
if !expected
.is_equivalent(actual, resolver, resolution_context)
.await?
.await
.context("Failed to resolve calldata equivalence for return data assertion")?
{
tracing::error!(
?execution_receipt,
@@ -448,7 +463,8 @@ where
let expected = Calldata::new_compound([expected]);
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await?
.await
.context("Failed to resolve event topic equivalence")?
{
tracing::error!(
event_idx,
@@ -468,7 +484,8 @@ where
let actual = &actual_event.data().data;
if !expected
.is_equivalent(&actual.0, resolver, resolution_context)
.await?
.await
.context("Failed to resolve event value equivalence")?
{
tracing::error!(
event_idx,
@@ -501,8 +518,12 @@ where
let trace = node
.trace_transaction(execution_receipt, trace_options)
.await?;
let diff = node.state_diff(execution_receipt).await?;
.await
.context("Failed to obtain geth prestate tracer output")?;
let diff = node
.state_diff(execution_receipt)
.await
.context("Failed to obtain state diff for transaction")?;
Ok((trace, diff))
}
+124 -109
@@ -14,15 +14,15 @@ use alloy::{
};
use anyhow::Context;
use clap::Parser;
use futures::StreamExt;
use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::IndexMap;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier,
};
use temp_dir::TempDir;
use tempfile::TempDir;
use tokio::{join, try_join};
use tracing::{debug, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard;
@@ -60,7 +60,7 @@ struct Test<'a> {
}
fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli()?;
let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?;
info!(
leader = args.leader.to_string(),
follower = args.follower.to_string(),
@@ -74,7 +74,8 @@ fn main() -> anyhow::Result<()> {
let number_of_threads = args.number_of_threads;
let body = async move {
let tests = collect_corpora(&args)?
let tests = collect_corpora(&args)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
@@ -96,7 +97,9 @@ fn main() -> anyhow::Result<()> {
Some(platform) => {
compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await
}
None => execute_corpus(&args, &tests, reporter, report_aggregator_task).await?,
None => execute_corpus(&args, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")?,
}
Ok(())
};
@@ -182,8 +185,10 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let tests = prepare_tests::<L, F>(args, metadata_files, reporter.clone());
let driver_task = start_driver_task::<L, F>(args, tests).await?;
let tests = prepare_tests::<L, F>(metadata_files, reporter.clone());
let driver_task = start_driver_task::<L, F>(args, tests)
.await
.context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter);
let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
@@ -193,17 +198,16 @@ where
}
fn prepare_tests<'a, L, F>(
args: &Arguments,
metadata_files: &'a [MetadataFile],
reporter: Reporter,
) -> impl Stream<Item = Test<'a>>
) -> impl Iterator<Item = Test<'a>>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let filtered_tests = metadata_files
metadata_files
.iter()
.flat_map(|metadata_file| {
metadata_file
@@ -368,19 +372,12 @@ where
} else {
true
}
});
stream::iter(filtered_tests)
// Filter based on the compiler compatibility
.filter_map(move |test| async move {
let leader_support = does_compiler_support_mode::<L>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
let follower_support = does_compiler_support_mode::<F>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
})
.filter_map(move |test| {
let leader_support =
L::Compiler::supports_mode(test.mode.optimize_setting, test.mode.pipeline);
let follower_support =
F::Compiler::supports_mode(test.mode.optimize_setting, test.mode.pipeline);
let is_allowed = leader_support && follower_support;
if !is_allowed {
@@ -414,25 +411,9 @@ where
})
}
async fn does_compiler_support_mode<P: Platform>(
args: &Arguments,
mode: &Mode,
) -> anyhow::Result<bool> {
let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone());
let compiler_path =
P::Compiler::get_compiler_executable(args, compiler_version_or_requirement).await?;
let compiler_version = P::Compiler::new(compiler_path.clone()).version().await?;
Ok(P::Compiler::supports_mode(
&compiler_version,
mode.optimize_setting,
mode.pipeline,
))
}
async fn start_driver_task<'a, L, F>(
args: &Arguments,
tests: impl Stream<Item = Test<'a>>,
tests: impl Iterator<Item = Test<'a>>,
) -> anyhow::Result<impl Future<Output = ()>>
where
L: Platform,
@@ -442,18 +423,23 @@ where
{
info!("Starting driver task");
let leader_nodes = Arc::new(NodePool::<L::Blockchain>::new(args)?);
let follower_nodes = Arc::new(NodePool::<F::Blockchain>::new(args)?);
let leader_nodes = Arc::new(
NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?,
);
let follower_nodes = Arc::new(
NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?,
);
let number_concurrent_tasks = args.number_of_concurrent_tasks();
let cached_compiler = Arc::new(
CachedCompiler::new(
args.directory().join("compilation_cache"),
args.invalidate_compilation_cache,
)
.await?,
.await
.context("Failed to initialize cached compiler")?,
);
Ok(tests.for_each_concurrent(
Ok(stream::iter(tests).for_each_concurrent(
// We want to limit the concurrent tasks here because:
//
// 1. We don't want to overwhelm the nodes with too many requests, leading to responses timing out.
@@ -501,7 +487,7 @@ where
.report_test_succeeded_event(steps_executed)
.expect("Can't fail"),
Err(error) => reporter
.report_test_failed_event(error.to_string())
.report_test_failed_event(format!("{error:#}"))
.expect("Can't fail"),
}
}
@@ -546,22 +532,30 @@ async fn start_cli_reporting_task(reporter: Reporter) {
number_of_successes += 1;
writeln!(
buf,
"{}{}Case Succeeded{}{} - Steps Executed: {}",
GREEN, BOLD, BOLD_RESET, COLOR_RESET, steps_executed
"{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
)
}
TestCaseStatus::Failed { reason } => {
number_of_failures += 1;
writeln!(
buf,
"{}{}Case Failed{}{} - Reason: {}",
RED, BOLD, BOLD_RESET, COLOR_RESET, reason
"{}{}Case Failed{} - Reason: {}{}",
RED,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
)
}
TestCaseStatus::Ignored { reason, .. } => writeln!(
buf,
"{}{}Case Ignored{}{} - Reason: {}",
GREY, BOLD, BOLD_RESET, COLOR_RESET, reason
"{}{}Case Ignored{} - Reason: {}{}",
GREY,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
),
};
}
@@ -610,6 +604,14 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let solc = revive_dt_compiler::solc_compiler(
config.directory(),
&config.solc,
test.mode.version.as_ref(),
test.mode.pipeline,
)
.await?;
let leader_reporter = test
.reporter
.execution_specific_reporter(leader_node.id(), NodeDesignation::Leader);
@@ -618,41 +620,34 @@ where
.execution_specific_reporter(follower_node.id(), NodeDesignation::Follower);
let (
(
CompilerOutput {
contracts: leader_pre_link_contracts,
},
_,
),
(
CompilerOutput {
contracts: follower_pre_link_contracts,
},
_,
),
CompilerOutput {
contracts: leader_pre_link_contracts,
},
CompilerOutput {
contracts: follower_pre_link_contracts,
},
) = try_join!(
cached_compiler.compile_contracts::<L>(
test.metadata,
test.metadata_file_path,
solc.clone(),
&test.mode,
config,
None,
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
|is_cached, compiler_input, compiler_output| {
leader_reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
solc.clone(),
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
|compiler_input, failure_reason| {
leader_reporter
.report_pre_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
solc.clone(),
compiler_input,
failure_reason,
)
@@ -662,36 +657,39 @@ where
cached_compiler.compile_contracts::<F>(
test.metadata,
test.metadata_file_path,
solc.clone(),
&test.mode,
config,
None,
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
|is_cached, compiler_input, compiler_output| {
follower_reporter
.report_pre_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
solc.clone(),
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
|compiler_input, failure_reason| {
follower_reporter
.report_pre_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
solc.clone(),
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
)
)?;
)
.context("Failed to compile pre-link contracts for leader/follower in parallel")?;
let mut leader_deployed_libraries = None::<HashMap<_, _>>;
let mut follower_deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = test.metadata.contract_sources()?;
let mut contract_sources = test
.metadata
.contract_sources()
.context("Failed to retrieve contract sources from metadata")?;
for library_instance in test
.metadata
.libraries
@@ -813,41 +811,34 @@ where
}
let (
(
CompilerOutput {
contracts: leader_post_link_contracts,
},
leader_compiler_version,
),
(
CompilerOutput {
contracts: follower_post_link_contracts,
},
follower_compiler_version,
),
CompilerOutput {
contracts: leader_post_link_contracts,
},
CompilerOutput {
contracts: follower_post_link_contracts,
},
) = try_join!(
cached_compiler.compile_contracts::<L>(
test.metadata,
test.metadata_file_path,
solc.clone(),
&test.mode,
config,
leader_deployed_libraries.as_ref(),
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
|is_cached, compiler_input, compiler_output| {
leader_reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
solc.clone(),
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
|compiler_input, failure_reason| {
leader_reporter
.report_post_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
solc.clone(),
compiler_input,
failure_reason,
)
@@ -857,41 +848,41 @@ where
cached_compiler.compile_contracts::<F>(
test.metadata,
test.metadata_file_path,
solc.clone(),
&test.mode,
config,
follower_deployed_libraries.as_ref(),
|compiler_version, compiler_path, is_cached, compiler_input, compiler_output| {
|is_cached, compiler_input, compiler_output| {
follower_reporter
.report_post_link_contracts_compilation_succeeded_event(
compiler_version,
compiler_path,
is_cached,
solc.clone(),
compiler_input,
compiler_output,
)
.expect("Can't fail")
},
|compiler_version, compiler_path, compiler_input, failure_reason| {
|compiler_input, failure_reason| {
follower_reporter
.report_post_link_contracts_compilation_failed_event(
compiler_version,
compiler_path,
solc.clone(),
compiler_input,
failure_reason,
)
.expect("Can't fail")
}
)
)?;
)
.context("Failed to compile post-link contracts for leader/follower in parallel")?;
let leader_state = CaseState::<L>::new(
leader_compiler_version,
solc.version.clone(),
leader_post_link_contracts,
leader_deployed_libraries.unwrap_or_default(),
leader_reporter,
);
let follower_state = CaseState::<F>::new(
follower_compiler_version,
solc.version.clone(),
follower_post_link_contracts,
follower_deployed_libraries.unwrap_or_default(),
follower_reporter,
@@ -934,7 +925,7 @@ async fn compile_corpus(
config: &Arguments,
tests: &[MetadataFile],
platform: &TestingPlatform,
_: Reporter,
reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) {
let tests = tests.iter().flat_map(|metadata| {
@@ -953,19 +944,42 @@ async fn compile_corpus(
let compilation_task =
futures::stream::iter(tests).for_each_concurrent(None, |(metadata, mode)| {
let cached_compiler = cached_compiler.clone();
let reporter = reporter.clone();
async move {
let solc = revive_dt_compiler::solc_compiler(
config.directory(),
&config.solc,
mode.version.as_ref(),
mode.pipeline,
)
.await;
let solc = match solc {
Ok(solc) => solc,
Err(err) => {
let test_specifier = TestSpecifier {
solc_mode: mode,
metadata_file_path: metadata.metadata_file_path.clone(),
case_idx: 0.into(),
};
let _ = reporter.report_test_failed_event(test_specifier, err.to_string());
return;
}
};
match platform {
TestingPlatform::Geth => {
let _ = cached_compiler
.compile_contracts::<Geth>(
metadata,
metadata.metadata_file_path.as_path(),
solc,
&mode,
config,
None,
|_, _, _, _, _| {},
|_, _, _, _| {},
|_, _, _| {},
|_, _| {},
)
.await;
}
@@ -974,11 +988,12 @@ async fn compile_corpus(
.compile_contracts::<Kitchensink>(
metadata,
metadata.metadata_file_path.as_path(),
solc,
&mode,
config,
None,
|_, _, _, _, _| {},
|_, _, _, _| {},
|_, _, _| {},
|_, _| {},
)
.await;
}
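
A note on the pattern used in this file: the leader and follower compilations are joined concurrently and any failure to even resolve a suitable solc is now reported as a test failure instead of aborting silently. A minimal sketch of the joined-futures half, assuming tokio and anyhow (compile_for is a hypothetical stand-in for the per-platform compilation future):

use anyhow::Context as _;

// Hypothetical stand-in for a per-platform compilation future.
async fn compile_for(platform: &str) -> anyhow::Result<String> {
    Ok(format!("artifacts for {platform}"))
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Both compilations make progress concurrently; the first error wins
    // and the combined failure carries one shared piece of context.
    let (leader, follower) =
        tokio::try_join!(compile_for("leader"), compile_for("follower"))
            .context("Failed to compile contracts for leader/follower in parallel")?;
    println!("{leader} / {follower}");
    Ok(())
}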
+17 -15
View File
@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
use tracing::{debug, info};
use crate::metadata::{Metadata, MetadataFile};
use anyhow::Context as _;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)]
@@ -20,23 +21,24 @@ impl Corpus {
pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
let mut corpus = File::open(file_path.as_ref())
.map_err(anyhow::Error::from)
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?;
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
.with_context(|| {
format!(
"Failed to open and deserialize corpus file at {}",
file_path.as_ref().display()
)
})?;
let corpus_directory = file_path
.as_ref()
.canonicalize()
.context("Failed to canonicalize the path to the corpus file")?
.parent()
.context("Corpus file has no parent")?
.to_path_buf();
for path in corpus.paths_iter_mut() {
*path = file_path
.as_ref()
.parent()
.ok_or_else(|| {
anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display())
})?
.canonicalize()
.map_err(|error| {
anyhow::anyhow!(
"Failed to canonicalize path to corpus '{}': {error}",
path.display()
)
})?
.join(path.as_path())
*path = corpus_directory.join(path.as_path())
}
Ok(corpus)
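
The corpus change above replaces per-entry parent lookups with a single canonicalization of the corpus file, joining every relative entry onto that directory. A minimal sketch of the same idea, assuming anyhow (resolve_corpus_paths is illustrative):

use std::path::{Path, PathBuf};

use anyhow::Context as _;

// Resolve each corpus entry relative to the directory that contains the
// corpus file itself, mirroring the canonicalize-once approach above.
fn resolve_corpus_paths(corpus_file: &Path, entries: &mut [PathBuf]) -> anyhow::Result<()> {
    let corpus_directory = corpus_file
        .canonicalize()
        .context("Failed to canonicalize the path to the corpus file")?
        .parent()
        .context("Corpus file has no parent")?
        .to_path_buf();
    for entry in entries {
        *entry = corpus_directory.join(entry.as_path());
    }
    Ok(())
}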
+38 -14
View File
@@ -268,7 +268,11 @@ impl Input {
) -> anyhow::Result<Bytes> {
match self.method {
Method::Deployer | Method::Fallback => {
let calldata = self.calldata.calldata(resolver, context).await?;
let calldata = self
.calldata
.calldata(resolver, context)
.await
.context("Failed to produce calldata for deployer/fallback method")?;
Ok(calldata.into())
}
@@ -283,14 +287,15 @@ impl Input {
// Overloads are handled by providing the full function signature in the "function
// name".
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
let selector = if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
let selector =
if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name)
.context(
"Failed to parse the provided function name into a function signature",
)?
.selector()
} else {
abi.functions()
} else {
abi.functions()
.find(|function| function.signature().starts_with(function_name))
.ok_or_else(|| {
anyhow::anyhow!(
@@ -298,9 +303,13 @@ impl Input {
function_name,
&self.instance
)
})?
})
.with_context(|| format!(
"Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance
))?
.selector()
};
};
// Allocating a vector that we will be using for the calldata. The vector size will be:
// 4 bytes for the function selector.
@@ -312,7 +321,8 @@ impl Input {
calldata.extend(selector.0);
self.calldata
.calldata_into_slice(&mut calldata, resolver, context)
.await?;
.await
.context("Failed to append encoded argument to calldata buffer")?;
Ok(calldata.into())
}
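
For context on the buffer layout above: ABI calldata is the 4-byte function selector (the first four bytes of the keccak-256 hash of the canonical signature) followed by the encoded arguments. The real code obtains the selector through alloy's Function type; this minimal sketch computes it directly, assuming only alloy_primitives (encode_call is illustrative):

use alloy_primitives::keccak256;

// Compute the 4-byte selector for a canonical signature and lay the
// calldata buffer out as selector-then-arguments, as the code above does.
fn encode_call(signature: &str, encoded_args: &[u8]) -> Vec<u8> {
    let hash = keccak256(signature.as_bytes());
    let mut calldata = Vec::with_capacity(4 + encoded_args.len());
    calldata.extend_from_slice(&hash[..4]);
    calldata.extend_from_slice(encoded_args);
    calldata
}

#[test]
fn transfer_selector() {
    // transfer(address,uint256) has the well-known selector 0xa9059cbb.
    let calldata = encode_call("transfer(address,uint256)", &[]);
    assert_eq!(&calldata[..4], &[0xa9, 0x05, 0x9c, 0xbb]);
}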
@@ -325,7 +335,10 @@ impl Input {
resolver: &impl ResolverApi,
context: ResolutionContext<'_>,
) -> anyhow::Result<TransactionRequest> {
let input_data = self.encoded_input(resolver, context).await?;
let input_data = self
.encoded_input(resolver, context)
.await
.context("Failed to encode input bytes for transaction request")?;
let transaction_request = TransactionRequest::default().from(self.caller).value(
self.value
.map(|value| value.into_inner())
@@ -437,7 +450,8 @@ impl Calldata {
})
.buffered(0xFF)
.try_collect::<Vec<_>>()
.await?;
.await
.context("Failed to resolve one or more calldata arguments")?;
buffer.extend(resolved.into_iter().flatten());
}
@@ -478,7 +492,10 @@ impl Calldata {
std::borrow::Cow::Borrowed(other)
};
let this = this.resolve(resolver, context).await?;
let this = this
.resolve(resolver, context)
.await
.context("Failed to resolve calldata item during equivalence check")?;
let other = U256::from_be_slice(&other);
Ok(this == other)
})
@@ -664,17 +681,24 @@ impl<T: AsRef<str>> CalldataToken<T> {
let current_block_number = match context.tip_block_number() {
Some(block_number) => *block_number,
None => resolver.last_block_number().await?,
None => resolver.last_block_number().await.context(
"Failed to query last block number while resolving $BLOCK_HASH",
)?,
};
let desired_block_number = current_block_number.saturating_sub(offset);
let block_hash = resolver.block_hash(desired_block_number.into()).await?;
let block_hash = resolver
.block_hash(desired_block_number.into())
.await
.context("Failed to resolve block hash for desired block number")?;
Ok(U256::from_be_bytes(block_hash.0))
} else if item == Self::BLOCK_NUMBER_VARIABLE {
let current_block_number = match context.tip_block_number() {
Some(block_number) => *block_number,
None => resolver.last_block_number().await?,
None => resolver.last_block_number().await.context(
"Failed to query last block number while resolving $BLOCK_NUMBER",
)?,
};
Ok(U256::from(current_block_number))
} else if item == Self::BLOCK_TIMESTAMP_VARIABLE {
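
The $BLOCK_HASH branch above resolves an offset against the tip: use the context's tip block number when present, otherwise ask the resolver, then subtract the offset with saturation before fetching the hash. A minimal sketch of that arithmetic, assuming plain u64 block numbers (resolve_block_for_offset is illustrative):

// Pick the reference block (an explicit tip wins over the chain head) and
// walk back `offset` blocks without underflowing past the genesis block.
fn resolve_block_for_offset(tip: Option<u64>, chain_head: u64, offset: u64) -> u64 {
    tip.unwrap_or(chain_head).saturating_sub(offset)
}

#[test]
fn saturates_at_genesis() {
    assert_eq!(resolve_block_for_offset(None, 3, 10), 0);
    assert_eq!(resolve_block_for_offset(Some(100), 3, 10), 90);
}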
+9 -1
View File
@@ -132,7 +132,15 @@ impl Metadata {
) in contracts
{
let alias = alias.clone();
let absolute_path = directory.join(contract_source_path).canonicalize()?;
let absolute_path = directory
.join(contract_source_path)
.canonicalize()
.map_err(|error| {
anyhow::anyhow!(
"Failed to canonicalize contract source path '{}': {error}",
directory.join(contract_source_path).display()
)
})?;
let contract_ident = contract_ident.clone();
sources.insert(
+19 -5
View File
@@ -1,3 +1,4 @@
use anyhow::Context;
use regex::Regex;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use serde::{Deserialize, Serialize};
@@ -44,21 +45,34 @@ impl FromStr for ParsedMode {
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(ModePipeline::from_str(m.as_str())?),
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(ModeOptimizerSetting::from_str(m.as_str())?),
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(semver::VersionReq::parse(m.as_str()).map_err(|e| {
anyhow::anyhow!("Cannot parse the version requirement '{}': {e}", m.as_str())
})?),
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
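
The parser above pulls optional components out of named regex captures and wraps each fallible conversion in context. A minimal sketch of the same capture-then-parse shape, assuming the regex and semver crates (the pattern below is deliberately simplified and is not the crate's real mode grammar):

use anyhow::Context as _;
use regex::Regex;

// Parse an optional trailing version requirement out of a mode-like
// string such as "Y+ >=0.8.0", mirroring the capture handling above.
fn parse_version_req(mode: &str) -> anyhow::Result<Option<semver::VersionReq>> {
    let re = Regex::new(r"^(?P<pipeline>[YE])(?P<optimize_flag>[+-])?(?:\s+(?P<version>.+))?$")
        .context("Failed to compile the mode regex")?;
    let caps = re
        .captures(mode)
        .with_context(|| format!("Mode string '{mode}' does not match the expected shape"))?;
    match caps.name("version") {
        Some(m) => semver::VersionReq::parse(m.as_str())
            .with_context(|| format!("Cannot parse the version requirement '{}'", m.as_str()))
            .map(Some),
        None => Ok(None),
    }
}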
+1 -1
View File
@@ -27,7 +27,7 @@ sp-core = { workspace = true }
sp-runtime = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
tempfile = { workspace = true }
tokio = { workspace = true }
[lints]
+106 -47
View File
@@ -101,10 +101,13 @@ impl GethNode {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
create_dir_all(&self.base_directory)
.context("Failed to create base directory for geth node")?;
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)
.context("Failed to deserialize geth genesis JSON")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -116,7 +119,11 @@ impl GethNode {
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
serde_json::to_writer(File::create(&genesis_path)?, &genesis)?;
serde_json::to_writer(
File::create(&genesis_path).context("Failed to create geth genesis file")?,
&genesis,
)
.context("Failed to serialize geth genesis JSON to file")?;
let mut child = Command::new(&self.geth)
.arg("--state.scheme")
@@ -127,16 +134,22 @@ impl GethNode {
.arg(genesis_path)
.stderr(Stdio::piped())
.stdout(Stdio::null())
.spawn()?;
.spawn()
.context("Failed to spawn geth --init process")?;
let mut stderr = String::new();
child
.stderr
.take()
.expect("should be piped")
.read_to_string(&mut stderr)?;
.read_to_string(&mut stderr)
.context("Failed to read geth --init stderr")?;
if !child.wait()?.success() {
if !child
.wait()
.context("Failed waiting for geth --init process to finish")?
.success()
{
anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
}
@@ -161,8 +174,11 @@ impl GethNode {
let stdout_logs_file = open_options
.clone()
.open(self.geth_stdout_log_file_path())?;
let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
.open(self.geth_stdout_log_file_path())
.context("Failed to open geth stdout logs file")?;
let stderr_logs_file = open_options
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file")?;
self.handle = Command::new(&self.geth)
.arg("--dev")
.arg("--datadir")
@@ -182,14 +198,24 @@ impl GethNode {
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(stderr_logs_file.try_clone()?)
.stdout(stdout_logs_file.try_clone()?)
.spawn()?
.stderr(
stderr_logs_file
.try_clone()
.context("Failed to clone geth stderr log file handle")?,
)
.stdout(
stdout_logs_file
.try_clone()
.context("Failed to clone geth stdout log file handle")?,
)
.spawn()
.context("Failed to spawn geth node process")?
.into();
if let Err(error) = self.wait_ready() {
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(error);
}
@@ -211,7 +237,8 @@ impl GethNode {
.write(false)
.append(false)
.truncate(false)
.open(self.geth_stderr_log_file_path())?;
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = Duration::from_millis(self.start_timeout);
let mut stderr = BufReader::new(logs_file).lines();
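
The readiness check above amounts to tailing a log file for a marker string until a deadline passes. A minimal sketch of that loop, assuming std plus anyhow (wait_for_marker is illustrative; the real implementation also handles geth's specific stderr format):

use std::{
    fs::File,
    io::{BufRead, BufReader},
    path::Path,
    thread,
    time::{Duration, Instant},
};

// Poll a log file until `marker` appears on some line or `timeout` elapses.
// Re-polling the exhausted iterator picks up lines appended after EOF.
fn wait_for_marker(log_path: &Path, marker: &str, timeout: Duration) -> anyhow::Result<()> {
    let deadline = Instant::now() + timeout;
    let mut lines = BufReader::new(File::open(log_path)?).lines();
    while Instant::now() < deadline {
        for line in lines.by_ref() {
            if line?.contains(marker) {
                return Ok(());
            }
        }
        // Reached EOF without finding the marker; wait for more output.
        thread::sleep(Duration::from_millis(200));
    }
    anyhow::bail!("timed out waiting for readiness marker {marker:?}")
}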
@@ -277,11 +304,18 @@ impl EthereumNode for GethNode {
&self,
transaction: TransactionRequest,
) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
let provider = self.provider().await?;
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?;
let pending_transaction = provider.send_transaction(transaction).await.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.inspect_err(
|err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used
@@ -335,7 +369,11 @@ impl EthereumNode for GethNode {
transaction: &TransactionReceipt,
trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let provider = Arc::new(self.provider().await?);
let provider = Arc::new(
self.provider()
.await
.context("Failed to create provider for tracing")?,
);
poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -371,8 +409,10 @@ impl EthereumNode for GethNode {
});
match self
.trace_transaction(transaction, trace_options)
.await?
.try_into_pre_state_frame()?
.await
.context("Failed to trace transaction for prestate diff")?
.try_into_pre_state_frame()
.context("Failed to convert trace into pre-state frame")?
{
PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"),
@@ -382,7 +422,8 @@ impl EthereumNode for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_balance(address)
.await
.map_err(Into::into)
@@ -395,7 +436,8 @@ impl EthereumNode for GethNode {
keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_proof(address, keys)
.latest()
.await
@@ -407,7 +449,8 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_chain_id()
.await
.map_err(Into::into)
@@ -416,7 +459,8 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_transaction_receipt(*tx_hash)
.await?
.context("Failed to get the transaction receipt")
@@ -426,40 +470,48 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.gas_limit as _)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.beneficiary)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.and_then(|block| {
block
.header
@@ -471,27 +523,32 @@ impl ResolverApi for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.hash)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?")
.map(|block| block.header.timestamp)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider()
.await?
.await
.context("Failed to get the Geth provider")?
.get_block_number()
.await
.map_err(Into::into)
@@ -576,8 +633,10 @@ impl Node for GethNode {
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.spawn()
.context("Failed to spawn geth --version process")?
.wait_with_output()
.context("Failed to wait for geth --version output")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
@@ -605,7 +664,7 @@ impl Drop for GethNode {
mod tests {
use revive_dt_config::Arguments;
use temp_dir::TempDir;
use tempfile::TempDir;
use crate::{GENESIS_JSON, Node};
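
This file (and the workspace as a whole) swaps the temp-dir crate for tempfile. For reference, the tempfile type is nearly drop-in; a minimal sketch of a test using it, assuming tempfile 3.x:

use tempfile::TempDir;

#[test]
fn temp_dir_round_trip() -> anyhow::Result<()> {
    // The directory lives until `dir` is dropped, then is deleted.
    let dir = TempDir::new()?;
    let file_path = dir.path().join("genesis.json");
    std::fs::write(&file_path, "{}")?;
    assert_eq!(std::fs::read_to_string(&file_path)?, "{}");
    Ok(())
}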
+134 -58
View File
@@ -54,6 +54,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
pub struct KitchensinkNode {
id: u32,
substrate_binary: PathBuf,
dev_node_binary: PathBuf,
eth_proxy_binary: PathBuf,
rpc_url: String,
base_directory: PathBuf,
@@ -63,6 +64,7 @@ pub struct KitchensinkNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
use_kitchensink_not_dev_node: bool,
/// This vector stores [`File`] objects that we use for logging and want to flush when the
/// node object is dropped. We do not store them in a structured fashion at the moment (in
/// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -94,18 +96,30 @@ impl KitchensinkNode {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
create_dir_all(&self.base_directory)?;
create_dir_all(&self.logs_directory)?;
create_dir_all(&self.base_directory)
.context("Failed to create base directory for kitchensink node")?;
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for kitchensink node")?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// Note: we do not pipe the logs of this process to a separate file since this is just a
// once-off export of the default chain spec and not part of the long-running node process.
let output = Command::new(&self.substrate_binary)
.arg("export-chain-spec")
.arg("--chain")
.arg("dev")
.output()?;
let output = if self.use_kitchensink_not_dev_node {
Command::new(&self.substrate_binary)
.arg("export-chain-spec")
.arg("--chain")
.arg("dev")
.output()
.context("Failed to export the chain-spec")?
} else {
Command::new(&self.dev_node_binary)
.arg("build-spec")
.arg("--chain")
.arg("dev")
.output()
.context("Failed to export the chain-spec")?
};
if !output.status.success() {
anyhow::bail!(
@@ -114,8 +128,10 @@ impl KitchensinkNode {
);
}
let content = String::from_utf8(output.stdout)?;
let mut chainspec_json: JsonValue = serde_json::from_str(&content)?;
let content = String::from_utf8(output.stdout)
.context("Failed to decode substrate export-chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
@@ -137,7 +153,8 @@ impl KitchensinkNode {
})
.collect();
let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)?;
let mut genesis = serde_json::from_str::<Genesis>(genesis)
.context("Failed to deserialize EVM genesis JSON for kitchensink")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
@@ -148,7 +165,8 @@ impl KitchensinkNode {
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
self.extract_balance_from_genesis_file(&genesis)?
self.extract_balance_from_genesis_file(&genesis)
.context("Failed to extract balances from EVM genesis JSON")?
};
merged_balances.append(&mut eth_balances);
@@ -156,9 +174,11 @@ impl KitchensinkNode {
json!(merged_balances);
serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)?,
std::fs::File::create(&template_chainspec_path)
.context("Failed to create kitchensink template chainspec file")?,
&chainspec_json,
)?;
)
.context("Failed to write kitchensink template chainspec JSON")?;
Ok(self)
}
@@ -184,11 +204,18 @@ impl KitchensinkNode {
// Start Substrate node
let kitchensink_stdout_logs_file = open_options
.clone()
.open(self.kitchensink_stdout_log_file_path())?;
.open(self.kitchensink_stdout_log_file_path())
.context("Failed to open kitchensink stdout logs file")?;
let kitchensink_stderr_logs_file = open_options
.clone()
.open(self.kitchensink_stderr_log_file_path())?;
self.process_substrate = Command::new(&self.substrate_binary)
.open(self.kitchensink_stderr_log_file_path())
.context("Failed to open kitchensink stderr logs file")?;
let node_binary_path = if self.use_kitchensink_not_dev_node {
self.substrate_binary.as_path()
} else {
self.dev_node_binary.as_path()
};
self.process_substrate = Command::new(node_binary_path)
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
@@ -206,9 +233,18 @@ impl KitchensinkNode {
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(kitchensink_stdout_logs_file.try_clone()?)
.stderr(kitchensink_stderr_logs_file.try_clone()?)
.spawn()?
.stdout(
kitchensink_stdout_logs_file
.try_clone()
.context("Failed to clone kitchensink stdout log file handle")?,
)
.stderr(
kitchensink_stderr_logs_file
.try_clone()
.context("Failed to clone kitchensink stderr log file handle")?,
)
.spawn()
.context("Failed to spawn substrate node process")?
.into();
// Give the node a moment to boot
@@ -217,14 +253,18 @@ impl KitchensinkNode {
Self::SUBSTRATE_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after substrate start error")?;
return Err(error);
};
let eth_proxy_stdout_logs_file = open_options
.clone()
.open(self.proxy_stdout_log_file_path())?;
let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?;
.open(self.proxy_stdout_log_file_path())
.context("Failed to open eth-proxy stdout logs file")?;
let eth_proxy_stderr_logs_file = open_options
.open(self.proxy_stderr_log_file_path())
.context("Failed to open eth-proxy stderr logs file")?;
self.process_proxy = Command::new(&self.eth_proxy_binary)
.arg("--dev")
.arg("--rpc-port")
@@ -234,9 +274,18 @@ impl KitchensinkNode {
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(eth_proxy_stdout_logs_file.try_clone()?)
.stderr(eth_proxy_stderr_logs_file.try_clone()?)
.spawn()?
.stdout(
eth_proxy_stdout_logs_file
.try_clone()
.context("Failed to clone eth-proxy stdout log file handle")?,
)
.stderr(
eth_proxy_stderr_logs_file
.try_clone()
.context("Failed to clone eth-proxy stderr log file handle")?,
)
.spawn()
.context("Failed to spawn eth-proxy process")?
.into();
if let Err(error) = Self::wait_ready(
@@ -244,7 +293,8 @@ impl KitchensinkNode {
Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()?;
self.shutdown()
.context("Failed to gracefully shutdown after eth-proxy start error")?;
return Err(error);
};
@@ -369,11 +419,14 @@ impl EthereumNode for KitchensinkNode {
) -> anyhow::Result<TransactionReceipt> {
let receipt = self
.provider()
.await?
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await?
.await
.context("Failed to submit transaction to kitchensink proxy")?
.get_receipt()
.await?;
.await
.context("Failed to fetch transaction receipt from kitchensink proxy")?;
Ok(receipt)
}
@@ -383,11 +436,12 @@ impl EthereumNode for KitchensinkNode {
trace_options: GethDebugTracingOptions,
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
let tx_hash = transaction.transaction_hash;
Ok(self
.provider()
.await?
self.provider()
.await
.context("Failed to create provider for debug tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await?)
.await
.context("Failed to obtain debug trace from kitchensink proxy")
}
async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
@@ -408,7 +462,8 @@ impl EthereumNode for KitchensinkNode {
async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_balance(address)
.await
.map_err(Into::into)
@@ -420,7 +475,8 @@ impl EthereumNode for KitchensinkNode {
keys: Vec<StorageKey>,
) -> anyhow::Result<EIP1186AccountProofResponse> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_proof(address, keys)
.latest()
.await
@@ -431,7 +487,8 @@ impl EthereumNode for KitchensinkNode {
impl ResolverApi for KitchensinkNode {
async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_chain_id()
.await
.map_err(Into::into)
@@ -439,7 +496,8 @@ impl ResolverApi for KitchensinkNode {
async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_transaction_receipt(*tx_hash)
.await?
.context("Failed to get the transaction receipt")
@@ -448,37 +506,45 @@ impl ResolverApi for KitchensinkNode {
async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.gas_limit as _)
}
async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.beneficiary)
}
async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
}
async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.and_then(|block| {
block
.header
@@ -489,25 +555,30 @@ impl ResolverApi for KitchensinkNode {
async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.hash)
}
async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_by_number(number)
.await?
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
.await
.context("Failed to get the kitchensink block")?
.context("Failed to get the Kitchensink block, perhaps the chain has no blocks?")
.map(|block| block.header.timestamp)
}
async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
self.provider()
.await?
.await
.context("Failed to get the Kitchensink provider")?
.get_block_number()
.await
.map_err(Into::into)
@@ -533,6 +604,7 @@ impl Node for KitchensinkNode {
Self {
id,
substrate_binary: config.kitchensink.clone(),
dev_node_binary: config.revive_dev_node.clone(),
eth_proxy_binary: config.eth_proxy.clone(),
rpc_url: String::new(),
base_directory,
@@ -542,6 +614,7 @@ impl Node for KitchensinkNode {
wallet: Arc::new(wallet),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node,
// We know that we only need to store 4 files, so we can specify that capacity when creating
// the vector: the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4),
@@ -592,8 +665,10 @@ impl Node for KitchensinkNode {
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.spawn()
.context("Failed to spawn kitchensink --version")?
.wait_with_output()
.context("Failed to wait for kitchensink --version")?
.stdout;
Ok(String::from_utf8_lossy(&output).into())
}
@@ -1059,6 +1134,7 @@ mod tests {
Arguments {
kitchensink: PathBuf::from("substrate-node"),
eth_proxy: PathBuf::from("eth-rpc"),
use_kitchensink_not_dev_node: true,
..Default::default()
}
}
+6 -3
View File
@@ -44,8 +44,10 @@ where
nodes.push(
handle
.join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?,
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
.context("Failed to join node spawn thread")?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
.context("Node failed to spawn")?,
);
}
@@ -69,7 +71,8 @@ fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Resu
connection_string = node.connection_string(),
"Spawning node"
);
node.spawn(genesis)?;
node.spawn(genesis)
.context("Failed to spawn node process")?;
info!(
id = node.id(),
connection_string = node.connection_string(),
-1
View File
@@ -17,7 +17,6 @@ alloy-primitives = { workspace = true }
anyhow = { workspace = true }
paste = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
+24 -25
View File
@@ -4,17 +4,15 @@
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fs::OpenOptions,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use alloy_primitives::Address;
use anyhow::Result;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode, SolcCompiler};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
@@ -113,7 +111,10 @@ impl ReportAggregator {
debug!("Report aggregation completed");
let file_name = {
let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();
let current_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
.as_secs();
let mut file_name = current_timestamp.to_string();
file_name.push_str(".json");
file_name
@@ -124,8 +125,16 @@ impl ReportAggregator {
.write(true)
.truncate(true)
.read(false)
.open(file_path)?;
serde_json::to_writer_pretty(file, &self.report)?;
.open(&file_path)
.with_context(|| {
format!(
"Failed to open report file for writing: {}",
file_path.display()
)
})?;
serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
Ok(())
}
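
The report-writing change above names the output file after the current UNIX timestamp and attaches a path-specific context to each I/O step. A minimal sketch of the same flow, assuming serde_json and anyhow (write_report is illustrative):

use std::{
    fs::OpenOptions,
    path::Path,
    time::{SystemTime, UNIX_EPOCH},
};

use anyhow::Context as _;

// Write a serializable report to `<dir>/<unix-seconds>.json`, mirroring
// the open options and error contexts used above.
fn write_report<T: serde::Serialize>(dir: &Path, report: &T) -> anyhow::Result<()> {
    let seconds = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
        .as_secs();
    let file_path = dir.join(format!("{seconds}.json"));
    let file = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(&file_path)
        .with_context(|| format!("Failed to open report file for writing: {}", file_path.display()))?;
    serde_json::to_writer_pretty(&file, report)
        .with_context(|| format!("Failed to serialize report JSON to {}", file_path.display()))
}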
@@ -289,8 +298,7 @@ impl ReportAggregator {
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
compiler_output,
});
@@ -318,8 +326,7 @@ impl ReportAggregator {
execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
compiler_output,
});
@@ -341,8 +348,7 @@ impl ReportAggregator {
execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
});
}
@@ -363,8 +369,7 @@ impl ReportAggregator {
execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason,
compiler_version: event.compiler_version,
compiler_path: event.compiler_path,
solc_info: event.solc_info,
compiler_input,
});
}
@@ -517,10 +522,8 @@ pub enum CompilationStatus {
Success {
/// A flag with information on whether the compilation artifacts were cached or not.
is_cached: bool,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
@@ -535,12 +538,8 @@ pub enum CompilationStatus {
Failure {
/// The failure reason.
reason: String,
/// The version of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not
/// cached and the compiler was invoked.
+13 -20
View File
@@ -1,15 +1,15 @@
//! The types associated with the events sent by the runner to the reporter.
#![allow(dead_code)]
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use std::{collections::BTreeMap, sync::Arc};
use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_compiler::{CompilerInput, CompilerOutput, SolcCompiler};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
@@ -546,13 +546,11 @@ define_event! {
PreLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
/// anew.
is_cached: bool,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -564,13 +562,11 @@ define_event! {
PostLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Version,
/// The path of the compiler used to compile the contracts.
compiler_path: PathBuf,
/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
/// anew.
is_cached: bool,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -582,10 +578,8 @@ define_event! {
PreLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -597,10 +591,8 @@ define_event! {
PostLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>,
/// The version and path of the solc compiler used to compile the contracts.
solc_info: SolcCompiler,
/// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>,
@@ -630,7 +622,8 @@ define_event! {
impl RunnerEventReporter {
pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
self.report_subscribe_to_events_event(tx)?;
self.report_subscribe_to_events_event(tx)
.context("Failed to send subscribe request to reporter task")?;
rx.await.map_err(Into::into)
}
}
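
The subscribe helper above is a oneshot handshake: the caller sends a oneshot sender to the reporter task, which replies with a fresh broadcast receiver. A minimal self-contained sketch of that pattern, assuming tokio (Event and the channel wiring are illustrative, not the crate's real types):

use tokio::sync::{broadcast, mpsc, oneshot};

#[derive(Clone, Debug)]
struct Event(&'static str);

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Subscription requests flow to the task that owns the broadcast sender.
    let (req_tx, mut req_rx) =
        mpsc::unbounded_channel::<oneshot::Sender<broadcast::Receiver<Event>>>();
    let (event_tx, _) = broadcast::channel::<Event>(16);

    // The owning task hands out receivers on request.
    let publisher = event_tx.clone();
    tokio::spawn(async move {
        while let Some(reply) = req_rx.recv().await {
            let _ = reply.send(publisher.subscribe());
        }
    });

    // A subscriber performs the oneshot handshake, then listens.
    let (tx, rx) = oneshot::channel();
    req_tx.send(tx)?;
    let mut events = rx.await?;
    event_tx.send(Event("compilation-finished"))?;
    println!("{:?}", events.recv().await?);
    Ok(())
}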
+45 -8
View File
@@ -12,6 +12,7 @@ use std::{
use tokio::sync::Mutex;
use crate::download::SolcDownloader;
use anyhow::Context;
pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
@@ -31,8 +32,20 @@ pub(crate) async fn get_or_download(
return Ok(target_file);
}
create_dir_all(target_directory)?;
download_to_file(&target_file, downloader).await?;
create_dir_all(&target_directory).with_context(|| {
format!(
"Failed to create solc cache directory: {}",
target_directory.display()
)
})?;
download_to_file(&target_file, downloader)
.await
.with_context(|| {
format!(
"Failed to write downloaded solc to {}",
target_file.display()
)
})?;
cache.insert(target_file.clone());
Ok(target_file)
@@ -45,14 +58,26 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
#[cfg(unix)]
{
let mut permissions = file.metadata()?.permissions();
let mut permissions = file
.metadata()
.with_context(|| format!("Failed to read metadata for {}", path.display()))?
.permissions();
permissions.set_mode(permissions.mode() | 0o111);
file.set_permissions(permissions)?;
file.set_permissions(permissions).with_context(|| {
format!("Failed to set executable permissions on {}", path.display())
})?;
}
let mut file = BufWriter::new(file);
file.write_all(&downloader.download().await?)?;
file.flush()?;
file.write_all(
&downloader
.download()
.await
.context("Failed to download solc binary bytes")?,
)
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
file.flush()
.with_context(|| format!("Failed to flush file {}", path.display()))?;
drop(file);
#[cfg(target_os = "macos")]
@@ -63,8 +88,20 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
.stderr(std::process::Stdio::null())
.stdout(std::process::Stdio::null())
.spawn()?
.wait()?;
.spawn()
.with_context(|| {
format!(
"Failed to spawn xattr to remove quarantine attribute on {}",
path.display()
)
})?
.wait()
.with_context(|| {
format!(
"Failed waiting for xattr operation to complete on {}",
path.display()
)
})?;
Ok(())
}
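
The download path above marks the fetched binary executable by OR-ing the 0o111 bits into its mode (and, on macOS, strips the quarantine attribute via xattr). A minimal sketch of the permissions step, assuming a unix target:

#[cfg(unix)]
fn make_executable(path: &std::path::Path) -> std::io::Result<()> {
    use std::os::unix::fs::PermissionsExt;

    // OR in the execute bits for user, group, and other, leaving the
    // existing read/write bits untouched.
    let mut permissions = std::fs::metadata(path)?.permissions();
    permissions.set_mode(permissions.mode() | 0o111);
    std::fs::set_permissions(path, permissions)
}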
+28 -6
View File
@@ -11,6 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256};
use crate::list::List;
use anyhow::Context;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default);
@@ -30,7 +31,12 @@ impl List {
return Ok(list.clone());
}
let body: List = reqwest::get(url).await?.json().await?;
let body: List = reqwest::get(url)
.await
.with_context(|| format!("Failed to GET solc list from {url}"))?
.json()
.await
.with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
LIST_CACHE.lock().unwrap().insert(url, body.clone());
@@ -68,14 +74,15 @@ impl SolcDownloader {
}),
VersionOrRequirement::Requirement(requirement) => {
let Some(version) = List::download(list)
.await?
.await
.with_context(|| format!("Failed to download solc builds list from {list}"))?
.builds
.into_iter()
.map(|build| build.version)
.filter(|version| requirement.matches(version))
.max()
else {
anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
anyhow::bail!("Failed to find a version that satisfies {requirement}");
};
Ok(Self {
version,
@@ -107,11 +114,20 @@ impl SolcDownloader {
/// Errors out if the download fails or the digest of the downloaded file
/// mismatches the expected digest from the release [List].
pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
let builds = List::download(self.list).await?.builds;
let builds = List::download(self.list)
.await
.with_context(|| format!("Failed to download solc builds list from {}", self.list))?
.builds;
let build = builds
.iter()
.find(|build| build.version == self.version)
.ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))?;
.ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
.with_context(|| {
format!(
"Requested solc version {} was not found in builds list fetched from {}",
self.version, self.list
)
})?;
let path = build.path.clone();
let expected_digest = build
@@ -121,7 +137,13 @@ impl SolcDownloader {
.to_string();
let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
let file = reqwest::get(url).await?.bytes().await?.to_vec();
let file = reqwest::get(&url)
.await
.with_context(|| format!("Failed to GET solc binary from {url}"))?
.bytes()
.await
.with_context(|| format!("Failed to read solc binary bytes from {url}"))?
.to_vec();
if hex::encode(Sha256::digest(&file)) != expected_digest {
anyhow::bail!("sha256 mismatch for solc version {}", self.version);
+21 -14
View File
@@ -3,10 +3,9 @@
//!
//! [0]: https://binaries.soliditylang.org
use std::path::{Path, PathBuf};
use cache::get_or_download;
use download::SolcDownloader;
use std::path::{Path, PathBuf};
use revive_dt_common::types::VersionOrRequirement;
@@ -14,6 +13,25 @@ pub mod cache;
pub mod download;
pub mod list;
/// Return a [`SolcDownloader`] which can be used to download a `solc`
/// binary or return the resolved version it will download.
async fn downloader(
version: impl Into<VersionOrRequirement>,
wasm: bool,
) -> anyhow::Result<SolcDownloader> {
if wasm {
SolcDownloader::wasm(version).await
} else if cfg!(target_os = "linux") {
SolcDownloader::linux(version).await
} else if cfg!(target_os = "macos") {
SolcDownloader::macosx(version).await
} else if cfg!(target_os = "windows") {
SolcDownloader::windows(version).await
} else {
unimplemented!()
}
}
/// Downloads the solc binary for Wasm if `wasm` is set, otherwise for
/// the target platform.
///
@@ -24,17 +42,6 @@ pub async fn download_solc(
version: impl Into<VersionOrRequirement>,
wasm: bool,
) -> anyhow::Result<PathBuf> {
let downloader = if wasm {
SolcDownloader::wasm(version).await
} else if cfg!(target_os = "linux") {
SolcDownloader::linux(version).await
} else if cfg!(target_os = "macos") {
SolcDownloader::macosx(version).await
} else if cfg!(target_os = "windows") {
SolcDownloader::windows(version).await
} else {
unimplemented!()
}?;
let downloader = downloader(version, wasm).await?;
get_or_download(cache_directory, &downloader).await
}
Executable
+102
View File
@@ -0,0 +1,102 @@
#!/bin/bash
# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool
set -e # Exit on any error
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"
# Optional positional argument: path to polkadot-sdk directory
POLKADOT_SDK_DIR="${1:-}"
# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""
# Check if test repo already exists
if [ -d "$TEST_REPO_DIR" ]; then
echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}"
cd "$TEST_REPO_DIR"
git pull
cd ..
else
echo -e "${GREEN}Cloning test repository...${NC}"
git clone "$TEST_REPO_URL"
fi
# If polkadot-sdk path is provided, verify and use binaries from there; build if needed
if [ -n "$POLKADOT_SDK_DIR" ]; then
if [ ! -d "$POLKADOT_SDK_DIR" ]; then
echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}"
exit 1
fi
POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR")
echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}"
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
fi
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
if [ ! -x "$bin" ]; then
echo -e "${RED}Expected binary not found after build: $bin${NC}"
exit 1
fi
done
else
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi
# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "$ABSOLUTE_PATH"
}
EOF
echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"
echo -e "${GREEN}Starting differential tests...${NC}"
echo "This may take a while..."
echo ""
# Run the tool
RUST_LOG="error" cargo run --release -- \
--corpus "$CORPUS_FILE" \
--workdir "$WORKDIR" \
--number-of-nodes 5 \
--kitchensink "$SUBSTRATE_NODE_BIN" \
--revive-dev-node "$REVIVE_DEV_NODE_BIN" \
--eth_proxy "$ETH_RPC_BIN" \
> logs.log \
2> output.log
echo -e "${GREEN}=== Test run completed! ===${NC}"