Compare commits

...

29 Commits

Author SHA1 Message Date
pgherveou 1659164310 save before flight 2025-10-13 13:05:07 +02:00
pgherveou 0a68800856 nit 2025-10-08 18:26:43 +02:00
pgherveou 8303d789cd use 10^6 for gas filler 2025-10-08 15:15:08 +02:00
pgherveou 40bf44fe58 fix 2025-10-08 14:50:50 +02:00
pgherveou ba8ad03290 fix 2025-10-08 14:06:03 +02:00
pgherveou 3dd99f3ac8 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 11:42:37 +00:00
pgherveou 6618463c68 fix 2025-10-08 11:40:08 +00:00
pgherveou dffb80ac0a fixes 2025-10-08 11:18:31 +02:00
pgherveou 43a1114337 custom rpc port 2025-10-08 11:10:46 +02:00
pgherveou 3a07ea042b fix 2025-10-08 10:45:49 +02:00
pgherveou 9e2aa972db fix 2025-10-08 10:33:59 +02:00
pgherveou 86f2173e8b nit 2025-10-08 10:14:22 +02:00
pgherveou 6e658aec49 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:04:38 +02:00
pgherveou 1aba74ec3e fix 2025-10-08 10:03:00 +02:00
pgherveou 180bd64bc5 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:01:36 +02:00
pgherveou 967cbac349 fix 2025-10-08 10:00:32 +02:00
pgherveou a8d84c8360 fix 2025-10-08 09:59:53 +02:00
pgherveou c83a755416 Merge branch 'main' into pg/fmt 2025-10-08 09:59:42 +02:00
pgherveou 0711216539 add fmt check 2025-10-08 09:57:28 +02:00
pgherveou b40c17c0af fixes 2025-10-08 09:52:13 +02:00
pgherveou 8ae994f9de fixes 2025-10-08 09:43:36 +02:00
pgherveou 3f3cbfa934 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:28:45 +02:00
pgherveou c676114fe1 apply fmt 2025-10-08 09:27:11 +02:00
pgherveou 92885351ed use polkadot-sdk rustfmt 2025-10-08 09:26:24 +02:00
pgherveou e16f8ebf59 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:19:21 +02:00
pgherveou d482808eb2 add rustfmt.toml 2025-10-08 07:18:17 +00:00
pgherveou 1f84ce6f61 fix lint 2025-10-08 06:28:57 +00:00
pgherveou 765569a8b6 fix 2025-10-08 08:22:26 +02:00
pgherveou 6e64f678ee ml-runner init 2025-10-07 16:10:43 +00:00
60 changed files with 12511 additions and 12206 deletions
-1
@@ -13,4 +13,3 @@ resolc-compiler-tests
 workdir
 !/schema.json
-!/dev-genesis.json
+25
@@ -0,0 +1,25 @@
+# Basic
+edition = "2024"
+hard_tabs = true
+max_width = 100
+use_small_heuristics = "Max"
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+# Consistency
+newline_style = "Unix"
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = false
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = false
+use_field_init_shorthand = true
+# Format comments
+comment_width = 100
+wrap_comments = true
+21
@@ -4526,6 +4526,27 @@ dependencies = [
  "windows-sys 0.59.0",
 ]

+[[package]]
+name = "ml-test-runner"
+version = "0.1.0"
+dependencies = [
+ "alloy",
+ "anyhow",
+ "clap",
+ "revive-dt-common",
+ "revive-dt-compiler",
+ "revive-dt-config",
+ "revive-dt-core",
+ "revive-dt-format",
+ "revive-dt-node",
+ "revive-dt-node-interaction",
+ "revive-dt-report",
+ "temp-dir",
+ "tokio",
+ "tracing",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "moka"
 version = "0.12.10"
+1 -1
@@ -1,7 +1,7 @@
 .PHONY: format clippy test machete

 format:
-	cargo fmt --all -- --check
+	cargo +nightly fmt --all -- --check

 clippy:
 	cargo clippy --all-features --workspace -- --deny warnings
+31 -32
@@ -1,49 +1,48 @@
 //! This module implements a cached file system allowing for results to be stored in-memory rather
 //! rather being queried from the file system again.

-use std::fs;
-use std::io::{Error, Result};
-use std::path::{Path, PathBuf};
+use std::{
+    fs,
+    io::{Error, Result},
+    path::{Path, PathBuf},
+};

 use moka::sync::Cache;
 use once_cell::sync::Lazy;

 pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
     static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));

     let path = path.as_ref().canonicalize()?;
     match READ_CACHE.get(path.as_path()) {
         Some(content) => Ok(content),
         None => {
             let content = fs::read(path.as_path())?;
             READ_CACHE.insert(path, content.clone());
             Ok(content)
-        }
+        },
     }
 }

 pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
     let content = read(path)?;
     String::from_utf8(content).map_err(|_| {
-        Error::new(
-            std::io::ErrorKind::InvalidData,
-            "The contents of the file are not valid UTF8",
-        )
+        Error::new(std::io::ErrorKind::InvalidData, "The contents of the file are not valid UTF8")
     })
 }

 pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
     static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));

     let path = path.as_ref().canonicalize()?;
     match READ_DIR_CACHE.get(path.as_path()) {
         Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
         None => {
             let entries = fs::read_dir(path.as_path())?
                 .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
                 .collect();
             READ_DIR_CACHE.insert(path.clone(), entries);
             Ok(read_dir(path).unwrap())
-        }
+        },
     }
 }
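A minimal usage sketch of the cached read API above; the crate path revive_dt_common and the file path are assumptions for illustration, not taken from this diff:

    use revive_dt_common::cached_fs; // assumed re-export location of the module above

    fn load_twice() -> std::io::Result<()> {
        // The first call reads from disk and fills the in-memory cache.
        let first = cached_fs::read_to_string("tests/fixture.sol")?;
        // The second call for the same canonicalized path is served from the cache.
        let second = cached_fs::read_to_string("tests/fixture.sol")?;
        assert_eq!(first, second);
        Ok(())
    }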
+18 -21
@@ -1,6 +1,6 @@
 use std::{
     fs::{read_dir, remove_dir_all, remove_file},
     path::Path,
 };

 use anyhow::{Context, Result};
@@ -8,24 +8,21 @@ use anyhow::{Context, Result};
 /// This method clears the passed directory of all of the files and directories contained within
 /// without deleting the directory.
 pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
     for entry in read_dir(path.as_ref())
         .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
     {
         let entry = entry.with_context(|| {
-            format!(
-                "Failed to read an entry in directory: {}",
-                path.as_ref().display()
-            )
+            format!("Failed to read an entry in directory: {}", path.as_ref().display())
         })?;
         let entry_path = entry.path();

         if entry_path.is_file() {
             remove_file(&entry_path)
                 .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
         } else {
             remove_dir_all(&entry_path)
                 .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
         }
     }

     Ok(())
 }
+38 -43
@@ -1,5 +1,4 @@
-use std::ops::ControlFlow;
-use std::time::Duration;
+use std::{ops::ControlFlow, time::Duration};

 use anyhow::{Context as _, Result, anyhow};
@@ -18,55 +17,51 @@ const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
 /// [`Break`]: ControlFlow::Break
 /// [`Continue`]: ControlFlow::Continue
 pub async fn poll<F, O>(
     polling_duration: Duration,
     polling_wait_behavior: PollingWaitBehavior,
     mut future: impl FnMut() -> F,
 ) -> Result<O>
 where
     F: Future<Output = Result<ControlFlow<O, ()>>>,
 {
     let mut retries = 0;
     let mut total_wait_duration = Duration::ZERO;
     let max_allowed_wait_duration = polling_duration;

     loop {
         if total_wait_duration >= max_allowed_wait_duration {
             break Err(anyhow!(
                 "Polling failed after {} retries and a total of {:?} of wait time",
                 retries,
                 total_wait_duration
             ));
         }

-        match future()
-            .await
-            .context("Polled future returned an error during polling loop")?
-        {
+        match future().await.context("Polled future returned an error during polling loop")? {
             ControlFlow::Continue(()) => {
                 let next_wait_duration = match polling_wait_behavior {
                     PollingWaitBehavior::Constant(duration) => duration,
-                    PollingWaitBehavior::ExponentialBackoff => {
-                        Duration::from_secs(2u64.pow(retries))
-                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
-                    }
+                    PollingWaitBehavior::ExponentialBackoff =>
+                        Duration::from_secs(2u64.pow(retries))
+                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION),
                 };
                 let next_wait_duration =
                     next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
                 total_wait_duration += next_wait_duration;
                 retries += 1;
                 tokio::time::sleep(next_wait_duration).await;
-            }
+            },
             ControlFlow::Break(output) => {
                 break Ok(output);
-            }
+            },
         }
     }
 }

 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
 pub enum PollingWaitBehavior {
     Constant(Duration),
     #[default]
     ExponentialBackoff,
 }
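For orientation, a hedged sketch of how a caller drives the poll helper above; the counter-based readiness check is invented for illustration, and poll plus PollingWaitBehavior are assumed to be in scope:

    use std::{
        ops::ControlFlow,
        sync::atomic::{AtomicU32, Ordering},
        time::Duration,
    };

    static ATTEMPTS: AtomicU32 = AtomicU32::new(0);

    async fn wait_until_ready() -> anyhow::Result<u32> {
        poll(Duration::from_secs(10), PollingWaitBehavior::ExponentialBackoff, || async {
            // Continue polling until the third attempt, then break with a value.
            let attempt = ATTEMPTS.fetch_add(1, Ordering::SeqCst);
            if attempt >= 3 {
                Ok(ControlFlow::Break(attempt))
            } else {
                Ok(ControlFlow::Continue(()))
            }
        })
        .await
    }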
+11 -11
@@ -1,21 +1,21 @@
 /// An iterator that could be either of two iterators.
 #[derive(Clone, Debug)]
 pub enum EitherIter<A, B> {
     A(A),
     B(B),
 }

 impl<A, B, T> Iterator for EitherIter<A, B>
 where
     A: Iterator<Item = T>,
     B: Iterator<Item = T>,
 {
     type Item = T;

     fn next(&mut self) -> Option<Self::Item> {
         match self {
             EitherIter::A(iter) => iter.next(),
             EitherIter::B(iter) => iter.next(),
         }
     }
 }
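A brief sketch of what EitherIter buys: one concrete return type for a function that may produce either of two iterator types. The function itself is hypothetical:

    fn digits(reversed: bool) -> EitherIter<std::ops::Range<u32>, std::iter::Rev<std::ops::Range<u32>>> {
        if reversed {
            EitherIter::B((0..10).rev())
        } else {
            EitherIter::A(0..10)
        }
    }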
@@ -1,91 +1,90 @@
 use std::{
     borrow::Cow,
     collections::HashSet,
     path::{Path, PathBuf},
 };

 /// An iterator that finds files of a certain extension in the provided directory. You can think of
 /// this a glob pattern similar to: `${path}/**/*.md`
 pub struct FilesWithExtensionIterator {
     /// The set of allowed extensions that that match the requirement and that should be returned
     /// when found.
     allowed_extensions: HashSet<Cow<'static, str>>,

     /// The set of directories to visit next. This iterator does BFS and so these directories will
     /// only be visited if we can't find any files in our state.
     directories_to_search: Vec<PathBuf>,

     /// The set of files matching the allowed extensions that were found. If there are entries in
     /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
     /// then we visit one of the next directories to visit.
     files_matching_allowed_extensions: Vec<PathBuf>,

     /// This option controls if the the cached file system should be used or not. This could be
     /// better for certain cases where the entries in the directories do not change and therefore
     /// caching can be used.
     use_cached_fs: bool,
 }

 impl FilesWithExtensionIterator {
     pub fn new(root_directory: impl AsRef<Path>) -> Self {
         Self {
             allowed_extensions: Default::default(),
             directories_to_search: vec![root_directory.as_ref().to_path_buf()],
             files_matching_allowed_extensions: Default::default(),
             use_cached_fs: Default::default(),
         }
     }

     pub fn with_allowed_extension(
         mut self,
         allowed_extension: impl Into<Cow<'static, str>>,
     ) -> Self {
         self.allowed_extensions.insert(allowed_extension.into());
         self
     }

     pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
         self.use_cached_fs = use_cached_fs;
         self
     }
 }

 impl Iterator for FilesWithExtensionIterator {
     type Item = PathBuf;

     fn next(&mut self) -> Option<Self::Item> {
         if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
             return Some(file_path);
         };

         let directory_to_search = self.directories_to_search.pop()?;
         let iterator = if self.use_cached_fs {
             let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
                 return self.next();
             };
             Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
         } else {
             let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
                 return self.next();
             };
             Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
         };

         for entry_path in iterator.flatten() {
             if entry_path.is_dir() {
                 self.directories_to_search.push(entry_path)
-            } else if entry_path.is_file()
-                && entry_path.extension().is_some_and(|ext| {
+            } else if entry_path.is_file() &&
+                entry_path.extension().is_some_and(|ext| {
                     self.allowed_extensions
                         .iter()
                         .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
-                })
-            {
+                }) {
                 self.files_matching_allowed_extensions.push(entry_path)
             }
         }

         self.next()
     }
 }
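A hedged usage sketch of the builder above; the directory name and extension are invented for illustration:

    // Breadth-first search for every *.sol file under ./contracts, with the
    // cached file system enabled for repeated traversals.
    fn find_sources() -> Vec<std::path::PathBuf> {
        FilesWithExtensionIterator::new("contracts")
            .with_allowed_extension("sol")
            .with_use_cached_fs(true)
            .collect()
    }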
+19 -19
@@ -1,23 +1,23 @@
 #[macro_export]
 macro_rules! impl_for_wrapper {
     (Display, $ident: ident) => {
         #[automatically_derived]
         impl std::fmt::Display for $ident {
             fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                 std::fmt::Display::fmt(&self.0, f)
             }
         }
     };
     (FromStr, $ident: ident) => {
         #[automatically_derived]
         impl std::str::FromStr for $ident {
             type Err = anyhow::Error;

             fn from_str(s: &str) -> anyhow::Result<Self> {
                 s.parse().map(Self).map_err(Into::into)
             }
         }
     };
 }

 /// Defines wrappers around types.
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
     };
 }

-/// Technically not needed but this allows for the macro to be found in the `macros` module of the
-/// crate in addition to being found in the root of the crate.
+/// Technically not needed but this allows for the macro to be found in the `macros` module of
+/// the crate in addition to being found in the root of the crate.
 pub use {define_wrapper_type, impl_for_wrapper};
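A sketch of how the impl_for_wrapper macro above might be applied; the BlockNumber newtype is hypothetical:

    // Forwards Display and FromStr from the wrapped u64 to the newtype.
    pub struct BlockNumber(pub u64);
    impl_for_wrapper!(Display, BlockNumber);
    impl_for_wrapper!(FromStr, BlockNumber);

    fn demo() -> anyhow::Result<()> {
        let n: BlockNumber = "42".parse()?;
        assert_eq!(n.to_string(), "42");
        Ok(())
    }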
+100 -100
@@ -7,128 +7,128 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
 /// could be thought of like the target triple from Rust and LLVM where it specifies the platform
 /// completely starting with the node, the vm, and finally the compiler used for this combination.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 #[serde(rename_all = "kebab-case")]
 #[strum(serialize_all = "kebab-case")]
 pub enum PlatformIdentifier {
     /// The Go-ethereum reference full node EVM implementation with the solc compiler.
     GethEvmSolc,
     /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
     LighthouseGethEvmSolc,
     /// The kitchensink node with the PolkaVM backend with the resolc compiler.
     KitchensinkPolkavmResolc,
     /// The kitchensink node with the REVM backend with the solc compiler.
     KitchensinkRevmSolc,
     /// The revive dev node with the PolkaVM backend with the resolc compiler.
     ReviveDevNodePolkavmResolc,
     /// The revive dev node with the REVM backend with the solc compiler.
     ReviveDevNodeRevmSolc,
     /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
     ZombienetPolkavmResolc,
     /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
     ZombienetRevmSolc,
 }

 /// An enum of the platform identifiers of all of the platforms supported by this framework.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 pub enum CompilerIdentifier {
     /// The solc compiler.
     Solc,
     /// The resolc compiler.
     Resolc,
 }

 /// An enum representing the identifiers of the supported nodes.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 pub enum NodeIdentifier {
     /// The go-ethereum node implementation.
     Geth,
     /// The go-ethereum node implementation.
     LighthouseGeth,
     /// The Kitchensink node implementation.
     Kitchensink,
     /// The revive dev node implementation.
     ReviveDevNode,
     /// A zombienet spawned nodes
     Zombienet,
 }

 /// An enum representing the identifiers of the supported VMs.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 #[serde(rename_all = "lowercase")]
 #[strum(serialize_all = "lowercase")]
 pub enum VmIdentifier {
     /// The ethereum virtual machine.
     Evm,
     /// The EraVM virtual machine.
     EraVM,
     /// Polkadot's PolaVM Risc-v based virtual machine.
     PolkaVM,
 }
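Given the strum attribute serialize_all = "kebab-case" on PlatformIdentifier above, the identifiers round-trip through kebab-case strings; a small sketch, assuming the enum is in scope:

    use std::str::FromStr;

    fn demo() {
        // strum's serialize_all = "kebab-case" governs Display and FromStr.
        assert_eq!(PlatformIdentifier::GethEvmSolc.to_string(), "geth-evm-solc");
        assert_eq!(
            PlatformIdentifier::from_str("kitchensink-revm-solc").unwrap(),
            PlatformIdentifier::KitchensinkRevmSolc
        );
    }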
+118 -122
@@ -1,9 +1,7 @@
 use crate::types::VersionOrRequirement;
 use semver::Version;
 use serde::{Deserialize, Serialize};
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{fmt::Display, str::FromStr, sync::LazyLock};

 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -13,161 +11,159 @@ use std::sync::LazyLock;
 /// Use [`ParsedMode::to_test_modes()`] to do this.
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub struct Mode {
     pub pipeline: ModePipeline,
     pub optimize_setting: ModeOptimizerSetting,
     pub version: Option<semver::VersionReq>,
 }

 impl Display for Mode {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.pipeline.fmt(f)?;
         f.write_str(" ")?;
         self.optimize_setting.fmt(f)?;
         if let Some(version) = &self.version {
             f.write_str(" ")?;
             version.fmt(f)?;
         }
         Ok(())
     }
 }

 impl Mode {
     /// Return all of the available mode combinations.
     pub fn all() -> impl Iterator<Item = &'static Mode> {
         static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
             ModePipeline::test_cases()
                 .flat_map(|pipeline| {
                     ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
                         pipeline,
                         optimize_setting,
                         version: None,
                     })
                 })
                 .collect::<Vec<_>>()
         });
         ALL_MODES.iter()
     }

     /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
     /// the requirement is present on the object. Otherwise, the passed default version is used.
     pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
         match self.version {
             Some(ref requirement) => requirement.clone().into(),
             None => default.into(),
         }
     }
 }

 /// What do we want the compiler to do?
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
 pub enum ModePipeline {
     /// Compile Solidity code via Yul IR
     ViaYulIR,
     /// Compile Solidity direct to assembly
     ViaEVMAssembly,
 }

 impl FromStr for ModePipeline {
     type Err = anyhow::Error;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
             // via Yul IR
             "Y" => Ok(ModePipeline::ViaYulIR),
             // Don't go via Yul IR
             "E" => Ok(ModePipeline::ViaEVMAssembly),
             // Anything else that we see isn't a mode at all
-            _ => Err(anyhow::anyhow!(
-                "Unsupported pipeline '{s}': expected 'Y' or 'E'"
-            )),
+            _ => Err(anyhow::anyhow!("Unsupported pipeline '{s}': expected 'Y' or 'E'")),
         }
     }
 }

 impl Display for ModePipeline {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             ModePipeline::ViaYulIR => f.write_str("Y"),
             ModePipeline::ViaEVMAssembly => f.write_str("E"),
         }
     }
 }

 impl ModePipeline {
     /// Should we go via Yul IR?
     pub fn via_yul_ir(&self) -> bool {
         matches!(self, ModePipeline::ViaYulIR)
     }

     /// An iterator over the available pipelines that we'd like to test,
     /// when an explicit pipeline was not specified.
     pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
         [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
     }
 }

 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
 pub enum ModeOptimizerSetting {
     /// 0 / -: Don't apply any optimizations
     M0,
     /// 1: Apply less than default optimizations
     M1,
     /// 2: Apply the default optimizations
     M2,
     /// 3 / +: Apply aggressive optimizations
     M3,
     /// s: Optimize for size
     Ms,
     /// z: Aggressively optimize for size
     Mz,
 }

 impl FromStr for ModeOptimizerSetting {
     type Err = anyhow::Error;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
             "M0" => Ok(ModeOptimizerSetting::M0),
             "M1" => Ok(ModeOptimizerSetting::M1),
             "M2" => Ok(ModeOptimizerSetting::M2),
             "M3" => Ok(ModeOptimizerSetting::M3),
             "Ms" => Ok(ModeOptimizerSetting::Ms),
             "Mz" => Ok(ModeOptimizerSetting::Mz),
             _ => Err(anyhow::anyhow!(
                 "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
             )),
         }
     }
 }

 impl Display for ModeOptimizerSetting {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             ModeOptimizerSetting::M0 => f.write_str("M0"),
             ModeOptimizerSetting::M1 => f.write_str("M1"),
             ModeOptimizerSetting::M2 => f.write_str("M2"),
             ModeOptimizerSetting::M3 => f.write_str("M3"),
             ModeOptimizerSetting::Ms => f.write_str("Ms"),
             ModeOptimizerSetting::Mz => f.write_str("Mz"),
         }
     }
 }

 impl ModeOptimizerSetting {
     /// An iterator over the available optimizer settings that we'd like to test,
     /// when an explicit optimizer setting was not specified.
     pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
         [
             // No optimizations:
             ModeOptimizerSetting::M0,
             // Aggressive optimizations:
             ModeOptimizerSetting::M3,
         ]
         .into_iter()
     }

     /// Are any optimizations enabled?
     pub fn optimizations_enabled(&self) -> bool {
         !matches!(self, ModeOptimizerSetting::M0)
     }
 }
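A sketch of how the pieces above compose; per the Display impl, a mode renders as pipeline, optimizer setting, and optional version requirement separated by spaces, e.g. "Y M3":

    use std::str::FromStr;

    fn demo() -> anyhow::Result<()> {
        let mode = Mode {
            pipeline: ModePipeline::from_str("Y")?,
            optimize_setting: ModeOptimizerSetting::from_str("M3")?,
            version: None,
        };
        assert!(mode.pipeline.via_yul_ir());
        assert!(mode.optimize_setting.optimizations_enabled());
        assert_eq!(mode.to_string(), "Y M3");
        Ok(())
    }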
@@ -1,36 +1,32 @@
-use alloy::primitives::U256;
-use alloy::signers::local::PrivateKeySigner;
+use alloy::{primitives::U256, signers::local::PrivateKeySigner};
 use anyhow::{Context, Result, bail};

 /// This is a sequential private key allocator. When instantiated, it allocated private keys in
 /// sequentially and in order until the maximum private key specified is reached.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PrivateKeyAllocator {
     /// The next private key to be returned by the allocator when requested.
     next_private_key: U256,

     /// The highest private key (exclusive) that can be returned by this allocator.
     highest_private_key_inclusive: U256,
 }

 impl PrivateKeyAllocator {
     /// Creates a new instance of the private key allocator.
     pub fn new(highest_private_key_inclusive: U256) -> Self {
-        Self {
-            next_private_key: U256::ONE,
-            highest_private_key_inclusive,
-        }
+        Self { next_private_key: U256::ONE, highest_private_key_inclusive }
     }

     /// Allocates a new private key and errors out if the maximum private key has been reached.
     pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
         if self.next_private_key > self.highest_private_key_inclusive {
             bail!("Attempted to allocate a private key but failed since all have been allocated");
         };
         let private_key =
             PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
                 .context("Failed to convert the private key digits into a private key")?;
         self.next_private_key += U256::ONE;
         Ok(private_key)
     }
 }
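A small sketch of the allocator's behavior as written above (U256 comes from alloy):

    use alloy::primitives::U256;

    fn demo() -> anyhow::Result<()> {
        // Keys are handed out sequentially starting from 1, up to and
        // including the configured maximum.
        let mut allocator = PrivateKeyAllocator::new(U256::from(2));
        let _first = allocator.allocate()?; // private key 0x...01
        let _second = allocator.allocate()?; // private key 0x...02
        assert!(allocator.allocate().is_err()); // exhausted
        Ok(())
    }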
+12 -15
@@ -1,24 +1,21 @@
 use std::sync::atomic::{AtomicUsize, Ordering};

 pub struct RoundRobinPool<T> {
     next_index: AtomicUsize,
     items: Vec<T>,
 }

 impl<T> RoundRobinPool<T> {
     pub fn new(items: Vec<T>) -> Self {
-        Self {
-            next_index: Default::default(),
-            items,
-        }
+        Self { next_index: Default::default(), items }
     }

     pub fn round_robin(&self) -> &T {
         let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
         self.items.get(current).unwrap()
     }

     pub fn iter(&self) -> impl Iterator<Item = &T> {
         self.items.iter()
     }
 }
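A sketch of the rotation behavior; note that round_robin takes &self, so a shared reference suffices:

    fn demo() {
        let pool = RoundRobinPool::new(vec!["node-a", "node-b", "node-c"]);
        assert_eq!(*pool.round_robin(), "node-a");
        assert_eq!(*pool.round_robin(), "node-b");
        assert_eq!(*pool.round_robin(), "node-c");
        // The index wraps around via the modulo on the atomic counter.
        assert_eq!(*pool.round_robin(), "node-a");
    }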
@@ -2,40 +2,40 @@ use semver::{Version, VersionReq};
 #[derive(Clone, Debug)]
 pub enum VersionOrRequirement {
     Version(Version),
     Requirement(VersionReq),
 }

 impl From<Version> for VersionOrRequirement {
     fn from(value: Version) -> Self {
         Self::Version(value)
     }
 }

 impl From<VersionReq> for VersionOrRequirement {
     fn from(value: VersionReq) -> Self {
         Self::Requirement(value)
     }
 }

 impl TryFrom<VersionOrRequirement> for Version {
     type Error = anyhow::Error;

     fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
         let VersionOrRequirement::Version(version) = value else {
             anyhow::bail!("Version or requirement was not a version");
         };
         Ok(version)
     }
 }

 impl TryFrom<VersionOrRequirement> for VersionReq {
     type Error = anyhow::Error;

     fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
         let VersionOrRequirement::Requirement(requirement) = value else {
             anyhow::bail!("Version or requirement was not a requirement");
         };
         Ok(requirement)
     }
 }
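A sketch of the conversions above: TryFrom only succeeds when the stored variant matches the requested type:

    use semver::{Version, VersionReq};

    fn demo() {
        let v: VersionOrRequirement = Version::new(0, 8, 29).into();
        assert!(Version::try_from(v).is_ok());

        let r: VersionOrRequirement = VersionReq::parse("^0.8").unwrap().into();
        assert!(Version::try_from(r).is_err()); // it is a requirement, not a version
    }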
+111 -112
@@ -4,14 +4,13 @@
 //! - Polkadot revive Wasm compiler

 use std::{
     collections::HashMap,
     hash::Hash,
     path::{Path, PathBuf},
     pin::Pin,
 };

-use alloy::json_abi::JsonAbi;
-use alloy::primitives::Address;
+use alloy::{json_abi::JsonAbi, primitives::Address};
 use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
@@ -28,149 +27,149 @@ pub mod solc;
 /// A common interface for all supported Solidity compilers.
 pub trait SolidityCompiler {
     /// Returns the version of the compiler.
     fn version(&self) -> &Version;

     /// Returns the path of the compiler executable.
     fn path(&self) -> &Path;

     /// The low-level compiler interface.
     fn build(
         &self,
         input: CompilerInput,
     ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;

     /// Does the compiler support the provided mode and version settings.
     fn supports_mode(
         &self,
         optimizer_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool;
 }

 /// The generic compilation input configuration.
 #[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct CompilerInput {
     pub pipeline: Option<ModePipeline>,
     pub optimization: Option<ModeOptimizerSetting>,
     pub evm_version: Option<EVMVersion>,
     pub allow_paths: Vec<PathBuf>,
     pub base_path: Option<PathBuf>,
     pub sources: HashMap<PathBuf, String>,
     pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
     pub revert_string_handling: Option<RevertString>,
 }

 /// The generic compilation output configuration.
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct CompilerOutput {
     /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is
     /// required and the compiled source has placeholders.
     pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 }

 /// A generic builder style interface for configuring the supported compiler options.
 #[derive(Default)]
 pub struct Compiler {
     input: CompilerInput,
 }

 impl Compiler {
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
                 pipeline: Default::default(),
                 optimization: Default::default(),
                 evm_version: Default::default(),
                 allow_paths: Default::default(),
                 base_path: Default::default(),
                 sources: Default::default(),
                 libraries: Default::default(),
                 revert_string_handling: Default::default(),
             },
         }
     }

     pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
         self.input.optimization = value.into();
         self
     }

     pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
         self.input.pipeline = value.into();
         self
     }

     pub fn with_evm_version(mut self, version: impl Into<Option<EVMVersion>>) -> Self {
         self.input.evm_version = version.into();
         self
     }

     pub fn with_allow_path(mut self, path: impl AsRef<Path>) -> Self {
         self.input.allow_paths.push(path.as_ref().into());
         self
     }

     pub fn with_base_path(mut self, path: impl Into<Option<PathBuf>>) -> Self {
         self.input.base_path = path.into();
         self
     }

     pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
         self.input.sources.insert(
             path.as_ref().to_path_buf(),
             read_to_string(path.as_ref()).context("Failed to read the contract source")?,
         );
         Ok(self)
     }

     pub fn with_library(
         mut self,
         path: impl AsRef<Path>,
         name: impl AsRef<str>,
         address: Address,
     ) -> Self {
         self.input
             .libraries
             .entry(path.as_ref().to_path_buf())
             .or_default()
             .insert(name.as_ref().into(), address);
         self
     }

     pub fn with_revert_string_handling(
         mut self,
         revert_string_handling: impl Into<Option<RevertString>>,
     ) -> Self {
         self.input.revert_string_handling = revert_string_handling.into();
         self
     }

     pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
         callback(self)
     }

     pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
         callback(self)
     }

     pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
         compiler.build(self.input).await
     }

     pub fn input(&self) -> &CompilerInput {
         &self.input
     }
 }

 /// Defines how the compiler should handle revert strings.
 #[derive(
     Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
 )]
 pub enum RevertString {
     #[default]
     Default,
     Debug,
     Strip,
     VerboseDebug,
 }
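A hedged sketch of driving the builder above; the source path and the concrete solc instance are assumptions for illustration:

    async fn compile(solc: &dyn SolidityCompiler) -> anyhow::Result<CompilerOutput> {
        Compiler::new()
            .with_pipeline(ModePipeline::ViaYulIR)
            .with_optimization(ModeOptimizerSetting::M3)
            .with_source("contracts/Token.sol")? // reads the file into `sources`
            .try_build(solc)
            .await
    }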
+241 -249
@@ -2,24 +2,24 @@
 //! compiling contracts to PolkaVM (PVM) bytecode.

 use std::{
     path::PathBuf,
     pin::Pin,
     process::Stdio,
     sync::{Arc, LazyLock},
 };

 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_solc_json_interface::{
     SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
     SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
     SolcStandardJsonOutput,
 };
 use tracing::{Span, field::display};

 use crate::{
     CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
 };

 use alloy::json_abi::JsonAbi;
@@ -33,55 +33,52 @@ pub struct Resolc(Arc<ResolcInner>);
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 struct ResolcInner {
     /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
     solc: Solc,
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
 }

 impl Resolc {
     pub async fn new(
         context: impl AsRef<SolcConfiguration>
             + AsRef<ResolcConfiguration>
             + AsRef<WorkingDirectoryConfiguration>,
         version: impl Into<Option<VersionOrRequirement>>,
     ) -> Result<Self> {
         /// This is a cache of all of the resolc compiler objects. Since we do not currently support
         /// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and
         /// its version to the resolc compiler.
         static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);

         let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
         let solc = Solc::new(&context, version)
             .await
             .context("Failed to create the solc compiler frontend for resolc")?;

         Ok(COMPILERS_CACHE
             .entry(solc.clone())
             .or_insert_with(|| {
-                Self(Arc::new(ResolcInner {
-                    solc,
-                    resolc_path: resolc_configuration.path.clone(),
-                }))
+                Self(Arc::new(ResolcInner { solc, resolc_path: resolc_configuration.path.clone() }))
             })
             .clone())
     }
 }

 impl SolidityCompiler for Resolc {
     fn version(&self) -> &Version {
         // We currently return the solc compiler version since we do not support multiple resolc
         // compiler versions.
         SolidityCompiler::version(&self.0.solc)
     }

     fn path(&self) -> &std::path::Path {
         &self.0.resolc_path
     }

     #[tracing::instrument(level = "debug", ret)]
     #[tracing::instrument(
         level = "error",
         skip_all,
         fields(
@@ -91,221 +88,216 @@ impl SolidityCompiler for Resolc {
), ),
err(Debug) err(Debug)
)] )]
fn build( fn build(
&self, &self,
CompilerInput { CompilerInput {
pipeline, pipeline,
optimization, optimization,
evm_version, evm_version,
allow_paths, allow_paths,
base_path, base_path,
sources, sources,
libraries, libraries,
// TODO: this is currently not being handled since there is no way to pass it into // TODO: this is currently not being handled since there is no way to pass it into
// resolc. So, we need to go back to this later once it's supported. // resolc. So, we need to go back to this later once it's supported.
revert_string_handling: _, revert_string_handling: _,
}: CompilerInput, }: CompilerInput,
) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> { ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
Box::pin(async move { Box::pin(async move {
if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) { if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
anyhow::bail!( anyhow::bail!(
"Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}" "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
); );
} }
let input = SolcStandardJsonInput { let input = SolcStandardJsonInput {
language: SolcStandardJsonInputLanguage::Solidity, language: SolcStandardJsonInputLanguage::Solidity,
sources: sources sources: sources
.into_iter() .into_iter()
.map(|(path, source)| (path.display().to_string(), source.into())) .map(|(path, source)| (path.display().to_string(), source.into()))
.collect(), .collect(),
settings: SolcStandardJsonInputSettings { settings: SolcStandardJsonInputSettings {
evm_version, evm_version,
libraries: Some( libraries: Some(
libraries libraries
.into_iter() .into_iter()
.map(|(source_code, libraries_map)| { .map(|(source_code, libraries_map)| {
( (
source_code.display().to_string(), source_code.display().to_string(),
libraries_map libraries_map
.into_iter() .into_iter()
.map(|(library_ident, library_address)| { .map(|(library_ident, library_address)| {
(library_ident, library_address.to_string()) (library_ident, library_address.to_string())
}) })
.collect(), .collect(),
) )
}) })
.collect(), .collect(),
), ),
remappings: None, remappings: None,
output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
via_ir: Some(true), via_ir: Some(true),
optimizer: SolcStandardJsonInputSettingsOptimizer::new( optimizer: SolcStandardJsonInputSettingsOptimizer::new(
optimization optimization.unwrap_or(ModeOptimizerSetting::M0).optimizations_enabled(),
.unwrap_or(ModeOptimizerSetting::M0) None,
.optimizations_enabled(), &Version::new(0, 0, 0),
None, false,
&Version::new(0, 0, 0), ),
false, metadata: None,
), polkavm: None,
metadata: None, },
polkavm: None, };
}, Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
};
Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
let path = &self.0.resolc_path; let path = &self.0.resolc_path;
let mut command = AsyncCommand::new(path); let mut command = AsyncCommand::new(path);
command command
.stdin(Stdio::piped()) .stdin(Stdio::piped())
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.stderr(Stdio::piped()) .stderr(Stdio::piped())
.arg("--solc") .arg("--solc")
.arg(self.0.solc.path()) .arg(self.0.solc.path())
.arg("--standard-json"); .arg("--standard-json");
if let Some(ref base_path) = base_path { if let Some(ref base_path) = base_path {
command.arg("--base-path").arg(base_path); command.arg("--base-path").arg(base_path);
} }
if !allow_paths.is_empty() { if !allow_paths.is_empty() {
command.arg("--allow-paths").arg( command.arg("--allow-paths").arg(
allow_paths allow_paths
.iter() .iter()
.map(|path| path.display().to_string()) .map(|path| path.display().to_string())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(","), .join(","),
); );
} }
let mut child = command let mut child = command
.spawn() .spawn()
.with_context(|| format!("Failed to spawn resolc at {}", path.display()))?; .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
let serialized_input = serde_json::to_vec(&input) let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for resolc")?; .context("Failed to serialize Standard JSON input for resolc")?;
stdin_pipe stdin_pipe
.write_all(&serialized_input) .write_all(&serialized_input)
.await .await
.context("Failed to write Standard JSON to resolc stdin")?; .context("Failed to write Standard JSON to resolc stdin")?;
let output = child let output = child
.wait_with_output() .wait_with_output()
.await .await
.context("Failed while waiting for resolc process to finish")?; .context("Failed while waiting for resolc process to finish")?;
let stdout = output.stdout; let stdout = output.stdout;
let stderr = output.stderr; let stderr = output.stderr;
if !output.status.success() { if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input) let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?; .context("Failed to pretty-print Standard JSON input for logging")?;
let message = String::from_utf8_lossy(&stderr); let message = String::from_utf8_lossy(&stderr);
tracing::error!( tracing::error!(
status = %output.status, status = %output.status,
message = %message, message = %message,
json_input = json_in, json_input = json_in,
"Compilation using resolc failed" "Compilation using resolc failed"
); );
anyhow::bail!("Compilation failed with an error: {message}"); anyhow::bail!("Compilation failed with an error: {message}");
} }
let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout) let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
.map_err(|e| { .map_err(|e| {
anyhow::anyhow!( anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstderr: {}", "failed to parse resolc JSON output: {e}\nstderr: {}",
String::from_utf8_lossy(&stderr) String::from_utf8_lossy(&stderr)
) )
}) })
.context("Failed to parse resolc standard JSON output")?; .context("Failed to parse resolc standard JSON output")?;
tracing::debug!( tracing::debug!(
output = %serde_json::to_string(&parsed).unwrap(), output = %serde_json::to_string(&parsed).unwrap(),
"Compiled successfully" "Compiled successfully"
); );
// Detect whether the compiler output contained errors and report them through logs and // Detect whether the compiler output contained errors and report them through logs and
// returned errors instead of passing along compiler output that might contain errors. // returned errors instead of passing along compiler output that might contain errors.
for error in parsed.errors.iter().flatten() { for error in parsed.errors.iter().flatten() {
if error.severity == "error" { if error.severity == "error" {
tracing::error!( tracing::error!(
?error, ?error,
?input, ?input,
output = %serde_json::to_string(&parsed).unwrap(), output = %serde_json::to_string(&parsed).unwrap(),
"Encountered an error in the compilation" "Encountered an error in the compilation"
); );
anyhow::bail!("Encountered an error in the compilation: {error}") anyhow::bail!("Encountered an error in the compilation: {error}")
} }
} }
let Some(contracts) = parsed.contracts else { let Some(contracts) = parsed.contracts else {
anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section"); anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
}; };
let mut compiler_output = CompilerOutput::default(); let mut compiler_output = CompilerOutput::default();
for (source_path, contracts) in contracts.into_iter() { for (source_path, contracts) in contracts.into_iter() {
let src_for_msg = source_path.clone(); let src_for_msg = source_path.clone();
let source_path = PathBuf::from(source_path) let source_path = PathBuf::from(source_path)
.canonicalize() .canonicalize()
.with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
let map = compiler_output.contracts.entry(source_path).or_default(); let map = compiler_output.contracts.entry(source_path).or_default();
for (contract_name, contract_information) in contracts.into_iter() { for (contract_name, contract_information) in contracts.into_iter() {
let bytecode = contract_information let bytecode = contract_information
.evm .evm
.and_then(|evm| evm.bytecode.clone()) .and_then(|evm| evm.bytecode.clone())
.context("Unexpected - Contract compiled with resolc has no bytecode")?; .context("Unexpected - Contract compiled with resolc has no bytecode")?;
let abi = { let abi = {
let metadata = contract_information let metadata = contract_information
.metadata .metadata
.as_ref() .as_ref()
.context("No metadata found for the contract")?; .context("No metadata found for the contract")?;
let solc_metadata_str = match metadata { let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => { serde_json::Value::String(solc_metadata_str) =>
solc_metadata_str.as_str() solc_metadata_str.as_str(),
} serde_json::Value::Object(metadata_object) => {
serde_json::Value::Object(metadata_object) => { let solc_metadata_value = metadata_object
let solc_metadata_value = metadata_object .get("solc_metadata")
.get("solc_metadata") .context("Contract doesn't have a 'solc_metadata' field")?;
.context("Contract doesn't have a 'solc_metadata' field")?; solc_metadata_value
solc_metadata_value .as_str()
.as_str() .context("The 'solc_metadata' field is not a string")?
.context("The 'solc_metadata' field is not a string")? },
} serde_json::Value::Null |
serde_json::Value::Null serde_json::Value::Bool(_) |
| serde_json::Value::Bool(_) serde_json::Value::Number(_) |
| serde_json::Value::Number(_) serde_json::Value::Array(_) => {
| serde_json::Value::Array(_) => { anyhow::bail!("Unsupported type of metadata {metadata:?}")
anyhow::bail!("Unsupported type of metadata {metadata:?}") },
} };
}; let solc_metadata =
let solc_metadata = serde_json::from_str::<serde_json::Value>( serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
solc_metadata_str, "Failed to deserialize the solc_metadata as a serde_json generic value",
) )?;
.context( let output_value = solc_metadata
"Failed to deserialize the solc_metadata as a serde_json generic value", .get("output")
)?; .context("solc_metadata doesn't have an output field")?;
let output_value = solc_metadata let abi_value = output_value
.get("output") .get("abi")
.context("solc_metadata doesn't have an output field")?; .context("solc_metadata output doesn't contain an abi field")?;
let abi_value = output_value serde_json::from_value::<JsonAbi>(abi_value.clone())
.get("abi") .context("ABI found in solc_metadata output is not valid ABI")?
.context("solc_metadata output doesn't contain an abi field")?; };
serde_json::from_value::<JsonAbi>(abi_value.clone()) map.insert(contract_name, (bytecode.object, abi));
.context("ABI found in solc_metadata output is not valid ABI")? }
}; }
map.insert(contract_name, (bytecode.object, abi));
}
}
Ok(compiler_output) Ok(compiler_output)
}) })
} }
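The metadata handling in the `build` method above (a string-or-object `solc_metadata` field, then `output.abi`) is easy to get wrong, so here is the same extraction as a minimal standalone sketch. It assumes resolc's metadata shape as shown in this diff; the function name `extract_abi` and the error messages are illustrative, not part of the crate.

use alloy::json_abi::JsonAbi;
use anyhow::Context as _;

// Sketch only: mirrors the string-or-object handling in the diff above.
fn extract_abi(metadata: &serde_json::Value) -> anyhow::Result<JsonAbi> {
    // resolc may emit metadata either as a JSON string or as an object wrapping the
    // solc metadata under a "solc_metadata" key.
    let solc_metadata_str = match metadata {
        serde_json::Value::String(s) => s.as_str(),
        serde_json::Value::Object(map) => map
            .get("solc_metadata")
            .and_then(|value| value.as_str())
            .context("missing or non-string 'solc_metadata' field")?,
        other => anyhow::bail!("unsupported type of metadata {other:?}"),
    };
    // The field is itself JSON; the ABI lives under output.abi.
    let solc_metadata: serde_json::Value = serde_json::from_str(solc_metadata_str)
        .context("'solc_metadata' is not valid JSON")?;
    let abi_value = solc_metadata
        .pointer("/output/abi")
        .context("solc_metadata has no output.abi field")?;
    serde_json::from_value::<JsonAbi>(abi_value.clone())
        .context("output.abi is not a valid ABI")
}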
fn supports_mode( fn supports_mode(
&self, &self,
optimize_setting: ModeOptimizerSetting, optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline, pipeline: ModePipeline,
) -> bool { ) -> bool {
pipeline == ModePipeline::ViaYulIR pipeline == ModePipeline::ViaYulIR &&
&& SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline) SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
} }
} }
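For reference, the spawn/write/wait pattern that both compiler drivers in this diff use can be reduced to a small helper. This is a sketch under the assumption of a standard-JSON style compiler CLI; `run_standard_json` and its error messages are illustrative, and only tokio APIs that appear in the diff (`Command`, `AsyncWriteExt::write_all`, `wait_with_output`) are used.

use std::process::Stdio;
use anyhow::Context as _;
use tokio::{io::AsyncWriteExt, process::Command};

// Sketch: spawn a standard-JSON compiler, feed it the request on stdin, and
// collect stdout. tokio closes the stdin handle before waiting, so the child
// sees EOF and terminates.
async fn run_standard_json(path: &str, request: &[u8]) -> anyhow::Result<Vec<u8>> {
    let mut child = Command::new(path)
        .arg("--standard-json")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .with_context(|| format!("Failed to spawn {path}"))?;
    child
        .stdin
        .as_mut()
        .context("stdin must be piped")?
        .write_all(request)
        .await?;
    let output = child.wait_with_output().await?;
    anyhow::ensure!(output.status.success(), "compiler exited with {}", output.status);
    Ok(output.stdout)
}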
+234 -242
View File
@@ -2,10 +2,10 @@
//! compiling contracts to EVM bytecode. //! compiling contracts to EVM bytecode.
use std::{ use std::{
path::PathBuf, path::PathBuf,
pin::Pin, pin::Pin,
process::Stdio, process::Stdio,
sync::{Arc, LazyLock}, sync::{Arc, LazyLock},
}; };
use dashmap::DashMap; use dashmap::DashMap;
@@ -18,11 +18,10 @@ use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, S
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{ use foundry_compilers_artifacts::{
output_selection::{ output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
}, },
solc::CompilerOutput as SolcOutput, solc::{CompilerOutput as SolcOutput, *},
solc::*,
}; };
use semver::Version; use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -32,268 +31,261 @@ pub struct Solc(Arc<SolcInner>);
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct SolcInner { struct SolcInner {
/// The path of the solidity compiler executable that this object uses. /// The path of the solidity compiler executable that this object uses.
solc_path: PathBuf, solc_path: PathBuf,
/// The version of the solidity compiler executable that this object uses. /// The version of the solidity compiler executable that this object uses.
solc_version: Version, solc_version: Version,
} }
impl Solc { impl Solc {
pub async fn new( pub async fn new(
context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>, context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> { ) -> Result<Self> {
// This is a cache for the compiler objects so that whenever the same compiler version is // This is a cache for the compiler objects so that whenever the same compiler version is
// requested the same object is returned. We do this as we do not want to keep cloning the // requested the same object is returned. We do this as we do not want to keep cloning the
// compiler around. // compiler around.
static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> = static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
LazyLock::new(Default::default); LazyLock::new(Default::default);
let working_directory_configuration = let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context); let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
// We attempt to download the solc binary. Note the following: this call does the version // We attempt to download the solc binary. Note the following: this call does the version
// resolution for us. Therefore, even if the download didn't proceed, this function will // resolution for us. Therefore, even if the download didn't proceed, this function will
// resolve the version requirement into a canonical version of the compiler. It's then up // resolve the version requirement into a canonical version of the compiler. It's then up
// to us to either use the provided path or not. // to us to either use the provided path or not.
let version = version let version = version.into().unwrap_or_else(|| solc_configuration.version.clone().into());
.into() let (version, path) =
.unwrap_or_else(|| solc_configuration.version.clone().into()); download_solc(working_directory_configuration.as_path(), version, false)
let (version, path) = .await
download_solc(working_directory_configuration.as_path(), version, false) .context("Failed to download/get path to solc binary")?;
.await
.context("Failed to download/get path to solc binary")?;
Ok(COMPILERS_CACHE Ok(COMPILERS_CACHE
.entry((path.clone(), version.clone())) .entry((path.clone(), version.clone()))
.or_insert_with(|| { .or_insert_with(|| {
info!( info!(
solc_path = %path.display(), solc_path = %path.display(),
solc_version = %version, solc_version = %version,
"Created a new solc compiler object" "Created a new solc compiler object"
); );
Self(Arc::new(SolcInner { Self(Arc::new(SolcInner { solc_path: path, solc_version: version }))
solc_path: path, })
solc_version: version, .clone())
})) }
})
.clone())
}
} }
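The `COMPILERS_CACHE` above follows a common memoization pattern: a process-wide `LazyLock<DashMap>` keyed by whatever uniquely identifies the resource, with `or_insert_with` building the value on first use and every caller receiving a cheap clone. A minimal sketch of the same pattern (the `Handle` type and key shape are placeholders, not the crate's own):

use std::sync::{Arc, LazyLock};
use dashmap::DashMap;

#[derive(Clone)]
struct Handle(Arc<String>);

// Returns the cached handle for `key`, creating it on first request. Concurrent
// callers asking for the same key get clones of the same underlying Arc.
fn get_or_create(key: (String, u64)) -> Handle {
    static CACHE: LazyLock<DashMap<(String, u64), Handle>> = LazyLock::new(Default::default);
    CACHE
        .entry(key.clone())
        .or_insert_with(|| Handle(Arc::new(format!("{}:{}", key.0, key.1))))
        .clone()
}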
impl SolidityCompiler for Solc { impl SolidityCompiler for Solc {
fn version(&self) -> &Version { fn version(&self) -> &Version {
&self.0.solc_version &self.0.solc_version
} }
fn path(&self) -> &std::path::Path { fn path(&self) -> &std::path::Path {
&self.0.solc_path &self.0.solc_path
} }
#[tracing::instrument(level = "debug", ret)] #[tracing::instrument(level = "debug", ret)]
#[tracing::instrument( #[tracing::instrument(
level = "error", level = "error",
skip_all, skip_all,
fields(json_in = tracing::field::Empty), fields(json_in = tracing::field::Empty),
err(Debug) err(Debug)
)] )]
fn build( fn build(
&self, &self,
CompilerInput { CompilerInput {
pipeline, pipeline,
optimization, optimization,
evm_version, evm_version,
allow_paths, allow_paths,
base_path, base_path,
sources, sources,
libraries, libraries,
revert_string_handling, revert_string_handling,
}: CompilerInput, }: CompilerInput,
) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> { ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
Box::pin(async move { Box::pin(async move {
// Be careful to entirely omit the viaIR field if the compiler does not support it, // Be careful to entirely omit the viaIR field if the compiler does not support it,
// as it will error if you provide fields it does not know about. Because // as it will error if you provide fields it does not know about. Because
// `supports_mode` is called prior to instantiating a compiler, we should never // `supports_mode` is called prior to instantiating a compiler, we should never
// ask for something which is invalid. // ask for something which is invalid.
let via_ir = match (pipeline, self.compiler_supports_yul()) { let via_ir = match (pipeline, self.compiler_supports_yul()) {
(pipeline, true) => pipeline.map(|p| p.via_yul_ir()), (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
(_pipeline, false) => None, (_pipeline, false) => None,
}; };
let input = SolcInput { let input = SolcInput {
language: SolcLanguage::Solidity, language: SolcLanguage::Solidity,
sources: Sources( sources: Sources(
sources sources
.into_iter() .into_iter()
.map(|(source_path, source_code)| (source_path, Source::new(source_code))) .map(|(source_path, source_code)| (source_path, Source::new(source_code)))
.collect(), .collect(),
), ),
settings: Settings { settings: Settings {
optimizer: Optimizer { optimizer: Optimizer {
enabled: optimization.map(|o| o.optimizations_enabled()), enabled: optimization.map(|o| o.optimizations_enabled()),
details: Some(Default::default()), details: Some(Default::default()),
..Default::default() ..Default::default()
}, },
output_selection: OutputSelection::common_output_selection( output_selection: OutputSelection::common_output_selection(
[ [
ContractOutputSelection::Abi, ContractOutputSelection::Abi,
ContractOutputSelection::Evm(EvmOutputSelection::ByteCode( ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
BytecodeOutputSelection::Object, BytecodeOutputSelection::Object,
)), )),
] ]
.into_iter() .into_iter()
.map(|item| item.to_string()), .map(|item| item.to_string()),
), ),
evm_version: evm_version.map(|version| version.to_string().parse().unwrap()), evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
via_ir, via_ir,
libraries: Libraries { libraries: Libraries {
libs: libraries libs: libraries
.into_iter() .into_iter()
.map(|(file_path, libraries)| { .map(|(file_path, libraries)| {
( (
file_path, file_path,
libraries libraries
.into_iter() .into_iter()
.map(|(library_name, library_address)| { .map(|(library_name, library_address)| {
(library_name, library_address.to_string()) (library_name, library_address.to_string())
}) })
.collect(), .collect(),
) )
}) })
.collect(), .collect(),
}, },
debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings { debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
revert_strings: match revert_string_handling { revert_strings: match revert_string_handling {
crate::RevertString::Default => Some(RevertStrings::Default), crate::RevertString::Default => Some(RevertStrings::Default),
crate::RevertString::Debug => Some(RevertStrings::Debug), crate::RevertString::Debug => Some(RevertStrings::Debug),
crate::RevertString::Strip => Some(RevertStrings::Strip), crate::RevertString::Strip => Some(RevertStrings::Strip),
crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug), crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
}, },
debug_info: Default::default(), debug_info: Default::default(),
}), }),
..Default::default() ..Default::default()
}, },
}; };
Span::current().record("json_in", display(serde_json::to_string(&input).unwrap())); Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
let path = &self.0.solc_path; let path = &self.0.solc_path;
let mut command = AsyncCommand::new(path); let mut command = AsyncCommand::new(path);
command command
.stdin(Stdio::piped()) .stdin(Stdio::piped())
.stdout(Stdio::piped()) .stdout(Stdio::piped())
.stderr(Stdio::null()) .stderr(Stdio::null())
.arg("--standard-json"); .arg("--standard-json");
if let Some(ref base_path) = base_path { if let Some(ref base_path) = base_path {
command.arg("--base-path").arg(base_path); command.arg("--base-path").arg(base_path);
} }
if !allow_paths.is_empty() { if !allow_paths.is_empty() {
command.arg("--allow-paths").arg( command.arg("--allow-paths").arg(
allow_paths allow_paths
.iter() .iter()
.map(|path| path.display().to_string()) .map(|path| path.display().to_string())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.join(","), .join(","),
); );
} }
let mut child = command let mut child = command
.spawn() .spawn()
.with_context(|| format!("Failed to spawn solc at {}", path.display()))?; .with_context(|| format!("Failed to spawn solc at {}", path.display()))?;
let stdin = child.stdin.as_mut().expect("should be piped"); let stdin = child.stdin.as_mut().expect("should be piped");
let serialized_input = serde_json::to_vec(&input) let serialized_input = serde_json::to_vec(&input)
.context("Failed to serialize Standard JSON input for solc")?; .context("Failed to serialize Standard JSON input for solc")?;
stdin stdin
.write_all(&serialized_input) .write_all(&serialized_input)
.await .await
.context("Failed to write Standard JSON to solc stdin")?; .context("Failed to write Standard JSON to solc stdin")?;
let output = child let output = child
.wait_with_output() .wait_with_output()
.await .await
.context("Failed while waiting for solc process to finish")?; .context("Failed while waiting for solc process to finish")?;
if !output.status.success() { if !output.status.success() {
let json_in = serde_json::to_string_pretty(&input) let json_in = serde_json::to_string_pretty(&input)
.context("Failed to pretty-print Standard JSON input for logging")?; .context("Failed to pretty-print Standard JSON input for logging")?;
tracing::error!( tracing::error!(
status = %output.status, status = %output.status,
json_input = json_in, json_input = json_in,
"Compilation using solc failed" "Compilation using solc failed"
); );
anyhow::bail!("Compilation failed"); anyhow::bail!("Compilation failed");
} }
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout) let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
.map_err(|e| { .map_err(|e| {
anyhow::anyhow!( anyhow::anyhow!(
"failed to parse resolc JSON output: {e}\nstdout: {}", "failed to parse resolc JSON output: {e}\nstdout: {}",
String::from_utf8_lossy(&output.stdout) String::from_utf8_lossy(&output.stdout)
) )
}) })
.context("Failed to parse solc standard JSON output")?; .context("Failed to parse solc standard JSON output")?;
// Detect whether the compiler output contained errors and report them through logs and // Detect whether the compiler output contained errors and report them through logs and
// returned errors instead of passing along compiler output that might contain errors. // returned errors instead of passing along compiler output that might contain errors.
for error in parsed.errors.iter() { for error in parsed.errors.iter() {
if error.severity == Severity::Error { if error.severity == Severity::Error {
tracing::error!(?error, ?input, "Encountered an error in the compilation"); tracing::error!(?error, ?input, "Encountered an error in the compilation");
anyhow::bail!("Encountered an error in the compilation: {error}") anyhow::bail!("Encountered an error in the compilation: {error}")
} }
} }
tracing::debug!( tracing::debug!(
output = %String::from_utf8_lossy(&output.stdout).to_string(), output = %String::from_utf8_lossy(&output.stdout).to_string(),
"Compiled successfully" "Compiled successfully"
); );
let mut compiler_output = CompilerOutput::default(); let mut compiler_output = CompilerOutput::default();
for (contract_path, contracts) in parsed.contracts { for (contract_path, contracts) in parsed.contracts {
let map = compiler_output let map = compiler_output
.contracts .contracts
.entry(contract_path.canonicalize().with_context(|| { .entry(contract_path.canonicalize().with_context(|| {
format!( format!("Failed to canonicalize contract path {}", contract_path.display())
"Failed to canonicalize contract path {}", })?)
contract_path.display() .or_default();
) for (contract_name, contract_info) in contracts.into_iter() {
})?) let source_code = contract_info
.or_default(); .evm
for (contract_name, contract_info) in contracts.into_iter() { .and_then(|evm| evm.bytecode)
let source_code = contract_info .map(|bytecode| match bytecode.object {
.evm BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
.and_then(|evm| evm.bytecode) BytecodeObject::Unlinked(unlinked) => unlinked,
.map(|bytecode| match bytecode.object { })
BytecodeObject::Bytecode(bytecode) => bytecode.to_string(), .context("Unexpected - contract compiled with solc has no source code")?;
BytecodeObject::Unlinked(unlinked) => unlinked, let abi = contract_info
}) .abi
.context("Unexpected - contract compiled with solc has no source code")?; .context("Unexpected - contract compiled with solc as no ABI")?;
let abi = contract_info map.insert(contract_name, (source_code, abi));
.abi }
.context("Unexpected - contract compiled with solc as no ABI")?; }
map.insert(contract_name, (source_code, abi));
}
}
Ok(compiler_output) Ok(compiler_output)
}) })
} }
fn supports_mode( fn supports_mode(
&self, &self,
_optimize_setting: ModeOptimizerSetting, _optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline, pipeline: ModePipeline,
) -> bool { ) -> bool {
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
// (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough. // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
pipeline == ModePipeline::ViaEVMAssembly // is new enough.
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul()) pipeline == ModePipeline::ViaEVMAssembly ||
} (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
}
} }
impl Solc { impl Solc {
fn compiler_supports_yul(&self) -> bool { fn compiler_supports_yul(&self) -> bool {
const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13); const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
} }
} }
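`compiler_supports_yul` reduces the feature probe to a semver comparison, since `--via-ir` appeared in solc 0.8.13 as the comment above notes. A sketch of the same gate in isolation:

use semver::Version;

const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);

// True for any compiler new enough to accept --via-ir.
fn supports_via_ir(solc_version: &Version) -> bool {
    solc_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
}

// supports_via_ir(&Version::new(0, 8, 30)) == true
// supports_via_ir(&Version::new(0, 8, 12)) == false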
+68 -68
View File
@@ -7,82 +7,82 @@ use semver::Version;
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_solc() { async fn contracts_can_be_compiled_with_solc() {
// Arrange // Arrange
let args = TestExecutionContext::default(); let args = TestExecutionContext::default();
let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
// Act // Act
let output = Compiler::new() let output = Compiler::new()
.with_source("./tests/assets/array_one_element/callable.sol") .with_source("./tests/assets/array_one_element/callable.sol")
.unwrap() .unwrap()
.with_source("./tests/assets/array_one_element/main.sol") .with_source("./tests/assets/array_one_element/main.sol")
.unwrap() .unwrap()
.try_build(&solc) .try_build(&solc)
.await; .await;
// Assert // Assert
let output = output.expect("Failed to compile"); let output = output.expect("Failed to compile");
assert_eq!(output.contracts.len(), 2); assert_eq!(output.contracts.len(), 2);
let main_file_contracts = output let main_file_contracts = output
.contracts .contracts
.get( .get(
&PathBuf::from("./tests/assets/array_one_element/main.sol") &PathBuf::from("./tests/assets/array_one_element/main.sol")
.canonicalize() .canonicalize()
.unwrap(), .unwrap(),
) )
.unwrap(); .unwrap();
let callable_file_contracts = output let callable_file_contracts = output
.contracts .contracts
.get( .get(
&PathBuf::from("./tests/assets/array_one_element/callable.sol") &PathBuf::from("./tests/assets/array_one_element/callable.sol")
.canonicalize() .canonicalize()
.unwrap(), .unwrap(),
) )
.unwrap(); .unwrap();
assert!(main_file_contracts.contains_key("Main")); assert!(main_file_contracts.contains_key("Main"));
assert!(callable_file_contracts.contains_key("Callable")); assert!(callable_file_contracts.contains_key("Callable"));
} }
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_resolc() { async fn contracts_can_be_compiled_with_resolc() {
// Arrange // Arrange
let args = TestExecutionContext::default(); let args = TestExecutionContext::default();
let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
// Act // Act
let output = Compiler::new() let output = Compiler::new()
.with_source("./tests/assets/array_one_element/callable.sol") .with_source("./tests/assets/array_one_element/callable.sol")
.unwrap() .unwrap()
.with_source("./tests/assets/array_one_element/main.sol") .with_source("./tests/assets/array_one_element/main.sol")
.unwrap() .unwrap()
.try_build(&resolc) .try_build(&resolc)
.await; .await;
// Assert // Assert
let output = output.expect("Failed to compile"); let output = output.expect("Failed to compile");
assert_eq!(output.contracts.len(), 2); assert_eq!(output.contracts.len(), 2);
let main_file_contracts = output let main_file_contracts = output
.contracts .contracts
.get( .get(
&PathBuf::from("./tests/assets/array_one_element/main.sol") &PathBuf::from("./tests/assets/array_one_element/main.sol")
.canonicalize() .canonicalize()
.unwrap(), .unwrap(),
) )
.unwrap(); .unwrap();
let callable_file_contracts = output let callable_file_contracts = output
.contracts .contracts
.get( .get(
&PathBuf::from("./tests/assets/array_one_element/callable.sol") &PathBuf::from("./tests/assets/array_one_element/callable.sol")
.canonicalize() .canonicalize()
.unwrap(), .unwrap(),
) )
.unwrap(); .unwrap();
assert!(main_file_contracts.contains_key("Main")); assert!(main_file_contracts.contains_key("Main"));
assert!(callable_file_contracts.contains_key("Callable")); assert!(callable_file_contracts.contains_key("Callable"));
} }
+555 -568
View File
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -14,164 +14,157 @@ use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter; use revive_dt_report::Reporter;
use crate::{ use crate::{
differential_benchmarks::{Driver, Watcher, WatcherEvent}, differential_benchmarks::{Driver, Watcher, WatcherEvent},
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
}; };
/// Handles the differential testing executing it according to the information defined in the /// Handles the differential testing executing it according to the information defined in the
/// context /// context
#[instrument(level = "info", err(Debug), skip_all)] #[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_benchmarks( pub async fn handle_differential_benchmarks(
mut context: BenchmarkingContext, mut context: BenchmarkingContext,
reporter: Reporter, reporter: Reporter,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// A bit of a hack, but we need to override the number of nodes specified through the CLI since // A bit of a hack, but we need to override the number of nodes specified through the CLI since
// benchmarks can only be run on a single node. Perhaps in the future we'll have a cleaner way // benchmarks can only be run on a single node. Perhaps in the future we'll have a cleaner way
// to do this, but for the time being we override the CLI arguments. // to do this, but for the time being we override the CLI arguments.
if context.concurrency_configuration.number_of_nodes != 1 { if context.concurrency_configuration.number_of_nodes != 1 {
warn!( warn!(
specified_number_of_nodes = context.concurrency_configuration.number_of_nodes, specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
updated_number_of_nodes = 1, updated_number_of_nodes = 1,
"Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments." "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
); );
context.concurrency_configuration.number_of_nodes = 1; context.concurrency_configuration.number_of_nodes = 1;
}; };
let full_context = Context::Benchmark(Box::new(context.clone())); let full_context = Context::Benchmark(Box::new(context.clone()));
// Discover all of the metadata files that are defined in the context. // Discover all of the metadata files that are defined in the context.
let metadata_files = collect_metadata_files(&context) let metadata_files = collect_metadata_files(&context)
.context("Failed to collect metadata files for differential testing")?; .context("Failed to collect metadata files for differential testing")?;
info!(len = metadata_files.len(), "Discovered metadata files"); info!(len = metadata_files.len(), "Discovered metadata files");
// Discover the list of platforms that the tests should run on based on the context. // Discover the list of platforms that the tests should run on based on the context.
let platforms = context let platforms = context
.platforms .platforms
.iter() .iter()
.copied() .copied()
.map(Into::<&dyn Platform>::into) .map(Into::<&dyn Platform>::into)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Starting the nodes of the various platforms specified in the context. Note that we use the // Starting the nodes of the various platforms specified in the context. Note that we use the
// node pool since it contains all of the code needed to spawn nodes from A to Z and therefore // node pool since it contains all of the code needed to spawn nodes from A to Z and therefore
// it's the preferred way for us to start nodes even when we're starting just a single node. The // it's the preferred way for us to start nodes even when we're starting just a single node. The
// added overhead from it is quite small (performance wise) since it's involved only when we're // added overhead from it is quite small (performance wise) since it's involved only when we're
// creating the test definitions, but it might have other maintenance overhead as it obscures // creating the test definitions, but it might have other maintenance overhead as it obscures
// the fact that only a single node is spawned. // the fact that only a single node is spawned.
let platforms_and_nodes = { let platforms_and_nodes = {
let mut map = BTreeMap::new(); let mut map = BTreeMap::new();
for platform in platforms.iter() { for platform in platforms.iter() {
let platform_identifier = platform.platform_identifier(); let platform_identifier = platform.platform_identifier();
let node_pool = NodePool::new(full_context.clone(), *platform) let node_pool = NodePool::new(full_context.clone(), *platform)
.await .await
.inspect_err(|err| { .inspect_err(|err| {
error!( error!(
?err, ?err,
%platform_identifier, %platform_identifier,
"Failed to initialize the node pool for the platform." "Failed to initialize the node pool for the platform."
) )
}) })
.context("Failed to initialize the node pool")?; .context("Failed to initialize the node pool")?;
map.insert(platform_identifier, (*platform, node_pool)); map.insert(platform_identifier, (*platform, node_pool));
} }
map map
}; };
info!("Spawned the platform nodes"); info!("Spawned the platform nodes");
// Preparing test definitions for the execution. // Preparing test definitions for the execution.
let test_definitions = create_test_definitions_stream( let test_definitions = create_test_definitions_stream(
&full_context, &full_context,
metadata_files.iter(), metadata_files.iter(),
&platforms_and_nodes, &platforms_and_nodes,
reporter.clone(), reporter.clone(),
) )
.await .await
.collect::<Vec<_>>() .collect::<Vec<_>>()
.await; .await;
info!(len = test_definitions.len(), "Created test definitions"); info!(len = test_definitions.len(), "Created test definitions");
// Creating the objects that will be shared between the various runs. The cached compiler is // Creating the objects that will be shared between the various runs. The cached compiler is
// currently the only one that's safe to share between runs. // currently the only one that's safe to share between runs.
let cached_compiler = CachedCompiler::new( let cached_compiler = CachedCompiler::new(
context context.working_directory.as_path().join("compilation_cache"),
.working_directory context.compilation_configuration.invalidate_compilation_cache,
.as_path() )
.join("compilation_cache"), .await
context .map(Arc::new)
.compilation_configuration .context("Failed to initialize cached compiler")?;
.invalidate_compilation_cache,
)
.await
.map(Arc::new)
.context("Failed to initialize cached compiler")?;
// Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd
// like to run all of the workloads for one platform and then the next, sequentially, so that // like to run all of the workloads for one platform and then the next, sequentially, so that
// the effect of concurrency on the benchmarking is minimized. // the effect of concurrency on the benchmarking is minimized.
for platform in platforms.iter() { for platform in platforms.iter() {
let platform_identifier = platform.platform_identifier(); let platform_identifier = platform.platform_identifier();
let span = info_span!("Benchmarking for the platform", %platform_identifier); let span = info_span!("Benchmarking for the platform", %platform_identifier);
let _guard = span.enter(); let _guard = span.enter();
for test_definition in test_definitions.iter() { for test_definition in test_definitions.iter() {
let platform_information = &test_definition.platforms[&platform_identifier]; let platform_information = &test_definition.platforms[&platform_identifier];
let span = info_span!( let span = info_span!(
"Executing workload", "Executing workload",
metadata_file_path = %test_definition.metadata_file_path.display(), metadata_file_path = %test_definition.metadata_file_path.display(),
case_idx = %test_definition.case_idx, case_idx = %test_definition.case_idx,
mode = %test_definition.mode, mode = %test_definition.mode,
); );
let _guard = span.enter(); let _guard = span.enter();
// Initializing all of the components required to execute this particular workload. // Initializing all of the components required to execute this particular workload.
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
context.wallet_configuration.highest_private_key_exclusive(), context.wallet_configuration.highest_private_key_exclusive(),
))); )));
let (watcher, watcher_tx) = Watcher::new( let (watcher, watcher_tx) = Watcher::new(
platform_identifier, platform_identifier,
platform_information platform_information
.node .node
.subscribe_to_full_blocks_information() .subscribe_to_full_blocks_information()
.await .await
.context("Failed to subscribe to full blocks information from the node")?, .context("Failed to subscribe to full blocks information from the node")?,
); );
let driver = Driver::new( let driver = Driver::new(
platform_information, platform_information,
test_definition, test_definition,
private_key_allocator, private_key_allocator,
cached_compiler.as_ref(), cached_compiler.as_ref(),
watcher_tx.clone(), watcher_tx.clone(),
test_definition test_definition
.case .case
.steps_iterator_for_benchmarks(context.default_repetition_count) .steps_iterator_for_benchmarks(context.default_repetition_count)
.enumerate() .enumerate()
.map(|(step_idx, step)| -> (StepPath, Step) { .map(|(step_idx, step)| -> (StepPath, Step) {
(StepPath::new(vec![StepIdx::new(step_idx)]), step) (StepPath::new(vec![StepIdx::new(step_idx)]), step)
}), }),
) )
.await .await
.context("Failed to create the benchmarks driver")?; .context("Failed to create the benchmarks driver")?;
futures::future::try_join( futures::future::try_join(
watcher.run(), watcher.run(),
driver.execute_all().inspect(|_| { driver.execute_all().inspect(|_| {
info!("All transactions submitted - driver completed execution"); info!("All transactions submitted - driver completed execution");
watcher_tx watcher_tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap()
.send(WatcherEvent::AllTransactionsSubmitted) }),
.unwrap() )
}), .await
) .context("Failed to run the driver and executor")
.await .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
.context("Failed to run the driver and executor") .inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
.inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded")) }
.inspect_err(|err| error!(?err, "Workload Execution Failed"))?; }
}
}
Ok(()) Ok(())
} }
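The watcher/driver coordination above boils down to two futures joined with `try_join`, with completion signalled over the same channel the watcher reads. A minimal sketch of that shape; the `Event` type, `run_pair` name, and error messages are illustrative stand-ins for the crate's own:

use tokio::sync::mpsc::unbounded_channel;

#[derive(Debug)]
enum Event {
    Submitted(u64),
    AllSubmitted,
}

async fn run_pair() -> anyhow::Result<()> {
    let (tx, mut rx) = unbounded_channel::<Event>();
    let watcher = async move {
        // Drain events until the explicit completion marker arrives.
        while let Some(event) = rx.recv().await {
            if matches!(event, Event::AllSubmitted) {
                break;
            }
        }
        Ok::<_, anyhow::Error>(())
    };
    let driver = async move {
        // Submit work, registering each item with the watcher.
        tx.send(Event::Submitted(1))
            .map_err(|_| anyhow::anyhow!("watcher channel closed"))?;
        // Signal that nothing more will be sent, mirroring AllTransactionsSubmitted.
        tx.send(Event::AllSubmitted)
            .map_err(|_| anyhow::anyhow!("watcher channel closed"))?;
        Ok::<_, anyhow::Error>(())
    };
    // Run both concurrently; either error aborts the pair, as in the code above.
    futures::future::try_join(watcher, driver).await?;
    Ok(())
}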
@@ -1,8 +1,8 @@
use std::{collections::HashMap, path::PathBuf}; use std::{collections::HashMap, path::PathBuf};
use alloy::{ use alloy::{
json_abi::JsonAbi, json_abi::JsonAbi,
primitives::{Address, U256}, primitives::{Address, U256},
}; };
use revive_dt_format::metadata::{ContractIdent, ContractInstance}; use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,34 +10,31 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)] #[derive(Clone)]
/// The state associated with the test execution of one of the workloads. /// The state associated with the test execution of one of the workloads.
pub struct ExecutionState { pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries linked /// The compiled contracts, these contracts have been compiled and have had the libraries
/// against them and therefore they're ready to be deployed on-demand. /// linked against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them. /// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata file. /// This map stores the variables used for each one of the cases contained in the metadata
pub variables: HashMap<String, U256>, /// file.
pub variables: HashMap<String, U256>,
} }
impl ExecutionState { impl ExecutionState {
pub fn new( pub fn new(
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
) -> Self { ) -> Self {
Self { Self { compiled_contracts, deployed_contracts, variables: Default::default() }
compiled_contracts, }
deployed_contracts,
variables: Default::default(),
}
}
pub fn empty() -> Self { pub fn empty() -> Self {
Self { Self {
compiled_contracts: Default::default(), compiled_contracts: Default::default(),
deployed_contracts: Default::default(), deployed_contracts: Default::default(),
variables: Default::default(), variables: Default::default(),
} }
} }
} }
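A small usage sketch for `ExecutionState`, assuming the type is in scope; the variable name and amount are placeholders, not values from any real test case:

use alloy::primitives::U256;

let mut state = ExecutionState::empty();
// Record a case variable as a step executes ("$BALANCE" is a placeholder name).
state.variables.insert("$BALANCE".to_string(), U256::from(1_000u64));
assert_eq!(state.variables["$BALANCE"], U256::from(1_000u64));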
+148 -166
View File
@@ -6,8 +6,8 @@ use futures::{Stream, StreamExt};
use revive_dt_common::types::PlatformIdentifier; use revive_dt_common::types::PlatformIdentifier;
use revive_dt_node_interaction::MinedBlockInformation; use revive_dt_node_interaction::MinedBlockInformation;
use tokio::sync::{ use tokio::sync::{
RwLock, RwLock,
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
}; };
use tracing::{info, instrument}; use tracing::{info, instrument};
@@ -15,193 +15,175 @@ use tracing::{info, instrument};
/// and MUST NOT be re-used between workloads since it holds important internal state for a given /// and MUST NOT be re-used between workloads since it holds important internal state for a given
/// workload and is not designed for reuse. /// workload and is not designed for reuse.
pub struct Watcher { pub struct Watcher {
/// The identifier of the platform that this watcher is for. /// The identifier of the platform that this watcher is for.
platform_identifier: PlatformIdentifier, platform_identifier: PlatformIdentifier,
/// The receive side of the channel that all of the drivers and various other parts of the code /// The receive side of the channel that all of the drivers and various other parts of the code
/// send events to the watcher on. /// send events to the watcher on.
rx: UnboundedReceiver<WatcherEvent>, rx: UnboundedReceiver<WatcherEvent>,
/// This is a stream of the blocks that were mined by the node. This is for a single platform /// This is a stream of the blocks that were mined by the node. This is for a single platform
/// and a single node from that platform. /// and a single node from that platform.
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>, blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
} }
impl Watcher { impl Watcher {
pub fn new( pub fn new(
platform_identifier: PlatformIdentifier, platform_identifier: PlatformIdentifier,
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>, blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
) -> (Self, UnboundedSender<WatcherEvent>) { ) -> (Self, UnboundedSender<WatcherEvent>) {
let (tx, rx) = unbounded_channel::<WatcherEvent>(); let (tx, rx) = unbounded_channel::<WatcherEvent>();
( (Self { platform_identifier, rx, blocks_stream }, tx)
Self { }
platform_identifier,
rx,
blocks_stream,
},
tx,
)
}
#[instrument(level = "info", skip_all)] #[instrument(level = "info", skip_all)]
pub async fn run(mut self) -> Result<()> { pub async fn run(mut self) -> Result<()> {
// The first event that the watcher receives must be a `RepetitionStartEvent` that informs // The first event that the watcher receives must be a `RepetitionStartEvent` that informs
// the watcher of the last block number to ignore, and hence of the first block that it // the watcher of the last block number to ignore, and hence of the first block that it
// should actually look at. // should actually look at.
let ignore_block_before = loop { let ignore_block_before = loop {
let Some(WatcherEvent::RepetitionStartEvent { let Some(WatcherEvent::RepetitionStartEvent { ignore_block_before }) =
ignore_block_before, self.rx.recv().await
}) = self.rx.recv().await else {
else { continue;
continue; };
}; break ignore_block_before;
break ignore_block_before; };
};
// This is the set of the transaction hashes that the watcher should be looking for and // This is the set of the transaction hashes that the watcher should be looking for and
// watch for them in the blocks. The watcher will keep watching for blocks until it sees // watch for them in the blocks. The watcher will keep watching for blocks until it sees
// that all of the transactions that it was watching for have been seen in the mined blocks. // that all of the transactions that it was watching for have been seen in the mined blocks.
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new())); let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
// A boolean that keeps track of whether all of the transactions were submitted or if more // A boolean that keeps track of whether all of the transactions were submitted or if more
// txs are expected to come through the receive side of the channel. We do not want to rely // txs are expected to come through the receive side of the channel. We do not want to rely
// on the channel closing alone for the watcher to know that all of the transactions were // on the channel closing alone for the watcher to know that all of the transactions were
// submitted; instead, we want an explicit event, sent by the core orchestrator, that // submitted; instead, we want an explicit event, sent by the core orchestrator, that
// informs the watcher that no further transactions are to be expected and that it can // informs the watcher that no further transactions are to be expected and that it can
// safely ignore the channel. // safely ignore the channel.
let all_transactions_submitted = Arc::new(RwLock::new(false)); let all_transactions_submitted = Arc::new(RwLock::new(false));
let watcher_event_watching_task = { let watcher_event_watching_task = {
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
let all_transactions_submitted = all_transactions_submitted.clone(); let all_transactions_submitted = all_transactions_submitted.clone();
async move { async move {
while let Some(watcher_event) = self.rx.recv().await { while let Some(watcher_event) = self.rx.recv().await {
match watcher_event { match watcher_event {
// Subsequent repetition starts are ignored since certain workloads can // Subsequent repetition starts are ignored since certain workloads can
// contain nested repetitions, and no action is needed when a repetition // contain nested repetitions, and no action is needed when a repetition
// is nested. // is nested.
WatcherEvent::RepetitionStartEvent { .. } => {} WatcherEvent::RepetitionStartEvent { .. } => {},
WatcherEvent::SubmittedTransaction { transaction_hash } => { WatcherEvent::SubmittedTransaction { transaction_hash } => {
watch_for_transaction_hashes watch_for_transaction_hashes.write().await.insert(transaction_hash);
.write() },
.await WatcherEvent::AllTransactionsSubmitted => {
.insert(transaction_hash); *all_transactions_submitted.write().await = true;
} self.rx.close();
WatcherEvent::AllTransactionsSubmitted => { info!("Watcher's Events Watching Task Finished");
*all_transactions_submitted.write().await = true; break;
self.rx.close(); },
info!("Watcher's Events Watching Task Finished"); }
break; }
} }
} };
} let block_information_watching_task = {
} let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
}; let all_transactions_submitted = all_transactions_submitted.clone();
let block_information_watching_task = { let mut blocks_information_stream = self.blocks_stream;
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); async move {
let all_transactions_submitted = all_transactions_submitted.clone(); let mut mined_blocks_information = Vec::new();
let mut blocks_information_stream = self.blocks_stream;
async move {
let mut mined_blocks_information = Vec::new();
while let Some(block) = blocks_information_stream.next().await { while let Some(block) = blocks_information_stream.next().await {
// If the block number is equal to or less than the last block before the // If the block number is equal to or less than the last block before the
// repetition then we ignore it and continue on to the next block. // repetition then we ignore it and continue on to the next block.
if block.block_number <= ignore_block_before { if block.block_number <= ignore_block_before {
continue; continue;
} }
if *all_transactions_submitted.read().await if *all_transactions_submitted.read().await &&
&& watch_for_transaction_hashes.read().await.is_empty() watch_for_transaction_hashes.read().await.is_empty()
{ {
break; break;
} }
info!( info!(
remaining_transactions = watch_for_transaction_hashes.read().await.len(), remaining_transactions = watch_for_transaction_hashes.read().await.len(),
block_tx_count = block.transaction_hashes.len(), block_tx_count = block.transaction_hashes.len(),
"Observed a block" "Observed a block"
); );
// Remove all of the transaction hashes observed in this block from the txs we // Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for. // are currently watching for.
let mut watch_for_transaction_hashes = let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await; watch_for_transaction_hashes.write().await;
for tx_hash in block.transaction_hashes.iter() { for tx_hash in block.transaction_hashes.iter() {
watch_for_transaction_hashes.remove(tx_hash); watch_for_transaction_hashes.remove(tx_hash);
} }
mined_blocks_information.push(block); mined_blocks_information.push(block);
} }
info!("Watcher's Block Watching Task Finished"); info!("Watcher's Block Watching Task Finished");
mined_blocks_information mined_blocks_information
} }
}; };
let (_, mined_blocks_information) = let (_, mined_blocks_information) =
futures::future::join(watcher_event_watching_task, block_information_watching_task) futures::future::join(watcher_event_watching_task, block_information_watching_task)
.await; .await;
// region:TEMPORARY // region:TEMPORARY
{ {
// TODO: The following code is TEMPORARY and will be removed once we have proper // TODO: The following code is TEMPORARY and will be removed once we have proper
// reporting in place. It serves as a way of doing some very simple reporting // reporting in place. It serves as a way of doing some very simple reporting
// for the time being. // for the time being.
use std::io::Write; use std::io::Write;
let mut stderr = std::io::stderr().lock(); let mut stderr = std::io::stderr().lock();
writeln!( writeln!(stderr, "Watcher information for {}", self.platform_identifier)?;
stderr, writeln!(stderr, "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count")?;
"Watcher information for {}", for block in mined_blocks_information {
self.platform_identifier writeln!(
)?; stderr,
writeln!( "{},{},{},{},{}",
stderr, block.block_number,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count" block.block_timestamp,
)?; block.mined_gas,
for block in mined_blocks_information { block.block_gas_limit,
writeln!( block.transaction_hashes.len()
stderr, )?
"{},{},{},{},{}", }
block.block_number, }
block.block_timestamp, // endregion:TEMPORARY
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
)?
}
}
// endregion:TEMPORARY
Ok(()) Ok(())
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum WatcherEvent { pub enum WatcherEvent {
/// Informs the watcher that it should begin watching for the blocks mined by the platforms. /// Informs the watcher that it should begin watching for the blocks mined by the platforms.
/// Before the watcher receives this event it will not be watching for the mined blocks. The /// Before the watcher receives this event it will not be watching for the mined blocks. The
/// reason behind this is that we do not want the initialization transactions (e.g., contract /// reason behind this is that we do not want the initialization transactions (e.g., contract
/// deployments) to be included in the overall TPS and GPS measurements, since these blocks will /// deployments) to be included in the overall TPS and GPS measurements, since these blocks will
/// most likely only contain a single transaction, as they're just being used for /// most likely only contain a single transaction, as they're just being used for
/// initialization. /// initialization.
RepetitionStartEvent { RepetitionStartEvent {
/// This is the block number of the last block seen before the repetition started. This is /// This is the block number of the last block seen before the repetition started. This is
/// used to instruct the watcher to ignore all blocks prior to this block when it starts /// used to instruct the watcher to ignore all blocks prior to this block when it starts
/// streaming the blocks. /// streaming the blocks.
ignore_block_before: BlockNumber, ignore_block_before: BlockNumber,
}, },
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a /// Informs the watcher that a transaction was submitted and that the watcher should watch for a
/// transaction with this hash in the blocks that it watches. /// transaction with this hash in the blocks that it watches.
SubmittedTransaction { SubmittedTransaction {
/// The hash of the submitted transaction. /// The hash of the submitted transaction.
transaction_hash: TxHash, transaction_hash: TxHash,
}, },
/// Informs the watcher that all of the transactions of this benchmark have been submitted and /// Informs the watcher that all of the transactions of this benchmark have been submitted and
/// that it can expect to receive no further transaction hashes and need not watch the channel /// that it can expect to receive no further transaction hashes and need not watch the channel
/// any longer. /// any longer.
AllTransactionsSubmitted, AllTransactionsSubmitted,
} }
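The documented lifecycle implies a strict ordering on the sender side. A sketch, assuming `watcher_tx` is the `UnboundedSender<WatcherEvent>` returned from `Watcher::new`, `tx_hash` is a previously obtained `TxHash`, and the block number is a placeholder:

// 1. Tell the watcher where the repetition starts; blocks numbered 42 or lower
//    are ignored (42 is a placeholder value).
watcher_tx.send(WatcherEvent::RepetitionStartEvent { ignore_block_before: 42 })?;
// 2. Register every submitted transaction so the watcher can match it against
//    the mined blocks.
watcher_tx.send(WatcherEvent::SubmittedTransaction { transaction_hash: tx_hash })?;
// 3. Close the protocol; the watcher stops reading the channel after this.
watcher_tx.send(WatcherEvent::AllTransactionsSubmitted)?;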
File diff suppressed because it is too large
+217 -228
View File
@@ -1,16 +1,16 @@
//! The main entry point into differential testing. //! The main entry point into differential testing.
use std::{ use std::{
collections::{BTreeMap, BTreeSet}, collections::{BTreeMap, BTreeSet},
io::{BufWriter, Write, stderr}, io::{BufWriter, Write, stderr},
sync::Arc, sync::Arc,
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use crate::Platform;
use anyhow::Context as _; use anyhow::Context as _;
use futures::{FutureExt, StreamExt}; use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator; use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use tokio::sync::{Mutex, RwLock, Semaphore}; use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument}; use tracing::{Instrument, error, info, info_span, instrument};
@@ -18,260 +18,249 @@ use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus}; use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
use crate::{ use crate::{
differential_tests::Driver, differential_tests::Driver,
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
}; };
/// Handles the differential testing executing it according to the information defined in the /// Handles the differential testing executing it according to the information defined in the
/// context /// context
#[instrument(level = "info", err(Debug), skip_all)] #[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_tests( pub async fn handle_differential_tests(
context: TestExecutionContext, context: TestExecutionContext,
reporter: Reporter, reporter: Reporter,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let reporter_clone = reporter.clone(); let reporter_clone = reporter.clone();
// Discover all of the metadata files that are defined in the context. // Discover all of the metadata files that are defined in the context.
let metadata_files = collect_metadata_files(&context) let metadata_files = collect_metadata_files(&context)
.context("Failed to collect metadata files for differential testing")?; .context("Failed to collect metadata files for differential testing")?;
info!(len = metadata_files.len(), "Discovered metadata files"); info!(len = metadata_files.len(), "Discovered metadata files");
// Discover the list of platforms that the tests should run on based on the context. // Discover the list of platforms that the tests should run on based on the context.
let platforms = context let platforms = context
.platforms .platforms
.iter() .iter()
.copied() .copied()
.map(Into::<&dyn Platform>::into) .map(Into::<&dyn Platform>::into)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Starting the nodes of the various platforms specified in the context. // Starting the nodes of the various platforms specified in the context.
let platforms_and_nodes = { let platforms_and_nodes = {
let mut map = BTreeMap::new(); let mut map = BTreeMap::new();
for platform in platforms.iter() { for platform in platforms.iter() {
let platform_identifier = platform.platform_identifier(); let platform_identifier = platform.platform_identifier();
let context = Context::Test(Box::new(context.clone())); let context = Context::Test(Box::new(context.clone()));
let node_pool = NodePool::new(context, *platform) let node_pool = NodePool::new(context, *platform)
.await .await
.inspect_err(|err| { .inspect_err(|err| {
error!( error!(
?err, ?err,
%platform_identifier, %platform_identifier,
"Failed to initialize the node pool for the platform." "Failed to initialize the node pool for the platform."
) )
}) })
.context("Failed to initialize the node pool")?; .context("Failed to initialize the node pool")?;
map.insert(platform_identifier, (*platform, node_pool)); map.insert(platform_identifier, (*platform, node_pool));
} }
map map
}; };
info!("Spawned the platform nodes"); info!("Spawned the platform nodes");
// Preparing test definitions. // Preparing test definitions.
let full_context = Context::Test(Box::new(context.clone())); let full_context = Context::Test(Box::new(context.clone()));
let test_definitions = create_test_definitions_stream( let test_definitions = create_test_definitions_stream(
&full_context, &full_context,
metadata_files.iter(), metadata_files.iter(),
&platforms_and_nodes, &platforms_and_nodes,
reporter.clone(), reporter.clone(),
) )
.await .await
.collect::<Vec<_>>() .collect::<Vec<_>>()
.await; .await;
info!(len = test_definitions.len(), "Created test definitions"); info!(len = test_definitions.len(), "Created test definitions");
// Creating everything else required for the driver to run. // Creating everything else required for the driver to run.
let cached_compiler = CachedCompiler::new( let cached_compiler = CachedCompiler::new(
context context.working_directory.as_path().join("compilation_cache"),
.working_directory context.compilation_configuration.invalidate_compilation_cache,
.as_path() )
.join("compilation_cache"), .await
context .map(Arc::new)
.compilation_configuration .context("Failed to initialize cached compiler")?;
.invalidate_compilation_cache, let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
) context.wallet_configuration.highest_private_key_exclusive(),
.await )));
.map(Arc::new)
.context("Failed to initialize cached compiler")?;
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
context.wallet_configuration.highest_private_key_exclusive(),
)));
// Creating the driver and executing all of the steps. // Creating the driver and executing all of the steps.
let semaphore = context let semaphore = context
.concurrency_configuration .concurrency_configuration
.concurrency_limit() .concurrency_limit()
.map(Semaphore::new) .map(Semaphore::new)
.map(Arc::new); .map(Arc::new);
let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new())); let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map( let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
|(test_id, test_definition)| { |(test_id, test_definition)| {
let running_task_list = running_task_list.clone(); let running_task_list = running_task_list.clone();
let semaphore = semaphore.clone(); let semaphore = semaphore.clone();
let private_key_allocator = private_key_allocator.clone(); let private_key_allocator = private_key_allocator.clone();
let cached_compiler = cached_compiler.clone(); let cached_compiler = cached_compiler.clone();
let mode = test_definition.mode.clone(); let mode = test_definition.mode.clone();
let span = info_span!( let span = info_span!(
"Executing Test Case", "Executing Test Case",
test_id, test_id,
metadata_file_path = %test_definition.metadata_file_path.display(), metadata_file_path = %test_definition.metadata_file_path.display(),
case_idx = %test_definition.case_idx, case_idx = %test_definition.case_idx,
mode = %mode, mode = %mode,
); );
async move { async move {
let permit = match semaphore.as_ref() { let permit = match semaphore.as_ref() {
Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")), Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
None => None, None => None,
}; };
running_task_list.write().await.insert(test_id); running_task_list.write().await.insert(test_id);
let driver = match Driver::new_root( let driver = match Driver::new_root(
test_definition, test_definition,
private_key_allocator, private_key_allocator,
&cached_compiler, &cached_compiler,
) )
.await .await
{ {
Ok(driver) => driver, Ok(driver) => driver,
Err(error) => { Err(error) => {
test_definition test_definition
.reporter .reporter
.report_test_failed_event(format!("{error:#}")) .report_test_failed_event(format!("{error:#}"))
.expect("Can't fail"); .expect("Can't fail");
error!("Test Case Failed"); error!("Test Case Failed");
drop(permit); drop(permit);
running_task_list.write().await.remove(&test_id); running_task_list.write().await.remove(&test_id);
return; return;
} },
}; };
info!("Created the driver for the test case"); info!("Created the driver for the test case");
match driver.execute_all().await { match driver.execute_all().await {
Ok(steps_executed) => test_definition Ok(steps_executed) => test_definition
.reporter .reporter
.report_test_succeeded_event(steps_executed) .report_test_succeeded_event(steps_executed)
.expect("Can't fail"), .expect("Can't fail"),
Err(error) => { Err(error) => {
test_definition test_definition
.reporter .reporter
.report_test_failed_event(format!("{error:#}")) .report_test_failed_event(format!("{error:#}"))
.expect("Can't fail"); .expect("Can't fail");
error!("Test Case Failed"); error!("Test Case Failed");
} },
}; };
info!("Finished the execution of the test case"); info!("Finished the execution of the test case");
drop(permit); drop(permit);
running_task_list.write().await.remove(&test_id); running_task_list.write().await.remove(&test_id);
} }
.instrument(span) .instrument(span)
}, },
)) ))
.inspect(|_| { .inspect(|_| {
info!("Finished executing all test cases"); info!("Finished executing all test cases");
reporter_clone reporter_clone.report_completion_event().expect("Can't fail")
.report_completion_event() });
.expect("Can't fail") let cli_reporting_task = start_cli_reporting_task(reporter);
});
let cli_reporting_task = start_cli_reporting_task(reporter);
tokio::task::spawn(async move { tokio::task::spawn(async move {
loop { loop {
let remaining_tasks = running_task_list.read().await; let remaining_tasks = running_task_list.read().await;
info!( info!(count = remaining_tasks.len(), ?remaining_tasks, "Remaining Tests");
count = remaining_tasks.len(), tokio::time::sleep(Duration::from_secs(10)).await
?remaining_tasks, }
"Remaining Tests" });
);
tokio::time::sleep(Duration::from_secs(10)).await
}
});
futures::future::join(driver_task, cli_reporting_task).await; futures::future::join(driver_task, cli_reporting_task).await;
Ok(()) Ok(())
} }
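The optional concurrency limit used by the driver loop above (a permit is acquired only when a Semaphore was actually configured) reduces to this standalone sketch; it assumes the tokio and futures crates, and every name here is illustrative:

use std::sync::Arc;
use tokio::sync::Semaphore;

// Run `n_tasks` jobs, at most `limit` at a time when a limit is given.
async fn run_all(n_tasks: usize, limit: Option<usize>) {
    let semaphore = limit.map(Semaphore::new).map(Arc::new);
    let tasks = (0..n_tasks).map(|task_id| {
        let semaphore = semaphore.clone();
        async move {
            // With no configured limit there is no permit to hold.
            let _permit = match semaphore.as_ref() {
                Some(semaphore) => Some(semaphore.acquire().await.expect("never closed")),
                None => None,
            };
            println!("task {task_id} running");
        }
    });
    futures::future::join_all(tasks).await;
}

#[tokio::main]
async fn main() {
    run_all(8, Some(2)).await;
}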
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)] #[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) { async fn start_cli_reporting_task(reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter); drop(reporter);
let start = Instant::now(); let start = Instant::now();
const GREEN: &str = "\x1B[32m"; const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m"; const RED: &str = "\x1B[31m";
const GREY: &str = "\x1B[90m"; const GREY: &str = "\x1B[90m";
const COLOR_RESET: &str = "\x1B[0m"; const COLOR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m"; const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m"; const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0; let mut number_of_successes = 0;
let mut number_of_failures = 0; let mut number_of_failures = 0;
let mut buf = BufWriter::new(stderr()); let mut buf = BufWriter::new(stderr());
while let Ok(event) = aggregator_events_rx.recv().await { while let Ok(event) = aggregator_events_rx.recv().await {
let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
metadata_file_path, metadata_file_path,
mode, mode,
case_status, case_status,
} = event } = event
else { else {
continue; continue;
}; };
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display()); let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() { for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: "); let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status { let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => { TestCaseStatus::Succeeded { steps_executed } => {
number_of_successes += 1; number_of_successes += 1;
writeln!( writeln!(
buf, buf,
"{}{}Case Succeeded{} - Steps Executed: {}{}", "{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
) )
} },
TestCaseStatus::Failed { reason } => { TestCaseStatus::Failed { reason } => {
number_of_failures += 1; number_of_failures += 1;
writeln!( writeln!(
buf, buf,
"{}{}Case Failed{} - Reason: {}{}", "{}{}Case Failed{} - Reason: {}{}",
RED, RED,
BOLD, BOLD,
BOLD_RESET, BOLD_RESET,
reason.trim(), reason.trim(),
COLOR_RESET, COLOR_RESET,
) )
} },
TestCaseStatus::Ignored { reason, .. } => writeln!( TestCaseStatus::Ignored { reason, .. } => writeln!(
buf, buf,
"{}{}Case Ignored{} - Reason: {}{}", "{}{}Case Ignored{} - Reason: {}{}",
GREY, GREY,
BOLD, BOLD,
BOLD_RESET, BOLD_RESET,
reason.trim(), reason.trim(),
COLOR_RESET, COLOR_RESET,
), ),
}; };
} }
let _ = writeln!(buf); let _ = writeln!(buf);
} }
// Summary at the end. // Summary at the end.
let _ = writeln!( let _ = writeln!(
buf, buf,
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds", "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
number_of_successes + number_of_failures, number_of_successes + number_of_failures,
GREEN, GREEN,
number_of_successes, number_of_successes,
COLOR_RESET, COLOR_RESET,
RED, RED,
number_of_failures, number_of_failures,
COLOR_RESET, COLOR_RESET,
start.elapsed().as_secs() start.elapsed().as_secs()
); );
} }
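The ANSI constants above compose as plain prefix/suffix strings around the text to style; a minimal sketch of the same technique (standard escape codes, output text invented):

const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
const COLOR_RESET: &str = "\x1B[0m";

fn main() {
    // Bold colored status word, then reset the weight before the detail text,
    // and reset the color at the end of the line.
    println!("{GREEN}{BOLD}Case Succeeded{BOLD_RESET} 12 steps{COLOR_RESET}");
    println!("{RED}{BOLD}Case Failed{BOLD_RESET} timeout{COLOR_RESET}");
}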
@@ -1,8 +1,8 @@
use std::{collections::HashMap, path::PathBuf}; use std::{collections::HashMap, path::PathBuf};
use alloy::{ use alloy::{
json_abi::JsonAbi, json_abi::JsonAbi,
primitives::{Address, U256}, primitives::{Address, U256},
}; };
use revive_dt_format::metadata::{ContractIdent, ContractInstance}; use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,26 +10,23 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)] #[derive(Clone)]
/// The state associated with the test execution of one of the tests. /// The state associated with the test execution of one of the tests.
pub struct ExecutionState { pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries linked /// The compiled contracts, these contracts have been compiled and have had the libraries
/// against them and therefore they're ready to be deployed on-demand. /// linked against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them. /// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata file. /// This map stores the variables used for each one of the cases contained in the metadata
pub variables: HashMap<String, U256>, /// file.
pub variables: HashMap<String, U256>,
} }
impl ExecutionState { impl ExecutionState {
pub fn new( pub fn new(
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
) -> Self { ) -> Self {
Self { Self { compiled_contracts, deployed_contracts, variables: Default::default() }
compiled_contracts, }
deployed_contracts,
variables: Default::default(),
}
}
} }
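A rough usage sketch of the state shape described above, with simplified stand-ins for the crate's ContractInstance, Address, and JsonAbi types (names and values here are invented):

use std::collections::HashMap;

// Simplified stand-ins: instance name -> address, variable name -> value.
#[derive(Default, Clone)]
struct State {
    deployed_contracts: HashMap<String, String>,
    variables: HashMap<String, u128>,
}

fn main() {
    let mut state = State::default();
    state
        .deployed_contracts
        .insert("Counter".into(), "0x0000000000000000000000000000000000000001".into());
    state.variables.insert("$start".into(), 7);
    assert_eq!(state.variables["$start"], 7);
}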
+291 -302
@@ -2,16 +2,16 @@
//! be reused between runs. //! be reused between runs.
use std::{ use std::{
borrow::Cow, borrow::Cow,
collections::HashMap, collections::HashMap,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::{Arc, LazyLock}, sync::{Arc, LazyLock},
}; };
use crate::Platform;
use futures::FutureExt; use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier}; use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler}; use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata}; use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address}; use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -23,33 +23,30 @@ use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, debug, debug_span, instrument}; use tracing::{Instrument, debug, debug_span, instrument};
pub struct CachedCompiler<'a> { pub struct CachedCompiler<'a> {
/// The cache that stores the compiled contracts. /// The cache that stores the compiled contracts.
artifacts_cache: ArtifactsCache, artifacts_cache: ArtifactsCache,
/// This is a mechanism the cached compiler uses so that when multiple compilation requests /// This is a mechanism the cached compiler uses so that when multiple compilation requests
/// come in for the same contract, it is compiled only once; every other task requesting the /// come in for the same contract, it is compiled only once; every other task requesting the
/// same compilation concurrently gets the cached version. /// same compilation concurrently gets the cached version.
cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>, cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
} }
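The per-cache-key locking described above boils down to a double-checked map of Arc<Mutex<()>> guards; a self-contained sketch with String keys standing in for CacheKey (all names illustrative, tokio assumed):

use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

#[derive(Default)]
struct KeyLocks {
    locks: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl KeyLocks {
    // Fetch (or lazily create) the mutex guarding one cache key.
    async fn lock_for(&self, key: &str) -> Arc<Mutex<()>> {
        if let Some(lock) = self.locks.read().await.get(key).cloned() {
            return lock;
        }
        // Slow path: take the write lock and insert if still absent.
        self.locks.write().await.entry(key.to_owned()).or_default().clone()
    }
}

#[tokio::main]
async fn main() {
    let locks = KeyLocks::default();
    let mutex = locks.lock_for("solc-0.8.28:metadata.json:mode").await;
    let _guard = mutex.lock().await;
    // Only one task per key reaches this point at a time; the others wait
    // and then find the freshly written cache entry instead of recompiling.
}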
impl<'a> CachedCompiler<'a> { impl<'a> CachedCompiler<'a> {
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> { pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
let mut cache = ArtifactsCache::new(path); let mut cache = ArtifactsCache::new(path);
if invalidate_cache { if invalidate_cache {
cache = cache cache = cache
.with_invalidated_cache() .with_invalidated_cache()
.await .await
.context("Failed to invalidate compilation cache directory")?; .context("Failed to invalidate compilation cache directory")?;
} }
Ok(Self { Ok(Self { artifacts_cache: cache, cache_key_lock: Default::default() })
artifacts_cache: cache, }
cache_key_lock: Default::default(),
})
}
/// Compiles or gets the compilation artifacts from the cache. /// Compiles or gets the compilation artifacts from the cache.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument( #[instrument(
level = "debug", level = "debug",
skip_all, skip_all,
fields( fields(
@@ -59,317 +56,309 @@ impl<'a> CachedCompiler<'a> {
), ),
err err
)] )]
pub async fn compile_contracts( pub async fn compile_contracts(
&self, &self,
metadata: &'a Metadata, metadata: &'a Metadata,
metadata_file_path: &'a Path, metadata_file_path: &'a Path,
mode: Cow<'a, Mode>, mode: Cow<'a, Mode>,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>, deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &dyn SolidityCompiler, compiler: &dyn SolidityCompiler,
platform: &dyn Platform, platform: &dyn Platform,
reporter: &ExecutionSpecificReporter, reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> { ) -> Result<CompilerOutput> {
let cache_key = CacheKey { let cache_key = CacheKey {
compiler_identifier: platform.compiler_identifier(), compiler_identifier: platform.compiler_identifier(),
compiler_version: compiler.version().clone(), compiler_version: compiler.version().clone(),
metadata_file_path, metadata_file_path,
solc_mode: mode.clone(), solc_mode: mode.clone(),
}; };
let compilation_callback = || { let compilation_callback = || {
async move { async move {
compile_contracts( compile_contracts(
metadata metadata
.directory() .directory()
.context("Failed to get metadata directory while preparing compilation")?, .context("Failed to get metadata directory while preparing compilation")?,
metadata metadata
.files_to_compile() .files_to_compile()
.context("Failed to enumerate files to compile from metadata")?, .context("Failed to enumerate files to compile from metadata")?,
&mode, &mode,
deployed_libraries, deployed_libraries,
compiler, compiler,
reporter, reporter,
) )
.map(|compilation_result| compilation_result.map(CacheValue::new)) .map(|compilation_result| compilation_result.map(CacheValue::new))
.await .await
} }
.instrument(debug_span!( .instrument(debug_span!(
"Running compilation for the cache key", "Running compilation for the cache key",
cache_key.compiler_identifier = %cache_key.compiler_identifier, cache_key.compiler_identifier = %cache_key.compiler_identifier,
cache_key.compiler_version = %cache_key.compiler_version, cache_key.compiler_version = %cache_key.compiler_version,
cache_key.metadata_file_path = %cache_key.metadata_file_path.display(), cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
cache_key.solc_mode = %cache_key.solc_mode, cache_key.solc_mode = %cache_key.solc_mode,
)) ))
}; };
let compiled_contracts = match deployed_libraries { let compiled_contracts = match deployed_libraries {
// If deployed libraries have been specified then we will re-compile the contract as it // If deployed libraries have been specified then we will re-compile the contract as it
// means that linking is required in this case. // means that linking is required in this case.
Some(_) => { Some(_) => {
debug!("Deployed libraries defined, recompilation must take place"); debug!("Deployed libraries defined, recompilation must take place");
debug!("Cache miss"); debug!("Cache miss");
compilation_callback() compilation_callback()
.await .await
.context("Compilation callback for deployed libraries failed")? .context("Compilation callback for deployed libraries failed")?
.compiler_output .compiler_output
} },
// If no deployed libraries are specified then we can follow the cached flow and attempt // If no deployed libraries are specified then we can follow the cached flow and attempt
// to lookup the compilation artifacts in the cache. // to lookup the compilation artifacts in the cache.
None => { None => {
debug!("Deployed libraries undefined, attempting to make use of cache"); debug!("Deployed libraries undefined, attempting to make use of cache");
// Lock this specific cache key so that we do not get inconsistent state: when multiple // Lock this specific cache key so that we do not get inconsistent state: when multiple
// cases come in asking for the same compilation artifacts, they should not all trigger // cases come in asking for the same compilation artifacts, they should not all trigger
// a compilation on a cache miss. Hence, the lock here. // a compilation on a cache miss. Hence, the lock here.
let read_guard = self.cache_key_lock.read().await; let read_guard = self.cache_key_lock.read().await;
let mutex = match read_guard.get(&cache_key).cloned() { let mutex = match read_guard.get(&cache_key).cloned() {
Some(value) => { Some(value) => {
drop(read_guard); drop(read_guard);
value value
} },
None => { None => {
drop(read_guard); drop(read_guard);
self.cache_key_lock self.cache_key_lock
.write() .write()
.await .await
.entry(cache_key.clone()) .entry(cache_key.clone())
.or_default() .or_default()
.clone() .clone()
} },
}; };
let _guard = mutex.lock().await; let _guard = mutex.lock().await;
match self.artifacts_cache.get(&cache_key).await { match self.artifacts_cache.get(&cache_key).await {
Some(cache_value) => { Some(cache_value) => {
if deployed_libraries.is_some() { if deployed_libraries.is_some() {
reporter reporter
.report_post_link_contracts_compilation_succeeded_event( .report_post_link_contracts_compilation_succeeded_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path(), compiler.path(),
true, true,
None, None,
cache_value.compiler_output.clone(), cache_value.compiler_output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} else { } else {
reporter reporter
.report_pre_link_contracts_compilation_succeeded_event( .report_pre_link_contracts_compilation_succeeded_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path(), compiler.path(),
true, true,
None, None,
cache_value.compiler_output.clone(), cache_value.compiler_output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} }
cache_value.compiler_output cache_value.compiler_output
} },
None => { None => {
let compiler_output = compilation_callback() let compiler_output = compilation_callback()
.await .await
.context("Compilation callback failed (cache miss path)")? .context("Compilation callback failed (cache miss path)")?
.compiler_output; .compiler_output;
self.artifacts_cache self.artifacts_cache
.insert( .insert(
&cache_key, &cache_key,
&CacheValue { &CacheValue { compiler_output: compiler_output.clone() },
compiler_output: compiler_output.clone(), )
}, .await
) .context(
.await "Failed to write the cached value of the compilation artifacts",
.context( )?;
"Failed to write the cached value of the compilation artifacts", compiler_output
)?; },
compiler_output }
} },
} };
}
};
Ok(compiled_contracts) Ok(compiled_contracts)
} }
} }
async fn compile_contracts( async fn compile_contracts(
metadata_directory: impl AsRef<Path>, metadata_directory: impl AsRef<Path>,
mut files_to_compile: impl Iterator<Item = PathBuf>, mut files_to_compile: impl Iterator<Item = PathBuf>,
mode: &Mode, mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>, deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &dyn SolidityCompiler, compiler: &dyn SolidityCompiler,
reporter: &ExecutionSpecificReporter, reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> { ) -> Result<CompilerOutput> {
// Puts a limit on how many compilations we can perform at any given time, which helps with // Puts a limit on how many compilations we can perform at any given time, which helps with
// some of the errors we've been seeing under high concurrency on macOS (we have not tried // some of the errors we've been seeing under high concurrency on macOS (we have not tried
// it on Linux, so we don't know whether these issues persist there). // it on Linux, so we don't know whether these issues persist there).
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5)); static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
let _permit = SPAWN_GATE.acquire().await?; let _permit = SPAWN_GATE.acquire().await?;
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref()) let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol") .with_allowed_extension("sol")
.with_use_cached_fs(true) .with_use_cached_fs(true)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let compilation = Compiler::new() let compilation = Compiler::new()
.with_allow_path(metadata_directory) .with_allow_path(metadata_directory)
// Handling the modes // Handling the modes
.with_optimization(mode.optimize_setting) .with_optimization(mode.optimize_setting)
.with_pipeline(mode.pipeline) .with_pipeline(mode.pipeline)
// Adding the contract sources to the compiler. // Adding the contract sources to the compiler.
.try_then(|compiler| { .try_then(|compiler| {
files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path)) files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
})? })?
// Adding the deployed libraries to the compiler. // Adding the deployed libraries to the compiler.
.then(|compiler| { .then(|compiler| {
deployed_libraries deployed_libraries
.iter() .iter()
.flat_map(|value| value.iter()) .flat_map(|value| value.iter())
.map(|(instance, (ident, address, abi))| (instance, ident, address, abi)) .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
.flat_map(|(_, ident, address, _)| { .flat_map(|(_, ident, address, _)| {
all_sources_in_dir all_sources_in_dir.iter().map(move |path| (ident, address, path))
.iter() })
.map(move |path| (ident, address, path)) .fold(compiler, |compiler, (ident, address, path)| {
}) compiler.with_library(path, ident.as_str(), *address)
.fold(compiler, |compiler, (ident, address, path)| { })
compiler.with_library(path, ident.as_str(), *address) });
})
});
let input = compilation.input().clone(); let input = compilation.input().clone();
let output = compilation.try_build(compiler).await; let output = compilation.try_build(compiler).await;
match (output.as_ref(), deployed_libraries.is_some()) { match (output.as_ref(), deployed_libraries.is_some()) {
(Ok(output), true) => { (Ok(output), true) => {
reporter reporter
.report_post_link_contracts_compilation_succeeded_event( .report_post_link_contracts_compilation_succeeded_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path(), compiler.path(),
false, false,
input, input,
output.clone(), output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Ok(output), false) => { (Ok(output), false) => {
reporter reporter
.report_pre_link_contracts_compilation_succeeded_event( .report_pre_link_contracts_compilation_succeeded_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path(), compiler.path(),
false, false,
input, input,
output.clone(), output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Err(err), true) => { (Err(err), true) => {
reporter reporter
.report_post_link_contracts_compilation_failed_event( .report_post_link_contracts_compilation_failed_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path().to_path_buf(), compiler.path().to_path_buf(),
input, input,
format!("{err:#}"), format!("{err:#}"),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Err(err), false) => { (Err(err), false) => {
reporter reporter
.report_pre_link_contracts_compilation_failed_event( .report_pre_link_contracts_compilation_failed_event(
compiler.version().clone(), compiler.version().clone(),
compiler.path().to_path_buf(), compiler.path().to_path_buf(),
input, input,
format!("{err:#}"), format!("{err:#}"),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
} }
output output
} }
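In isolation, the SPAWN_GATE pattern above is a process-wide LazyLock<Semaphore>; this sketch keeps the limit of 5 from the code, while the task body and counts are illustrative (anyhow, tokio, and futures assumed):

use std::sync::LazyLock;
use tokio::sync::Semaphore;

// Process-wide gate: at most five "compilations" in flight at once.
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));

async fn compile(id: usize) -> anyhow::Result<()> {
    let _permit = SPAWN_GATE.acquire().await?;
    // The permit is held for the whole body and released on drop.
    println!("compiling {id}");
    Ok(())
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    futures::future::try_join_all((0..20).map(compile)).await?;
    Ok(())
}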
struct ArtifactsCache { struct ArtifactsCache {
path: PathBuf, path: PathBuf,
} }
impl ArtifactsCache { impl ArtifactsCache {
pub fn new(path: impl AsRef<Path>) -> Self { pub fn new(path: impl AsRef<Path>) -> Self {
Self { Self { path: path.as_ref().to_path_buf() }
path: path.as_ref().to_path_buf(), }
}
}
#[instrument(level = "debug", skip_all, err)] #[instrument(level = "debug", skip_all, err)]
pub async fn with_invalidated_cache(self) -> Result<Self> { pub async fn with_invalidated_cache(self) -> Result<Self> {
cacache::clear(self.path.as_path()) cacache::clear(self.path.as_path())
.await .await
.map_err(Into::<Error>::into) .map_err(Into::<Error>::into)
.with_context(|| format!("Failed to clear cache at {}", self.path.display()))?; .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
Ok(self) Ok(self)
} }
#[instrument(level = "debug", skip_all, err)] #[instrument(level = "debug", skip_all, err)]
pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> { pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?; let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?; let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
cacache::write(self.path.as_path(), key.encode_hex(), value) cacache::write(self.path.as_path(), key.encode_hex(), value)
.await .await
.with_context(|| { .with_context(|| {
format!("Failed to write cache entry under {}", self.path.display()) format!("Failed to write cache entry under {}", self.path.display())
})?; })?;
Ok(()) Ok(())
} }
pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> { pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
let key = bson::to_vec(key).ok()?; let key = bson::to_vec(key).ok()?;
let value = cacache::read(self.path.as_path(), key.encode_hex()) let value = cacache::read(self.path.as_path(), key.encode_hex()).await.ok()?;
.await let value = bson::from_slice::<CacheValue>(&value).ok()?;
.ok()?; Some(value)
let value = bson::from_slice::<CacheValue>(&value).ok()?; }
Some(value)
}
#[instrument(level = "debug", skip_all, err)] #[instrument(level = "debug", skip_all, err)]
pub async fn get_or_insert_with( pub async fn get_or_insert_with(
&self, &self,
key: &CacheKey<'_>, key: &CacheKey<'_>,
callback: impl AsyncFnOnce() -> Result<CacheValue>, callback: impl AsyncFnOnce() -> Result<CacheValue>,
) -> Result<CacheValue> { ) -> Result<CacheValue> {
match self.get(key).await { match self.get(key).await {
Some(value) => { Some(value) => {
debug!("Cache hit"); debug!("Cache hit");
Ok(value) Ok(value)
} },
None => { None => {
debug!("Cache miss"); debug!("Cache miss");
let value = callback().await?; let value = callback().await?;
self.insert(key, &value).await?; self.insert(key, &value).await?;
Ok(value) Ok(value)
} },
} }
} }
} }
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> { struct CacheKey<'a> {
/// The identifier of the used compiler. /// The identifier of the used compiler.
compiler_identifier: CompilerIdentifier, compiler_identifier: CompilerIdentifier,
/// The version of the compiler that was used to compile the artifacts. /// The version of the compiler that was used to compile the artifacts.
compiler_version: Version, compiler_version: Version,
/// The path of the metadata file that the compilation artifacts are for. /// The path of the metadata file that the compilation artifacts are for.
metadata_file_path: &'a Path, metadata_file_path: &'a Path,
/// The mode that the compilation artifacts were compiled with. /// The mode that the compilation artifacts were compiled with.
solc_mode: Cow<'a, Mode>, solc_mode: Cow<'a, Mode>,
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
struct CacheValue { struct CacheValue {
/// The compiler output from the compilation run. /// The compiler output from the compilation run.
compiler_output: CompilerOutput, compiler_output: CompilerOutput,
} }
impl CacheValue { impl CacheValue {
pub fn new(compiler_output: CompilerOutput) -> Self { pub fn new(compiler_output: CompilerOutput) -> Self {
Self { compiler_output } Self { compiler_output }
} }
} }
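The key derivation performed by ArtifactsCache above (serialize the structured key with bson, hex-encode it, and use it as the cacache index) can be seen in isolation below. This sketch uses the standalone hex crate where the code above uses alloy's ToHexExt, and the key fields, path, and payload are invented:

use serde::Serialize;

// Simplified stand-in for the CacheKey struct above.
#[derive(Serialize)]
struct CacheKey<'a> {
    compiler_version: &'a str,
    metadata_file_path: &'a str,
    solc_mode: &'a str,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let key = CacheKey {
        compiler_version: "0.8.28",
        metadata_file_path: "tests/storage.json",
        solc_mode: "M3B3",
    };
    // Stable bytes for the key, then hex so it is a valid cacache index string.
    let index = hex::encode(bson::to_vec(&key)?);
    cacache::write("./compilation_cache", &index, b"compiler output bytes").await?;
    let value = cacache::read("./compilation_cache", &index).await?;
    assert_eq!(value, b"compiler output bytes");
    Ok(())
}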
+19 -19
@@ -6,28 +6,28 @@ use tracing::{info, info_span, instrument};
/// corpus files and produces a map containing all of the [`MetadataFile`]s discovered. /// corpus files and produces a map containing all of the [`MetadataFile`]s discovered.
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] #[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
pub fn collect_metadata_files( pub fn collect_metadata_files(
context: impl AsRef<CorpusConfiguration>, context: impl AsRef<CorpusConfiguration>,
) -> anyhow::Result<Vec<MetadataFile>> { ) -> anyhow::Result<Vec<MetadataFile>> {
let mut metadata_files = Vec::new(); let mut metadata_files = Vec::new();
let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context); let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
for path in &corpus_configuration.paths { for path in &corpus_configuration.paths {
let span = info_span!("Processing corpus file", path = %path.display()); let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter(); let _guard = span.enter();
let corpus = Corpus::try_from_path(path)?; let corpus = Corpus::try_from_path(path)?;
info!( info!(
name = corpus.name(), name = corpus.name(),
number_of_contained_paths = corpus.path_count(), number_of_contained_paths = corpus.path_count(),
"Deserialized corpus file" "Deserialized corpus file"
); );
metadata_files.extend(corpus.enumerate_tests()); metadata_files.extend(corpus.enumerate_tests());
} }
// Several corpus paths may lead to the same metadata files, so it's important that we sort // Several corpus paths may lead to the same metadata files, so it's important that we sort
// them and then deduplicate them. // them and then deduplicate them.
metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
Ok(metadata_files) Ok(metadata_files)
} }
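Since Vec::dedup_by only removes adjacent duplicates, the sort above is what makes the deduplication complete; the same two-step pattern in isolation (file names invented):

use std::path::PathBuf;

fn main() {
    let mut files: Vec<PathBuf> = vec![
        "corpus/a.json".into(),
        "corpus/b.json".into(),
        "corpus/a.json".into(), // reachable through two corpus paths
    ];
    // dedup_by removes only consecutive duplicates, so sort first.
    files.sort();
    files.dedup_by(|a, b| a == b);
    assert_eq!(files.len(), 2);
}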
+34 -39
@@ -2,58 +2,53 @@
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use crate::Platform;
use anyhow::Context as _; use anyhow::Context as _;
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
/// The node pool starts one or more [Node]s which can then be accessed /// The node pool starts one or more [Node]s which can then be accessed
/// in a round-robin fashion. /// in a round-robin fashion.
pub struct NodePool { pub struct NodePool {
next: AtomicUsize, next: AtomicUsize,
nodes: Vec<Box<dyn EthereumNode + Send + Sync>>, nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
} }
impl NodePool { impl NodePool {
/// Create a new Pool. This will start as many nodes as the concurrency settings in `context` specify. /// Create a new Pool. This will start as many nodes as the concurrency settings in `context` specify.
pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> { pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context); let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes; let nodes = concurrency_configuration.number_of_nodes;
let mut handles = Vec::with_capacity(nodes); let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes { for _ in 0..nodes {
let context = context.clone(); let context = context.clone();
handles.push(platform.new_node(context)?); handles.push(platform.new_node(context)?);
} }
let mut nodes = Vec::with_capacity(nodes); let mut nodes = Vec::with_capacity(nodes);
for handle in handles { for handle in handles {
nodes.push( nodes.push(
handle handle
.join() .join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
.context("Failed to join node spawn thread")? .context("Failed to join node spawn thread")?
.context("Node failed to spawn")?, .context("Node failed to spawn")?,
); );
} }
let pre_transactions_tasks = nodes let pre_transactions_tasks =
.iter_mut() nodes.iter_mut().map(|node| node.pre_transactions()).collect::<Vec<_>>();
.map(|node| node.pre_transactions()) futures::future::try_join_all(pre_transactions_tasks)
.collect::<Vec<_>>(); .await
futures::future::try_join_all(pre_transactions_tasks) .context("Failed to run the pre-transactions task")?;
.await
.context("Failed to run the pre-transactions task")?;
Ok(Self { Ok(Self { nodes, next: Default::default() })
nodes, }
next: Default::default(),
})
}
/// Get a handle to the next node. /// Get a handle to the next node.
pub fn round_robbin(&self) -> &dyn EthereumNode { pub fn round_robbin(&self) -> &dyn EthereumNode {
let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len(); let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
self.nodes.get(current).unwrap().as_ref() self.nodes.get(current).unwrap().as_ref()
} }
} }
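The round-robin selection above (spelled round_robbin in the crate) in standalone form; fetch_add wraps on overflow, so the modulo always yields a valid index (the node type here is a placeholder):

use std::sync::atomic::{AtomicUsize, Ordering};

struct Pool {
    next: AtomicUsize,
    nodes: Vec<String>, // stand-in for the boxed node handles
}

impl Pool {
    fn round_robin(&self) -> &str {
        // fetch_add returns the previous value and wraps around on overflow,
        // so the modulo below always produces a valid index.
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        &self.nodes[current]
    }
}

fn main() {
    let pool = Pool { next: AtomicUsize::new(0), nodes: vec!["node-0".into(), "node-1".into()] };
    assert_eq!(pool.round_robin(), "node-0");
    assert_eq!(pool.round_robin(), "node-1");
    assert_eq!(pool.round_robin(), "node-0");
}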
+252 -271
@@ -1,178 +1,163 @@
use std::collections::BTreeMap; use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
use std::sync::Arc;
use std::{borrow::Cow, path::Path};
use futures::{Stream, StreamExt, stream}; use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap}; use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter; use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_config::Context; use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode; use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json}; use serde_json::{Value, json};
use revive_dt_compiler::Mode; use revive_dt_compiler::{Mode, SolidityCompiler};
use revive_dt_compiler::SolidityCompiler;
use revive_dt_format::{ use revive_dt_format::{
case::{Case, CaseIdx}, case::{Case, CaseIdx},
metadata::MetadataFile, metadata::MetadataFile,
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter}; use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info}; use tracing::{debug, error, info};
use crate::Platform; use crate::{Platform, helpers::NodePool};
use crate::helpers::NodePool;
pub async fn create_test_definitions_stream<'a>( pub async fn create_test_definitions_stream<'a>(
// This is only required for creating the compiler objects and is not used anywhere else in the // This is only required for creating the compiler objects and is not used anywhere else in the
// function. // function.
context: &Context, context: &Context,
metadata_files: impl IntoIterator<Item = &'a MetadataFile>, metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>, platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
reporter: Reporter, reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> { ) -> impl Stream<Item = TestDefinition<'a>> {
stream::iter( stream::iter(
metadata_files metadata_files
.into_iter() .into_iter()
// Flatten over the cases. // Flatten over the cases.
.flat_map(|metadata_file| { .flat_map(|metadata_file| {
metadata_file metadata_file
.cases .cases
.iter() .iter()
.enumerate() .enumerate()
.map(move |(case_idx, case)| (metadata_file, case_idx, case)) .map(move |(case_idx, case)| (metadata_file, case_idx, case))
}) })
// Flatten over the modes, prefer the case modes over the metadata file modes. // Flatten over the modes, prefer the case modes over the metadata file modes.
.flat_map(move |(metadata_file, case_idx, case)| { .flat_map(move |(metadata_file, case_idx, case)| {
let reporter = reporter.clone(); let reporter = reporter.clone();
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref()); let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
let modes = match modes { let modes = match modes {
Some(modes) => EitherIter::A( Some(modes) => EitherIter::A(
ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned), ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
), ),
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)), None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
}; };
modes.into_iter().map(move |mode| { modes.into_iter().map(move |mode| {
( (
metadata_file, metadata_file,
case_idx, case_idx,
case, case,
mode.clone(), mode.clone(),
reporter.test_specific_reporter(Arc::new(TestSpecifier { reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.as_ref().clone(), solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(), metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx), case_idx: CaseIdx::new(case_idx),
})), })),
) )
}) })
}) })
// Inform the reporter of each one of the test cases that were discovered which we expect to // Inform the reporter of each one of the test cases that were discovered which we
// run. // expect to run.
.inspect(|(_, _, _, _, reporter)| { .inspect(|(_, _, _, _, reporter)| {
reporter reporter.report_test_case_discovery_event().expect("Can't fail");
.report_test_case_discovery_event() }),
.expect("Can't fail"); )
}), // Creating the Test Definition objects from all of the various objects we have and creating
) // their required dependencies (e.g., compiler).
// Creating the Test Definition objects from all of the various objects we have and creating .filter_map(move |(metadata_file, case_idx, case, mode, reporter)| async move {
// their required dependencies (e.g., compiler). let mut platforms = BTreeMap::new();
.filter_map( for (platform, node_pool) in platforms_and_nodes.values() {
move |(metadata_file, case_idx, case, mode, reporter)| async move { let node = node_pool.round_robbin();
let mut platforms = BTreeMap::new(); let compiler = platform
for (platform, node_pool) in platforms_and_nodes.values() { .new_compiler(context.clone(), mode.version.clone().map(Into::into))
let node = node_pool.round_robbin(); .await
let compiler = platform .inspect_err(|err| {
.new_compiler(context.clone(), mode.version.clone().map(Into::into)) error!(
.await ?err,
.inspect_err(|err| { platform_identifier = %platform.platform_identifier(),
error!( "Failed to instantiate the compiler"
?err, )
platform_identifier = %platform.platform_identifier(), })
"Failed to instantiate the compiler" .ok()?;
)
})
.ok()?;
reporter reporter
.report_node_assigned_event( .report_node_assigned_event(
node.id(), node.id(),
platform.platform_identifier(), platform.platform_identifier(),
node.connection_string(), node.connection_string(),
) )
.expect("Can't fail"); .expect("Can't fail");
let reporter = let reporter =
reporter.execution_specific_reporter(node.id(), platform.platform_identifier()); reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
platforms.insert( platforms.insert(
platform.platform_identifier(), platform.platform_identifier(),
TestPlatformInformation { TestPlatformInformation { platform: *platform, node, compiler, reporter },
platform: *platform, );
node, }
compiler,
reporter,
},
);
}
Some(TestDefinition { Some(TestDefinition {
/* Metadata file information */ /* Metadata file information */
metadata: metadata_file, metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(), metadata_file_path: metadata_file.metadata_file_path.as_path(),
/* Mode Information */ /* Mode Information */
mode: mode.clone(), mode: mode.clone(),
/* Case Information */ /* Case Information */
case_idx: CaseIdx::new(case_idx), case_idx: CaseIdx::new(case_idx),
case, case,
/* Platform and Node Assignment Information */ /* Platform and Node Assignment Information */
platforms, platforms,
/* Reporter */ /* Reporter */
reporter, reporter,
}) })
}, })
) // Filter out the test cases which are incompatible or that can't run in the current setup.
// Filter out the test cases which are incompatible or that can't run in the current setup. .filter_map(move |test| async move {
.filter_map(move |test| async move { match test.check_compatibility() {
match test.check_compatibility() { Ok(()) => Some(test),
Ok(()) => Some(test), Err((reason, additional_information)) => {
Err((reason, additional_information)) => { debug!(
debug!( metadata_file_path = %test.metadata.metadata_file_path.display(),
metadata_file_path = %test.metadata.metadata_file_path.display(), case_idx = %test.case_idx,
case_idx = %test.case_idx, mode = %test.mode,
mode = %test.mode, reason,
reason, additional_information =
additional_information = serde_json::to_string(&additional_information).unwrap(),
serde_json::to_string(&additional_information).unwrap(), "Ignoring Test Case"
"Ignoring Test Case" );
); test.reporter
test.reporter .report_test_ignored_event(
.report_test_ignored_event( reason.to_string(),
reason.to_string(), additional_information
additional_information .into_iter()
.into_iter() .map(|(k, v)| (k.into(), v))
.map(|(k, v)| (k.into(), v)) .collect::<IndexMap<_, _>>(),
.collect::<IndexMap<_, _>>(), )
) .expect("Can't fail");
.expect("Can't fail"); None
None },
} }
} })
}) .inspect(|test| {
.inspect(|test| { info!(
info!( metadata_file_path = %test.metadata_file_path.display(),
metadata_file_path = %test.metadata_file_path.display(), case_idx = %test.case_idx,
case_idx = %test.case_idx, mode = %test.mode,
mode = %test.mode, "Created a test case definition"
"Created a test case definition" );
); })
})
} }
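The pipeline above follows a common futures-streams shape: iterate, fan out, filter, observe. Here it is with the domain types stripped out (the numbers and mode strings are invented):

use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
    let definitions = stream::iter(1..=6)
        // Expand each item the way the code fans out over cases and modes.
        .flat_map(|n| stream::iter([(n, "mode-a"), (n, "mode-b")]))
        // Drop incompatible combinations, like check_compatibility does.
        .filter_map(|(n, mode)| async move { (n % 2 == 0).then_some((n, mode)) })
        .inspect(|(n, mode)| println!("created definition {n}/{mode}"))
        .collect::<Vec<_>>()
        .await;
    assert_eq!(definitions.len(), 6);
}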
/// This is a full description of a differential test to run alongside the full metadata file, the /// This is a full description of a differential test to run alongside the full metadata file, the
@@ -180,146 +165,142 @@ pub async fn create_test_definitions_stream<'a>(
/// platforms it should run on, the compilers to use, and everything else needed to make /// platforms it should run on, the compilers to use, and everything else needed to make
/// it a complete description. /// it a complete description.
pub struct TestDefinition<'a> { pub struct TestDefinition<'a> {
/* Metadata file information */ /* Metadata file information */
pub metadata: &'a MetadataFile, pub metadata: &'a MetadataFile,
pub metadata_file_path: &'a Path, pub metadata_file_path: &'a Path,
/* Mode Information */ /* Mode Information */
pub mode: Cow<'a, Mode>, pub mode: Cow<'a, Mode>,
/* Case Information */ /* Case Information */
pub case_idx: CaseIdx, pub case_idx: CaseIdx,
pub case: &'a Case, pub case: &'a Case,
/* Platform and Node Assignment Information */ /* Platform and Node Assignment Information */
pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>, pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,
/* Reporter */ /* Reporter */
pub reporter: TestSpecificReporter, pub reporter: TestSpecificReporter,
} }
impl<'a> TestDefinition<'a> { impl<'a> TestDefinition<'a> {
/// Checks if this test can be ran with the current configuration. /// Checks if this test can be ran with the current configuration.
pub fn check_compatibility(&self) -> TestCheckFunctionResult { pub fn check_compatibility(&self) -> TestCheckFunctionResult {
self.check_metadata_file_ignored()?; self.check_metadata_file_ignored()?;
self.check_case_file_ignored()?; self.check_case_file_ignored()?;
self.check_target_compatibility()?; self.check_target_compatibility()?;
self.check_evm_version_compatibility()?; self.check_evm_version_compatibility()?;
self.check_compiler_compatibility()?; self.check_compiler_compatibility()?;
Ok(()) Ok(())
} }
/// Checks if the metadata file is ignored or not. /// Checks if the metadata file is ignored or not.
fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult { fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
if self.metadata.ignore.is_some_and(|ignore| ignore) { if self.metadata.ignore.is_some_and(|ignore| ignore) {
Err(("Metadata file is ignored.", indexmap! {})) Err(("Metadata file is ignored.", indexmap! {}))
} else { } else {
Ok(()) Ok(())
} }
} }
/// Checks if the case file is ignored or not. /// Checks if the case file is ignored or not.
fn check_case_file_ignored(&self) -> TestCheckFunctionResult { fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
if self.case.ignore.is_some_and(|ignore| ignore) { if self.case.ignore.is_some_and(|ignore| ignore) {
Err(("Case is ignored.", indexmap! {})) Err(("Case is ignored.", indexmap! {}))
} else { } else {
Ok(()) Ok(())
} }
} }
/// Checks if the platforms all support the desired targets in the metadata file. /// Checks if the platforms all support the desired targets in the metadata file.
fn check_target_compatibility(&self) -> TestCheckFunctionResult { fn check_target_compatibility(&self) -> TestCheckFunctionResult {
let mut error_map = indexmap! { let mut error_map = indexmap! {
"test_desired_targets" => json!(self.metadata.targets.as_ref()), "test_desired_targets" => json!(self.metadata.targets.as_ref()),
}; };
let mut is_allowed = true; let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() { for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = match self.metadata.targets.as_ref() { let is_allowed_for_platform = match self.metadata.targets.as_ref() {
None => true, None => true,
Some(required_vm_identifiers) => { Some(required_vm_identifiers) =>
required_vm_identifiers.contains(&platform_information.platform.vm_identifier()) required_vm_identifiers.contains(&platform_information.platform.vm_identifier()),
} };
}; is_allowed &= is_allowed_for_platform;
is_allowed &= is_allowed_for_platform; error_map.insert(
error_map.insert( platform_information.platform.platform_identifier().into(),
platform_information.platform.platform_identifier().into(), json!(is_allowed_for_platform),
json!(is_allowed_for_platform), );
); }
}
if is_allowed { if is_allowed {
Ok(()) Ok(())
} else { } else {
Err(( Err((
"One of the platforms do do not support the targets allowed by the test.", "One of the platforms do do not support the targets allowed by the test.",
error_map, error_map,
)) ))
} }
} }
// Checks for the compatibility of the EVM version with the platforms specified. // Checks for the compatibility of the EVM version with the platforms specified.
fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult { fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
let Some(evm_version_requirement) = self.metadata.required_evm_version else { let Some(evm_version_requirement) = self.metadata.required_evm_version else {
return Ok(()); return Ok(());
}; };
let mut error_map = indexmap! { let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version), "test_desired_evm_version" => json!(self.metadata.required_evm_version),
}; };
let mut is_allowed = true; let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() { for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = let is_allowed_for_platform =
evm_version_requirement.matches(&platform_information.node.evm_version()); evm_version_requirement.matches(&platform_information.node.evm_version());
is_allowed &= is_allowed_for_platform; is_allowed &= is_allowed_for_platform;
error_map.insert( error_map.insert(
platform_information.platform.platform_identifier().into(), platform_information.platform.platform_identifier().into(),
json!(is_allowed_for_platform), json!(is_allowed_for_platform),
); );
} }
if is_allowed { if is_allowed {
Ok(()) Ok(())
} else { } else {
Err(( Err(("EVM version is incompatible for the platforms specified", error_map))
"EVM version is incompatible for the platforms specified", }
error_map, }
))
}
}
/// Checks if the platforms compilers support the mode that the test is for. /// Checks if the platforms compilers support the mode that the test is for.
fn check_compiler_compatibility(&self) -> TestCheckFunctionResult { fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
let mut error_map = indexmap! { let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version), "test_desired_evm_version" => json!(self.metadata.required_evm_version),
}; };
let mut is_allowed = true; let mut is_allowed = true;
for (_, platform_information) in self.platforms.iter() { for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = platform_information let is_allowed_for_platform = platform_information
.compiler .compiler
.supports_mode(self.mode.optimize_setting, self.mode.pipeline); .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
is_allowed &= is_allowed_for_platform; is_allowed &= is_allowed_for_platform;
error_map.insert( error_map.insert(
platform_information.platform.platform_identifier().into(), platform_information.platform.platform_identifier().into(),
json!(is_allowed_for_platform), json!(is_allowed_for_platform),
); );
} }
if is_allowed { if is_allowed {
Ok(()) Ok(())
} else { } else {
Err(( Err((
"Compilers do not support this mode either for the provided platforms.", "Compilers do not support this mode either for the provided platforms.",
error_map, error_map,
)) ))
} }
} }
} }
pub struct TestPlatformInformation<'a> { pub struct TestPlatformInformation<'a> {
pub platform: &'a dyn Platform, pub platform: &'a dyn Platform,
pub node: &'a dyn EthereumNode, pub node: &'a dyn EthereumNode,
pub compiler: Box<dyn SolidityCompiler>, pub compiler: Box<dyn SolidityCompiler>,
pub reporter: ExecutionSpecificReporter, pub reporter: ExecutionSpecificReporter,
} }
type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>; type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
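Each check_* helper above shares the TestCheckFunctionResult shape: Ok(()) on success, or a static reason plus structured details on failure. A minimal analogue using indexmap's indexmap! macro and serde_json values (the target names are invented):

use indexmap::{IndexMap, indexmap};
use serde_json::{Value, json};

type CheckResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;

fn check_target(supported: &[&str], required: Option<&str>) -> CheckResult {
    match required {
        None => Ok(()),
        Some(target) if supported.contains(&target) => Ok(()),
        Some(target) => Err((
            "Platform does not support the required target.",
            indexmap! {
                "required_target" => json!(target),
                "supported_targets" => json!(supported),
            },
        )),
    }
}

fn main() {
    assert!(check_target(&["evm"], Some("evm")).is_ok());
    assert!(check_target(&["evm"], Some("polkavm")).is_err());
}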
+377 -403
@@ -3,9 +3,12 @@
//! This crate defines the testing configuration and //! This crate defines the testing configuration and
//! provides a helper utility to execute tests. //! provides a helper utility to execute tests.
pub mod differential_tests;
pub mod helpers;
use std::{ use std::{
pin::Pin, pin::Pin,
thread::{self, JoinHandle}, thread::{self, JoinHandle},
}; };
use alloy::genesis::Genesis; use alloy::genesis::Genesis;
@@ -14,516 +17,487 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_node::{ use revive_dt_node::{
Node, node_implementations::geth::GethNode, Node,
node_implementations::lighthouse_geth::LighthouseGethNode, node_implementations::{
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombieNode, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
zombienet::ZombieNode,
},
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::info; use tracing::info;
pub use helpers::CachedCompiler;
/// A trait that describes the interface for the platforms that are supported by the tool. /// A trait that describes the interface for the platforms that are supported by the tool.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub trait Platform { pub trait Platform {
/// Returns the identifier of this platform. This is a combination of the node and the compiler /// Returns the identifier of this platform. This is a combination of the node and the compiler
/// used. /// used.
fn platform_identifier(&self) -> PlatformIdentifier; fn platform_identifier(&self) -> PlatformIdentifier;
/// Returns a full identifier for the platform. /// Returns a full identifier for the platform.
fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) { fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
-        (
-            self.node_identifier(),
-            self.vm_identifier(),
-            self.compiler_identifier(),
-        )
+        (self.node_identifier(), self.vm_identifier(), self.compiler_identifier())
     }
/// Returns the identifier of the node used. /// Returns the identifier of the node used.
fn node_identifier(&self) -> NodeIdentifier; fn node_identifier(&self) -> NodeIdentifier;
/// Returns the identifier of the vm used. /// Returns the identifier of the vm used.
fn vm_identifier(&self) -> VmIdentifier; fn vm_identifier(&self) -> VmIdentifier;
/// Returns the identifier of the compiler used. /// Returns the identifier of the compiler used.
fn compiler_identifier(&self) -> CompilerIdentifier; fn compiler_identifier(&self) -> CompilerIdentifier;
/// Creates a new node for the platform by spawning a new thread, creating the node object, /// Creates a new node for the platform by spawning a new thread, creating the node object,
/// initializing it, spawning it, and waiting for it to start up. /// initializing it, spawning it, and waiting for it to start up.
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>; ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;
/// Creates a new compiler for the provided platform /// Creates a new compiler for the provided platform
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>; ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
} }
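A rough usage sketch for this trait (assumed, not from this diff): the node comes back through a `JoinHandle` because startup blocks, while the compiler is an ordinary boxed future. `context` is assumed to be a valid, cloneable `Context`:

// Hypothetical consumer of `dyn Platform`.
async fn bring_up(platform: &dyn Platform, context: Context) -> anyhow::Result<()> {
    // Blocking node startup happens on its own thread...
    let node_handle = platform.new_node(context.clone())?;
    // ...while the compiler future is awaited on the async runtime.
    let _compiler = platform.new_compiler(context, None).await?;
    let _node = node_handle
        .join()
        .map_err(|_| anyhow::anyhow!("node spawn thread panicked"))??;
    Ok(())
}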
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct GethEvmSolcPlatform; pub struct GethEvmSolcPlatform;
impl Platform for GethEvmSolcPlatform { impl Platform for GethEvmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::GethEvmSolc PlatformIdentifier::GethEvmSolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Geth NodeIdentifier::Geth
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm VmIdentifier::Evm
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc CompilerIdentifier::Solc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = GethNode::new(context); let node = GethNode::new(context);
let node = spawn_node::<GethNode>(node, genesis)?; let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
} }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct LighthouseGethEvmSolcPlatform; pub struct LighthouseGethEvmSolcPlatform;
impl Platform for LighthouseGethEvmSolcPlatform { impl Platform for LighthouseGethEvmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::LighthouseGethEvmSolc PlatformIdentifier::LighthouseGethEvmSolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::LighthouseGeth NodeIdentifier::LighthouseGeth
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm VmIdentifier::Evm
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc CompilerIdentifier::Solc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = LighthouseGethNode::new(context); let node = LighthouseGethNode::new(context);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?; let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>) Ok(Box::new(node) as Box<_>)
})) }))
} }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkPolkavmResolcPlatform; pub struct KitchensinkPolkavmResolcPlatform;
impl Platform for KitchensinkPolkavmResolcPlatform { impl Platform for KitchensinkPolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::KitchensinkPolkavmResolc PlatformIdentifier::KitchensinkPolkavmResolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Kitchensink NodeIdentifier::Kitchensink
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM VmIdentifier::PolkaVM
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc CompilerIdentifier::Resolc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = SubstrateNode::new(
                 kitchensink_path,
                 SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                 context,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Resolc::new(context, version).await; let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkRevmSolcPlatform; pub struct KitchensinkRevmSolcPlatform;
impl Platform for KitchensinkRevmSolcPlatform { impl Platform for KitchensinkRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::KitchensinkRevmSolc PlatformIdentifier::KitchensinkRevmSolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Kitchensink NodeIdentifier::Kitchensink
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm VmIdentifier::Evm
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc CompilerIdentifier::Solc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = SubstrateNode::new(
                 kitchensink_path,
                 SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
                 context,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodePolkavmResolcPlatform; pub struct ReviveDevNodePolkavmResolcPlatform;
impl Platform for ReviveDevNodePolkavmResolcPlatform { impl Platform for ReviveDevNodePolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ReviveDevNodePolkavmResolc PlatformIdentifier::ReviveDevNodePolkavmResolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::ReviveDevNode NodeIdentifier::ReviveDevNode
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM VmIdentifier::PolkaVM
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc CompilerIdentifier::Resolc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let revive_dev_node_path =
+            AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = SubstrateNode::new(
                 revive_dev_node_path,
                 SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
                 context,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Resolc::new(context, version).await; let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodeRevmSolcPlatform; pub struct ReviveDevNodeRevmSolcPlatform;
impl Platform for ReviveDevNodeRevmSolcPlatform { impl Platform for ReviveDevNodeRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ReviveDevNodeRevmSolc PlatformIdentifier::ReviveDevNodeRevmSolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::ReviveDevNode NodeIdentifier::ReviveDevNode
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm VmIdentifier::Evm
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc CompilerIdentifier::Solc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let revive_dev_node_path =
+            AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = SubstrateNode::new(
                 revive_dev_node_path,
                 SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
                 context,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetPolkavmResolcPlatform; pub struct ZombienetPolkavmResolcPlatform;
impl Platform for ZombienetPolkavmResolcPlatform { impl Platform for ZombienetPolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetPolkavmResolc PlatformIdentifier::ZombienetPolkavmResolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet NodeIdentifier::Zombienet
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM VmIdentifier::PolkaVM
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc CompilerIdentifier::Resolc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let polkadot_parachain_path =
+            AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = ZombieNode::new(polkadot_parachain_path, context);
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ZombienetRevmSolcPlatform; pub struct ZombienetRevmSolcPlatform;
impl Platform for ZombienetRevmSolcPlatform { impl Platform for ZombienetRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier { fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::ZombienetRevmSolc PlatformIdentifier::ZombienetRevmSolc
} }
fn node_identifier(&self) -> NodeIdentifier { fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::Zombienet NodeIdentifier::Zombienet
} }
fn vm_identifier(&self) -> VmIdentifier { fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm VmIdentifier::Evm
} }
fn compiler_identifier(&self) -> CompilerIdentifier { fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc CompilerIdentifier::Solc
} }
fn new_node( fn new_node(
&self, &self,
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let polkadot_parachain_path =
+            AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = ZombieNode::new(polkadot_parachain_path, context);
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
fn new_compiler( fn new_compiler(
&self, &self,
context: Context, context: Context,
version: Option<VersionOrRequirement>, version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move { Box::pin(async move {
let compiler = Solc::new(context, version).await; let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>) compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
}) })
} }
} }
impl From<PlatformIdentifier> for Box<dyn Platform> { impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
-            PlatformIdentifier::LighthouseGethEvmSolc => {
-                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::KitchensinkPolkavmResolc => {
-                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::KitchensinkRevmSolc => {
-                Box::new(KitchensinkRevmSolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
-                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ReviveDevNodeRevmSolc => {
-                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ZombienetPolkavmResolc => {
-                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
-            }
+            PlatformIdentifier::LighthouseGethEvmSolc =>
+                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>,
+            PlatformIdentifier::KitchensinkPolkavmResolc =>
+                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>,
+            PlatformIdentifier::KitchensinkRevmSolc =>
+                Box::new(KitchensinkRevmSolcPlatform) as Box<_>,
+            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
+                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>,
+            PlatformIdentifier::ReviveDevNodeRevmSolc =>
+                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>,
+            PlatformIdentifier::ZombienetPolkavmResolc =>
+                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>,
             PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
         }
     }
} }
impl From<PlatformIdentifier> for &dyn Platform { impl From<PlatformIdentifier> for &dyn Platform {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
-            PlatformIdentifier::LighthouseGethEvmSolc => {
-                &LighthouseGethEvmSolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::KitchensinkPolkavmResolc => {
-                &KitchensinkPolkavmResolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::KitchensinkRevmSolc => {
-                &KitchensinkRevmSolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
-                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ReviveDevNodeRevmSolc => {
-                &ReviveDevNodeRevmSolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ZombienetPolkavmResolc => {
-                &ZombienetPolkavmResolcPlatform as &dyn Platform
-            }
+            PlatformIdentifier::LighthouseGethEvmSolc =>
+                &LighthouseGethEvmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::KitchensinkPolkavmResolc =>
+                &KitchensinkPolkavmResolcPlatform as &dyn Platform,
+            PlatformIdentifier::KitchensinkRevmSolc =>
+                &KitchensinkRevmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
+                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform,
+            PlatformIdentifier::ReviveDevNodeRevmSolc =>
+                &ReviveDevNodeRevmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::ZombienetPolkavmResolc =>
+                &ZombienetPolkavmResolcPlatform as &dyn Platform,
             PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
         }
     }
} }
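Both conversions can be exercised as in the sketch below; the platform unit structs are zero-sized, so the `&'static dyn Platform` borrow is a promoted constant:

// Sketch: turning identifiers into platform objects.
let boxed: Box<dyn Platform> = PlatformIdentifier::GethEvmSolc.into();
let borrowed: &dyn Platform = PlatformIdentifier::ZombienetRevmSolc.into();
assert!(matches!(boxed.platform_identifier(), PlatformIdentifier::GethEvmSolc));
assert!(matches!(borrowed.vm_identifier(), VmIdentifier::Evm));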
fn spawn_node<T: Node + EthereumNode + Send + Sync>( fn spawn_node<T: Node + EthereumNode + Send + Sync>(
mut node: T, mut node: T,
genesis: Genesis, genesis: Genesis,
) -> anyhow::Result<T> { ) -> anyhow::Result<T> {
-    info!(
-        id = node.id(),
-        connection_string = node.connection_string(),
-        "Spawning node"
-    );
-    node.spawn(genesis)
-        .context("Failed to spawn node process")?;
-    info!(
-        id = node.id(),
-        connection_string = node.connection_string(),
-        "Spawned node"
-    );
+    info!(id = node.id(), connection_string = node.connection_string(), "Spawning node");
+    node.spawn(genesis).context("Failed to spawn node process")?;
+    info!(id = node.id(), connection_string = node.connection_string(), "Spawned node");
     Ok(node)
} }
+55 -55
@@ -13,69 +13,69 @@ use revive_dt_core::Platform;
use revive_dt_format::metadata::Metadata; use revive_dt_format::metadata::Metadata;
use crate::{ use crate::{
differential_benchmarks::handle_differential_benchmarks, differential_benchmarks::handle_differential_benchmarks,
differential_tests::handle_differential_tests, differential_tests::handle_differential_tests,
}; };
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
.lossy(false) .lossy(false)
// Assuming that each line contains 255 characters and that each character is one byte, then // Assuming that each line contains 255 characters and that each character is one byte, then
// this means that our buffer is about 4 GB in size. // this means that our buffer is about 4 GB in size.
.buffered_lines_limit(0x1000000) .buffered_lines_limit(0x1000000)
.thread_name("buffered writer") .thread_name("buffered writer")
.finish(std::io::stdout()); .finish(std::io::stdout());
let subscriber = FmtSubscriber::builder() let subscriber = FmtSubscriber::builder()
.with_writer(writer) .with_writer(writer)
.with_thread_ids(false) .with_thread_ids(false)
.with_thread_names(false) .with_thread_names(false)
.with_env_filter(EnvFilter::from_default_env()) .with_env_filter(EnvFilter::from_default_env())
.with_ansi(false) .with_ansi(false)
.pretty() .pretty()
.finish(); .finish();
tracing::subscriber::set_global_default(subscriber)?; tracing::subscriber::set_global_default(subscriber)?;
info!("Differential testing tool is starting"); info!("Differential testing tool is starting");
let context = Context::try_parse()?; let context = Context::try_parse()?;
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task(); let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
match context { match context {
Context::Test(context) => tokio::runtime::Builder::new_multi_thread() Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
.worker_threads(context.concurrency_configuration.number_of_threads) .worker_threads(context.concurrency_configuration.number_of_threads)
.enable_all() .enable_all()
.build() .build()
.expect("Failed building the Runtime") .expect("Failed building the Runtime")
.block_on(async move { .block_on(async move {
let differential_tests_handling_task = let differential_tests_handling_task =
handle_differential_tests(*context, reporter); handle_differential_tests(*context, reporter);
futures::future::try_join(differential_tests_handling_task, report_aggregator_task) futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
.await?; .await?;
Ok(()) Ok(())
}), }),
Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread() Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
.worker_threads(context.concurrency_configuration.number_of_threads) .worker_threads(context.concurrency_configuration.number_of_threads)
.enable_all() .enable_all()
.build() .build()
.expect("Failed building the Runtime") .expect("Failed building the Runtime")
.block_on(async move { .block_on(async move {
let differential_benchmarks_handling_task = let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter); handle_differential_benchmarks(*context, reporter);
futures::future::try_join( futures::future::try_join(
differential_benchmarks_handling_task, differential_benchmarks_handling_task,
report_aggregator_task, report_aggregator_task,
) )
.await?; .await?;
Ok(()) Ok(())
}), }),
Context::ExportJsonSchema => { Context::ExportJsonSchema => {
let schema = schema_for!(Metadata); let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap()); println!("{}", serde_json::to_string_pretty(&schema).unwrap());
Ok(()) Ok(())
} },
} }
} }
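The `try_join` pairing above is a fail-fast pattern: the workload and the report aggregator run concurrently, and the first error from either side aborts the pair. A minimal sketch of the same shape, under the assumption that both futures yield `anyhow::Result<()>`:

// Sketch of the fail-fast pairing used by both runtime branches above.
async fn run_paired(
    workload: impl Future<Output = anyhow::Result<()>>,
    aggregator: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
    // `try_join` polls both futures and returns the first `Err` it sees.
    futures::future::try_join(workload, aggregator).await?;
    Ok(())
}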
+81 -87
@@ -7,108 +7,102 @@ use crate::{mode::ParsedMode, steps::*};
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)] #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case { pub struct Case {
/// An optional name of the test case. /// An optional name of the test case.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>, pub name: Option<String>,
/// An optional comment on the case which has no impact on the execution in any way. /// An optional comment on the case which has no impact on the execution in any way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// This represents a mode that has been parsed from test metadata. /// This represents a mode that has been parsed from test metadata.
/// ///
/// Mode strings can take the following form (in pseudo-regex): /// Mode strings can take the following form (in pseudo-regex):
/// ///
/// ```text /// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>? /// [YEILV][+-]? (M[0123sz])? <semver>?
/// ``` /// ```
/// ///
/// If this is provided then it takes higher priority than the modes specified in the metadata /// If this is provided then it takes higher priority than the modes specified in the metadata
/// file. /// file.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub modes: Option<Vec<ParsedMode>>, pub modes: Option<Vec<ParsedMode>>,
/// The set of steps to run as part of this test case. /// The set of steps to run as part of this test case.
#[serde(rename = "inputs")] #[serde(rename = "inputs")]
pub steps: Vec<Step>, pub steps: Vec<Step>,
/// An optional name of the group of tests that this test belongs to. /// An optional name of the group of tests that this test belongs to.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub group: Option<String>, pub group: Option<String>,
/// An optional set of expectations and assertions to make about the transaction after it ran. /// An optional set of expectations and assertions to make about the transaction after it ran.
/// ///
/// If this is not specified then the only assertion that will be run is that the transaction /// If this is not specified then the only assertion that will be run is that the transaction
/// was successful. /// was successful.
/// ///
/// This expectation that's on the case itself will be attached to the final step of the case. /// This expectation that's on the case itself will be attached to the final step of the case.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>, pub expected: Option<Expected>,
/// An optional boolean which defines if the case as a whole should be ignored. If null then the /// An optional boolean which defines if the case as a whole should be ignored. If null then
/// case will not be ignored. /// the case will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
} }
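A hypothetical metadata snippet showing the wire format (note the `steps`-to-`inputs` rename); the field values are invented for illustration:

// Hypothetical: deserializing a case from JSON metadata.
let case: Case = serde_json::from_str(
    r#"{
        "name": "transfer-roundtrip",
        "modes": ["Y+ M3 >=0.8.0"],
        "inputs": [],
        "ignore": false
    }"#,
)
.expect("a well-formed case");
assert_eq!(case.name.as_deref(), Some("transfer-roundtrip"));
assert_eq!(case.ignore, Some(false));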
impl Case { impl Case {
pub fn steps_iterator(&self) -> impl Iterator<Item = Step> { pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
let steps_len = self.steps.len(); let steps_len = self.steps.len();
-        self.steps
-            .clone()
-            .into_iter()
-            .enumerate()
-            .map(move |(idx, mut step)| {
-                let Step::FunctionCall(ref mut input) = step else {
-                    return step;
-                };
+        self.steps.clone().into_iter().enumerate().map(move |(idx, mut step)| {
+            let Step::FunctionCall(ref mut input) = step else {
+                return step;
+            };
if idx + 1 == steps_len { if idx + 1 == steps_len {
if input.expected.is_none() { if input.expected.is_none() {
input.expected = self.expected.clone(); input.expected = self.expected.clone();
} }
// TODO: What does it mean for us to have an `expected` field on the case itself // TODO: What does it mean for us to have an `expected` field on the case itself
// but the final input also has an expected field that doesn't match the one on // but the final input also has an expected field that doesn't match the one on
// the case? What are we supposed to do with that final expected field on the // the case? What are we supposed to do with that final expected field on the
// case? // case?
step step
} else { } else {
step step
} }
}) })
} }
pub fn steps_iterator_for_benchmarks( pub fn steps_iterator_for_benchmarks(
&self, &self,
default_repeat_count: usize, default_repeat_count: usize,
) -> Box<dyn Iterator<Item = Step> + '_> { ) -> Box<dyn Iterator<Item = Step> + '_> {
-        let contains_repeat = self
-            .steps_iterator()
-            .any(|step| matches!(&step, Step::Repeat(..)));
+        let contains_repeat = self.steps_iterator().any(|step| matches!(&step, Step::Repeat(..)));
         if contains_repeat {
             Box::new(self.steps_iterator()) as Box<_>
         } else {
             Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                 comment: None,
                 repeat: default_repeat_count,
                 steps: self.steps_iterator().collect(),
             })))) as Box<_>
         }
     }
pub fn solc_modes(&self) -> Vec<Mode> { pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes { match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
None => Mode::all().cloned().collect(), None => Mode::all().cloned().collect(),
} }
} }
} }
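As a sketch of the benchmark path (assuming `step_a` and `step_b` are plain function-call steps): a case with no explicit `Repeat` step is wrapped once in a synthetic one carrying the default repeat count:

// Hypothetical: `step_a` and `step_b` stand in for real steps.
let case = Case { steps: vec![step_a, step_b], ..Default::default() };
let steps: Vec<Step> = case.steps_iterator_for_benchmarks(10).collect();
// The two plain steps come back as one synthetic Step::Repeat with repeat == 10.
assert_eq!(steps.len(), 1);
assert!(matches!(&steps[0], Step::Repeat(repeat) if repeat.repeat == 10));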
define_wrapper_type!( define_wrapper_type!(
/// A wrapper type for the index of test cases found in metadata file. /// A wrapper type for the index of test cases found in metadata file.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(transparent)] #[serde(transparent)]
pub struct CaseIdx(usize) impl Display, FromStr; pub struct CaseIdx(usize) impl Display, FromStr;
); );
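Assuming the macro forwards `Display` and `FromStr` to the inner `usize`, the wrapper round-trips like this:

// Sketch: parsing and printing a case index.
let idx: CaseIdx = "7".parse().expect("a plain integer index");
assert_eq!(idx.to_string(), "7");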
+98 -106
@@ -1,6 +1,6 @@
use std::{ use std::{
fs::File, fs::File,
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
use revive_dt_common::iterators::FilesWithExtensionIterator; use revive_dt_common::iterators::FilesWithExtensionIterator;
@@ -13,119 +13,111 @@ use anyhow::Context as _;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)] #[serde(untagged)]
pub enum Corpus { pub enum Corpus {
SinglePath { name: String, path: PathBuf }, SinglePath { name: String, path: PathBuf },
MultiplePaths { name: String, paths: Vec<PathBuf> }, MultiplePaths { name: String, paths: Vec<PathBuf> },
} }
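Because the enum is untagged, serde picks the variant by shape alone. Two hypothetical corpus files:

// `path` selects SinglePath; `paths` selects MultiplePaths.
let single: Corpus = serde_json::from_str(r#"{ "name": "solidity", "path": "tests/solidity" }"#)
    .expect("a single-path corpus");
let multi: Corpus =
    serde_json::from_str(r#"{ "name": "mixed", "paths": ["tests/solidity", "tests/json"] }"#)
        .expect("a multi-path corpus");
assert_eq!(single.path_count(), 1);
assert_eq!(multi.path_count(), 2);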
impl Corpus { impl Corpus {
pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> { pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
let mut corpus = File::open(file_path.as_ref()) let mut corpus = File::open(file_path.as_ref())
.map_err(anyhow::Error::from) .map_err(anyhow::Error::from)
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into)) .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
.with_context(|| { .with_context(|| {
format!( format!(
"Failed to open and deserialize corpus file at {}", "Failed to open and deserialize corpus file at {}",
file_path.as_ref().display() file_path.as_ref().display()
) )
})?; })?;
let corpus_directory = file_path let corpus_directory = file_path
.as_ref() .as_ref()
.canonicalize() .canonicalize()
.context("Failed to canonicalize the path to the corpus file")? .context("Failed to canonicalize the path to the corpus file")?
.parent() .parent()
.context("Corpus file has no parent")? .context("Corpus file has no parent")?
.to_path_buf(); .to_path_buf();
for path in corpus.paths_iter_mut() { for path in corpus.paths_iter_mut() {
*path = corpus_directory.join(path.as_path()) *path = corpus_directory.join(path.as_path())
} }
Ok(corpus) Ok(corpus)
} }
pub fn enumerate_tests(&self) -> Vec<MetadataFile> { pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
let mut tests = self let mut tests = self
.paths_iter() .paths_iter()
.flat_map(|root_path| { .flat_map(|root_path| {
if !root_path.is_dir() { if !root_path.is_dir() {
Box::new(std::iter::once(root_path.to_path_buf())) Box::new(std::iter::once(root_path.to_path_buf()))
as Box<dyn Iterator<Item = _>> as Box<dyn Iterator<Item = _>>
} else { } else {
Box::new( Box::new(
FilesWithExtensionIterator::new(root_path) FilesWithExtensionIterator::new(root_path)
.with_use_cached_fs(true) .with_use_cached_fs(true)
.with_allowed_extension("sol") .with_allowed_extension("sol")
.with_allowed_extension("json"), .with_allowed_extension("json"),
) )
} }
.map(move |metadata_file_path| (root_path, metadata_file_path)) .map(move |metadata_file_path| (root_path, metadata_file_path))
}) })
.filter_map(|(root_path, metadata_file_path)| { .filter_map(|(root_path, metadata_file_path)| {
Metadata::try_from_file(&metadata_file_path) Metadata::try_from_file(&metadata_file_path)
.or_else(|| { .or_else(|| {
debug!( debug!(
discovered_from = %root_path.display(), discovered_from = %root_path.display(),
metadata_file_path = %metadata_file_path.display(), metadata_file_path = %metadata_file_path.display(),
"Skipping file since it doesn't contain valid metadata" "Skipping file since it doesn't contain valid metadata"
); );
None None
}) })
.map(|metadata| MetadataFile { .map(|metadata| MetadataFile {
metadata_file_path, metadata_file_path,
corpus_file_path: root_path.to_path_buf(), corpus_file_path: root_path.to_path_buf(),
content: metadata, content: metadata,
}) })
.inspect(|metadata_file| { .inspect(|metadata_file| {
debug!( debug!(
metadata_file_path = %metadata_file.relative_path().display(), metadata_file_path = %metadata_file.relative_path().display(),
"Loaded metadata file" "Loaded metadata file"
) )
}) })
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
-        info!(
-            len = tests.len(),
-            corpus_name = self.name(),
-            "Found tests in Corpus"
-        );
+        info!(len = tests.len(), corpus_name = self.name(), "Found tests in Corpus");
         tests
     }
pub fn name(&self) -> &str { pub fn name(&self) -> &str {
match self { match self {
Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(), Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
} }
} }
pub fn paths_iter(&self) -> impl Iterator<Item = &Path> { pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
match self { match self {
-            Corpus::SinglePath { path, .. } => {
-                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
-            }
-            Corpus::MultiplePaths { paths, .. } => {
-                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
-            }
+            Corpus::SinglePath { path, .. } =>
+                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>,
+            Corpus::MultiplePaths { paths, .. } =>
+                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>,
         }
     }
pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> { pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
match self { match self {
-            Corpus::SinglePath { path, .. } => {
-                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
-            }
-            Corpus::MultiplePaths { paths, .. } => {
-                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
-            }
+            Corpus::SinglePath { path, .. } =>
+                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>,
+            Corpus::MultiplePaths { paths, .. } =>
+                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>,
         }
     }
pub fn path_count(&self) -> usize { pub fn path_count(&self) -> usize {
match self { match self {
Corpus::SinglePath { .. } => 1, Corpus::SinglePath { .. } => 1,
Corpus::MultiplePaths { paths, .. } => paths.len(), Corpus::MultiplePaths { paths, .. } => paths.len(),
} }
} }
} }
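End to end, loading a corpus and walking its tests might look like the sketch below; the corpus path is invented for illustration:

// Hypothetical: paths inside the file are resolved relative to the corpus file itself.
let corpus = Corpus::try_from_path("corpora/solidity.json").expect("a readable corpus file");
for metadata_file in corpus.enumerate_tests() {
    println!("{}", metadata_file.relative_path().display());
}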
File diff suppressed because it is too large
+177 -192
@@ -1,13 +1,12 @@
use anyhow::Context as _; use anyhow::Context as _;
use regex::Regex; use regex::Regex;
-use revive_dt_common::iterators::EitherIter;
-use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
+use revive_dt_common::{
+    iterators::EitherIter,
+    types::{Mode, ModeOptimizerSetting, ModePipeline},
+};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};
/// This represents a mode that has been parsed from test metadata. /// This represents a mode that has been parsed from test metadata.
/// ///
@@ -21,17 +20,17 @@ use std::sync::LazyLock;
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")] #[serde(try_from = "String", into = "String")]
pub struct ParsedMode { pub struct ParsedMode {
pub pipeline: Option<ModePipeline>, pub pipeline: Option<ModePipeline>,
pub optimize_flag: Option<bool>, pub optimize_flag: Option<bool>,
pub optimize_setting: Option<ModeOptimizerSetting>, pub optimize_setting: Option<ModeOptimizerSetting>,
pub version: Option<semver::VersionReq>, pub version: Option<semver::VersionReq>,
} }
impl FromStr for ParsedMode { impl FromStr for ParsedMode {
type Err = anyhow::Error; type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
static REGEX: LazyLock<Regex> = LazyLock::new(|| { static REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?x) Regex::new(r"(?x)
^ ^
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E- (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
\s* \s*
@@ -40,218 +39,204 @@ impl FromStr for ParsedMode {
(?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8 (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
$ $
").unwrap() ").unwrap()
}); });
let Some(caps) = REGEX.captures(s) else { let Some(caps) = REGEX.captures(s) else {
anyhow::bail!("Cannot parse mode '{s}' from string"); anyhow::bail!("Cannot parse mode '{s}' from string");
}; };
let pipeline = match caps.name("pipeline") { let pipeline = match caps.name("pipeline") {
Some(m) => Some( Some(m) => Some(
ModePipeline::from_str(m.as_str()) ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?, .context("Failed to parse mode pipeline from string")?,
), ),
None => None, None => None,
}; };
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+"); let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") { let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some( Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str()) ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?, .context("Failed to parse optimizer setting from string")?,
), ),
None => None, None => None,
}; };
let version = match caps.name("version") { let version = match caps.name("version") {
Some(m) => Some( Some(m) => Some(
semver::VersionReq::parse(m.as_str()) semver::VersionReq::parse(m.as_str())
.map_err(|e| { .map_err(|e| {
anyhow::anyhow!( anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}", "Cannot parse the version requirement '{}': {e}",
m.as_str() m.as_str()
) )
}) })
.context("Failed to parse semver requirement from mode string")?, .context("Failed to parse semver requirement from mode string")?,
), ),
None => None, None => None,
}; };
-        Ok(ParsedMode {
-            pipeline,
-            optimize_flag,
-            optimize_setting,
-            version,
-        })
+        Ok(ParsedMode { pipeline, optimize_flag, optimize_setting, version })
     }
} }
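Decomposed, a full mode string fills all four fields. The sketch below goes through `Display` for the pipeline and optimizer values rather than naming their enum variants, since only the printed forms appear in this diff:

use std::str::FromStr;

// Sketch: every component of "Y+ M3 >=0.8.0" lands in its own field.
let mode = ParsedMode::from_str("Y+ M3 >=0.8.0").expect("a valid mode string");
assert_eq!(mode.pipeline.map(|p| p.to_string()), Some("Y".to_string()));
assert_eq!(mode.optimize_flag, Some(true)); // '+' parses to true
assert_eq!(mode.optimize_setting.map(|s| s.to_string()), Some("M3".to_string()));
assert_eq!(mode.version, Some(semver::VersionReq::parse(">=0.8.0").unwrap()));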
impl Display for ParsedMode { impl Display for ParsedMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut has_written = false; let mut has_written = false;
if let Some(pipeline) = self.pipeline { if let Some(pipeline) = self.pipeline {
pipeline.fmt(f)?; pipeline.fmt(f)?;
if let Some(optimize_flag) = self.optimize_flag { if let Some(optimize_flag) = self.optimize_flag {
f.write_str(if optimize_flag { "+" } else { "-" })?; f.write_str(if optimize_flag { "+" } else { "-" })?;
} }
has_written = true; has_written = true;
} }
if let Some(optimize_setting) = self.optimize_setting { if let Some(optimize_setting) = self.optimize_setting {
if has_written { if has_written {
f.write_str(" ")?; f.write_str(" ")?;
} }
optimize_setting.fmt(f)?; optimize_setting.fmt(f)?;
has_written = true; has_written = true;
} }
if let Some(version) = &self.version { if let Some(version) = &self.version {
if has_written { if has_written {
f.write_str(" ")?; f.write_str(" ")?;
} }
version.fmt(f)?; version.fmt(f)?;
} }
Ok(()) Ok(())
} }
} }
impl From<ParsedMode> for String { impl From<ParsedMode> for String {
fn from(parsed_mode: ParsedMode) -> Self { fn from(parsed_mode: ParsedMode) -> Self {
parsed_mode.to_string() parsed_mode.to_string()
} }
} }
impl TryFrom<String> for ParsedMode { impl TryFrom<String> for ParsedMode {
type Error = anyhow::Error; type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> { fn try_from(value: String) -> Result<Self, Self::Error> {
ParsedMode::from_str(&value) ParsedMode::from_str(&value)
} }
} }
impl ParsedMode { impl ParsedMode {
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try. /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
pub fn to_modes(&self) -> impl Iterator<Item = Mode> { pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
let pipeline_iter = self.pipeline.as_ref().map_or_else( let pipeline_iter = self.pipeline.as_ref().map_or_else(
|| EitherIter::A(ModePipeline::test_cases()), || EitherIter::A(ModePipeline::test_cases()),
|p| EitherIter::B(std::iter::once(*p)), |p| EitherIter::B(std::iter::once(*p)),
); );
-        let optimize_flag_setting = self.optimize_flag.map(|flag| {
-            if flag {
-                ModeOptimizerSetting::M3
-            } else {
-                ModeOptimizerSetting::M0
-            }
-        });
+        let optimize_flag_setting = self
+            .optimize_flag
+            .map(|flag| if flag { ModeOptimizerSetting::M3 } else { ModeOptimizerSetting::M0 });
let optimize_flag_iter = match optimize_flag_setting { let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)), Some(setting) => EitherIter::A(std::iter::once(setting)),
None => EitherIter::B(ModeOptimizerSetting::test_cases()), None => EitherIter::B(ModeOptimizerSetting::test_cases()),
}; };
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else( let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|| EitherIter::A(optimize_flag_iter), || EitherIter::A(optimize_flag_iter),
|s| EitherIter::B(std::iter::once(*s)), |s| EitherIter::B(std::iter::once(*s)),
); );
pipeline_iter.flat_map(move |pipeline| { pipeline_iter.flat_map(move |pipeline| {
-            optimize_settings_iter
-                .clone()
-                .map(move |optimize_setting| Mode {
-                    pipeline,
-                    optimize_setting,
-                    version: self.version.clone(),
-                })
+            optimize_settings_iter.clone().map(move |optimize_setting| Mode {
+                pipeline,
+                optimize_setting,
+                version: self.version.clone(),
+            })
         })
     }
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s. /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
/// This avoids any duplicate entries. /// This avoids any duplicate entries.
pub fn many_to_modes<'a>( pub fn many_to_modes<'a>(
parsed: impl Iterator<Item = &'a ParsedMode>, parsed: impl Iterator<Item = &'a ParsedMode>,
) -> impl Iterator<Item = Mode> { ) -> impl Iterator<Item = Mode> {
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect(); let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
modes.into_iter() modes.into_iter()
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
#[test] #[test]
fn test_parsed_mode_from_str() { fn test_parsed_mode_from_str() {
let strings = vec![ let strings = vec![
("Mz", "Mz"), ("Mz", "Mz"),
("Y", "Y"), ("Y", "Y"),
("Y+", "Y+"), ("Y+", "Y+"),
("Y-", "Y-"), ("Y-", "Y-"),
("E", "E"), ("E", "E"),
("E+", "E+"), ("E+", "E+"),
("E-", "E-"), ("E-", "E-"),
("Y M0", "Y M0"), ("Y M0", "Y M0"),
("Y M1", "Y M1"), ("Y M1", "Y M1"),
("Y M2", "Y M2"), ("Y M2", "Y M2"),
("Y M3", "Y M3"), ("Y M3", "Y M3"),
("Y Ms", "Y Ms"), ("Y Ms", "Y Ms"),
("Y Mz", "Y Mz"), ("Y Mz", "Y Mz"),
("E M0", "E M0"), ("E M0", "E M0"),
("E M1", "E M1"), ("E M1", "E M1"),
("E M2", "E M2"), ("E M2", "E M2"),
("E M3", "E M3"), ("E M3", "E M3"),
("E Ms", "E Ms"), ("E Ms", "E Ms"),
("E Mz", "E Mz"), ("E Mz", "E Mz"),
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning) // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
("Y 0.8.0", "Y ^0.8.0"), ("Y 0.8.0", "Y ^0.8.0"),
("E+ 0.8.0", "E+ ^0.8.0"), ("E+ 0.8.0", "E+ ^0.8.0"),
("Y M3 >=0.8.0", "Y M3 >=0.8.0"), ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
("E Mz <0.7.0", "E Mz <0.7.0"), ("E Mz <0.7.0", "E Mz <0.7.0"),
// We can parse +- _and_ M1/M2 but the latter takes priority. // We can parse +- _and_ M1/M2 but the latter takes priority.
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"), ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
("E- M2 0.7.0", "E- M2 ^0.7.0"), ("E- M2 0.7.0", "E- M2 ^0.7.0"),
// We don't see this in the wild but it is parsed. // We don't see this in the wild but it is parsed.
("<=0.8", "<=0.8"), ("<=0.8", "<=0.8"),
]; ];
for (actual, expected) in strings { for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual) let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
assert_eq!( assert_eq!(
expected, expected,
parsed.to_string(), parsed.to_string(),
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'" "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
); );
} }
} }
#[test] #[test]
fn test_parsed_mode_to_test_modes() { fn test_parsed_mode_to_test_modes() {
let strings = vec![ let strings = vec![
("Mz", vec!["Y Mz", "E Mz"]), ("Mz", vec!["Y Mz", "E Mz"]),
("Y", vec!["Y M0", "Y M3"]), ("Y", vec!["Y M0", "Y M3"]),
("E", vec!["E M0", "E M3"]), ("E", vec!["E M0", "E M3"]),
("Y+", vec!["Y M3"]), ("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]), ("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]), ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
-            (
-                "<=0.8",
-                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
-            ),
+            ("<=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"]),
         ];
for (actual, expected) in strings { for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual) let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect(); let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect(); let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
assert_eq!( assert_eq!(
expected_set, actual_set, expected_set, actual_set,
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'" "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
); );
} }
} }
} }
+989 -1058
File diff suppressed because it is too large
+135 -136
@@ -1,10 +1,10 @@
-use std::collections::HashMap;
-use std::pin::Pin;
+use std::{collections::HashMap, pin::Pin};
-use alloy::eips::BlockNumberOrTag;
-use alloy::json_abi::JsonAbi;
-use alloy::primitives::TxHash;
-use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
+use alloy::{
+    eips::BlockNumberOrTag,
+    json_abi::JsonAbi,
+    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
+};
use anyhow::Result; use anyhow::Result;
use crate::metadata::{ContractIdent, ContractInstance}; use crate::metadata::{ContractIdent, ContractInstance};
@@ -12,165 +12,164 @@ use crate::metadata::{ContractIdent, ContractInstance};
/// A trait describing the interface that implementors are required to provide in order to be used /// A trait describing the interface that implementors are required to provide in order to be used
/// by the resolution logic that this crate implements to go from string calldata into bytes calldata. /// by the resolution logic that this crate implements to go from string calldata into bytes calldata.
pub trait ResolverApi { pub trait ResolverApi {
/// Returns the ID of the chain that the node is on. /// Returns the ID of the chain that the node is on.
fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>; fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;
/// Returns the gas price for the specified transaction. /// Returns the gas price for the specified transaction.
fn transaction_gas_price( fn transaction_gas_price(
&self, &self,
tx_hash: TxHash, tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
// TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit
// when we implement the changes to the gas we need to adjust this to be a u64. // when we implement the changes to the gas we need to adjust this to be a u64.
/// Returns the gas limit of the specified block. /// Returns the gas limit of the specified block.
fn block_gas_limit( fn block_gas_limit(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
/// Returns the coinbase of the specified block. /// Returns the coinbase of the specified block.
fn block_coinbase( fn block_coinbase(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;
/// Returns the difficulty of the specified block. /// Returns the difficulty of the specified block.
fn block_difficulty( fn block_difficulty(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
/// Returns the base fee of the specified block. /// Returns the base fee of the specified block.
fn block_base_fee( fn block_base_fee(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;
/// Returns the hash of the specified block. /// Returns the hash of the specified block.
fn block_hash( fn block_hash(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;
/// Returns the timestamp of the specified block. /// Returns the timestamp of the specified block.
fn block_timestamp( fn block_timestamp(
&self, &self,
number: BlockNumberOrTag, number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;
/// Returns the number of the last block. /// Returns the number of the last block.
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>; fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
} }
#[derive(Clone, Copy, Debug, Default)] #[derive(Clone, Copy, Debug, Default)]
/// Contextual information required by the code that's performing the resolution. /// Contextual information required by the code that's performing the resolution.
pub struct ResolutionContext<'a> { pub struct ResolutionContext<'a> {
/// When provided, the contracts in this map will be used for resolutions. /// When provided, the contracts in this map will be used for resolutions.
deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>, deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
/// When provided, the variables in this map will be used when performing resolutions. /// When provided, the variables in this map will be used when performing resolutions.
variables: Option<&'a HashMap<String, U256>>, variables: Option<&'a HashMap<String, U256>>,
/// When provided, this block number will be treated as the tip of the chain. /// When provided, this block number will be treated as the tip of the chain.
block_number: Option<&'a BlockNumber>, block_number: Option<&'a BlockNumber>,
/// When provided, the resolver will use this transaction hash for all of its resolutions. /// When provided, the resolver will use this transaction hash for all of its resolutions.
transaction_hash: Option<&'a TxHash>, transaction_hash: Option<&'a TxHash>,
} }
impl<'a> ResolutionContext<'a> { impl<'a> ResolutionContext<'a> {
pub fn new() -> Self { pub fn new() -> Self {
Default::default() Default::default()
} }
pub fn new_from_parts( pub fn new_from_parts(
deployed_contracts: impl Into< deployed_contracts: impl Into<
Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>, Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
>, >,
variables: impl Into<Option<&'a HashMap<String, U256>>>, variables: impl Into<Option<&'a HashMap<String, U256>>>,
block_number: impl Into<Option<&'a BlockNumber>>, block_number: impl Into<Option<&'a BlockNumber>>,
transaction_hash: impl Into<Option<&'a TxHash>>, transaction_hash: impl Into<Option<&'a TxHash>>,
) -> Self { ) -> Self {
Self { Self {
deployed_contracts: deployed_contracts.into(), deployed_contracts: deployed_contracts.into(),
variables: variables.into(), variables: variables.into(),
block_number: block_number.into(), block_number: block_number.into(),
transaction_hash: transaction_hash.into(), transaction_hash: transaction_hash.into(),
} }
} }
pub fn with_deployed_contracts( pub fn with_deployed_contracts(
mut self, mut self,
deployed_contracts: impl Into< deployed_contracts: impl Into<
Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>, Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
>, >,
) -> Self { ) -> Self {
self.deployed_contracts = deployed_contracts.into(); self.deployed_contracts = deployed_contracts.into();
self self
} }
pub fn with_variables( pub fn with_variables(
mut self, mut self,
variables: impl Into<Option<&'a HashMap<String, U256>>>, variables: impl Into<Option<&'a HashMap<String, U256>>>,
) -> Self { ) -> Self {
self.variables = variables.into(); self.variables = variables.into();
self self
} }
pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self { pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
self.block_number = block_number.into(); self.block_number = block_number.into();
self self
} }
pub fn with_transaction_hash( pub fn with_transaction_hash(
mut self, mut self,
transaction_hash: impl Into<Option<&'a TxHash>>, transaction_hash: impl Into<Option<&'a TxHash>>,
) -> Self { ) -> Self {
self.transaction_hash = transaction_hash.into(); self.transaction_hash = transaction_hash.into();
self self
} }
pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag { pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
match self.block_number { match self.block_number {
Some(block_number) => match number { Some(block_number) => match number {
BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number), BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
n @ (BlockNumberOrTag::Finalized n @ (BlockNumberOrTag::Finalized |
| BlockNumberOrTag::Safe BlockNumberOrTag::Safe |
| BlockNumberOrTag::Earliest BlockNumberOrTag::Earliest |
| BlockNumberOrTag::Pending BlockNumberOrTag::Pending |
| BlockNumberOrTag::Number(_)) => n, BlockNumberOrTag::Number(_)) => n,
}, },
None => number, None => number,
} }
} }
pub fn deployed_contract( pub fn deployed_contract(
&self, &self,
instance: &ContractInstance, instance: &ContractInstance,
) -> Option<&(ContractIdent, Address, JsonAbi)> { ) -> Option<&(ContractIdent, Address, JsonAbi)> {
self.deployed_contracts self.deployed_contracts
.and_then(|deployed_contracts| deployed_contracts.get(instance)) .and_then(|deployed_contracts| deployed_contracts.get(instance))
} }
pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> { pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
self.deployed_contract(instance).map(|(_, a, _)| a) self.deployed_contract(instance).map(|(_, a, _)| a)
} }
pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> { pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
self.deployed_contract(instance).map(|(_, _, a)| a) self.deployed_contract(instance).map(|(_, _, a)| a)
} }
pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> { pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
self.variables self.variables.and_then(|variables| variables.get(name.as_ref()))
.and_then(|variables| variables.get(name.as_ref())) }
}
pub fn tip_block_number(&self) -> Option<&'a BlockNumber> { pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
self.block_number self.block_number
} }
pub fn transaction_hash(&self) -> Option<&'a TxHash> { pub fn transaction_hash(&self) -> Option<&'a TxHash> {
self.transaction_hash self.transaction_hash
} }
} }
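For a sense of how this builder is used, here is a minimal sketch that pins "latest" to a known tip and resolves a variable; it assumes `ResolutionContext` and the `alloy` types above are in scope:

```rust
use std::collections::HashMap;

use alloy::{eips::BlockNumberOrTag, primitives::U256};

fn resolution_context_example() {
    // Variables that string calldata can resolve against.
    let mut variables = HashMap::new();
    variables.insert("value".to_string(), U256::from(42u64));

    // Treat block 100 as the tip of the chain for this resolution.
    let tip: u64 = 100;
    let ctx = ResolutionContext::new()
        .with_variables(&variables)
        .with_block_number(&tip);

    // `Latest` is rewritten to the pinned tip; explicit numbers pass through.
    assert!(matches!(
        ctx.resolve_block_number(BlockNumberOrTag::Latest),
        BlockNumberOrTag::Number(100)
    ));
    assert_eq!(ctx.variable("value"), Some(&U256::from(42u64)));
}
```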
+34
View File
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[[bin]]
name = "ml-test-runner"
path = "src/main.rs"
[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
[lints]
workspace = true
+74
View File
@@ -0,0 +1,74 @@
# ML Test Runner
A test runner for executing Revive differential tests file-by-file with cargo-test-style output.
This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)
## Features
- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against a single platform such as `geth-evm-solc` or `kitchensink-polkavm-resolc`
## Usage
```bash
# Run a single .sol file against an existing node (default)
./ml-test-runner path/to/test.sol --platform geth-evm-solc
# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink-polkavm-resolc
# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth-evm-solc
# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail
# Start the platform before executing tests
./ml-test-runner path/to/tests/ --platform geth-evm-solc --start-platform
# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```
## Arguments
- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--cached-failed <FILE>` - File to record tests that have failed (defaults to `.<platform>-failed`)
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform identifier to test against (e.g., `geth-evm-solc`, `kitchensink-polkavm-resolc`; default: `geth-evm-solc`)
- `--start-platform` - Start the platform and wait for RPC readiness before executing tests (default: `false`, which connects to an existing node via `--rpc-port`)
- `--private-key <KEY>` - Private key for wallet initialization (hex string, with or without a `0x` prefix)
- `--rpc-port <PORT>` - RPC port to connect to when using an existing node (default: `8545`)
- `--verbose`, `-v` - Show verbose output, including cached tests and detailed error messages
## Output Format
The runner produces cargo-test-style output:
```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached
failures:
---- path/to/test2.sol ----
Error: ...
test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```
## Building
```bash
cargo build --release -p ml-test-runner
```
The binary will be available at `target/release/ml-test-runner`.
+639
View File
@@ -0,0 +1,639 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
iterators::FilesWithExtensionIterator,
types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
CachedCompiler, Platform,
helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
case::CaseIdx,
corpus::Corpus,
metadata::{Metadata, MetadataFile},
};
use std::{
borrow::Cow,
collections::{BTreeMap, HashSet},
fs::File,
io::{BufRead, BufReader, BufWriter, Write},
path::{Path, PathBuf},
sync::Arc,
time::{Duration, Instant},
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files
#[arg(value_name = "PATH")]
path: PathBuf,
/// File to cache tests that have already passed
#[arg(long = "cached-passed")]
cached_passed: Option<PathBuf>,
/// File to store tests that have failed (defaults to .<platform>-failed)
#[arg(long = "cached-failed")]
cached_failed: Option<PathBuf>,
/// Stop after the first file failure
#[arg(long = "bail")]
bail: bool,
/// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
#[arg(long = "platform", default_value = "geth-evm-solc")]
platform: PlatformIdentifier,
/// Start the platform and wait for RPC readiness
#[arg(long = "start-platform", default_value = "false")]
start_platform: bool,
/// Private key to use for wallet initialization (hex string with or without 0x prefix)
#[arg(
long = "private-key",
default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
)]
private_key: String,
/// RPC port to connect to when using existing node
#[arg(long = "rpc-port", default_value = "8545")]
rpc_port: u16,
/// Show verbose output including cached tests and detailed error messages
#[arg(long = "verbose", short = 'v')]
verbose: bool,
}
fn main() -> anyhow::Result<()> {
let args = MlTestRunnerArgs::parse();
// Only set up tracing if RUST_LOG is explicitly set or --verbose is passed
if std::env::var("RUST_LOG").is_ok() || args.verbose {
let subscriber = FmtSubscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Failed to set tracing subscriber");
}
info!("ML test runner starting");
info!("Platform: {:?}", args.platform);
info!("Start platform: {}", args.start_platform);
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(run(args))
}
/// Wait for HTTP server to be ready by attempting to connect to the specified port
async fn wait_for_http_server(port: u16) -> anyhow::Result<()> {
const MAX_RETRIES: u32 = 60;
const RETRY_DELAY: Duration = Duration::from_secs(1);
for attempt in 1..=MAX_RETRIES {
match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)).await {
Ok(_) => {
info!("Successfully connected to HTTP server on port {} (attempt {})", port, attempt);
return Ok(());
},
Err(e) => {
if attempt == MAX_RETRIES {
anyhow::bail!(
"Failed to connect to HTTP server on port {} after {} attempts: {}",
port,
MAX_RETRIES,
e
);
}
if attempt % 10 == 0 {
info!(
"Still waiting for HTTP server on port {} (attempt {}/{})",
port, attempt, MAX_RETRIES
);
}
tokio::time::sleep(RETRY_DELAY).await;
},
}
}
unreachable!()
}
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
let start_time = Instant::now();
info!("Discovering test files from: {}", args.path.display());
let test_files = discover_test_files(&args.path)?;
info!("Found {} test file(s)", test_files.len());
let cached_passed = if let Some(cache_file) = &args.cached_passed {
let cached = load_cached_passed(cache_file)?;
info!("Loaded {} cached passed test(s)", cached.len());
cached
} else {
HashSet::new()
};
let cached_passed = Arc::new(Mutex::new(cached_passed));
// Set up cached-failed file (defaults to .<platform>-failed)
let cached_failed_path = args
.cached_failed
.clone()
.unwrap_or_else(|| PathBuf::from(format!(".{:?}-failed", args.platform)));
let cached_failed = Arc::new(Mutex::new(HashSet::<String>::new()));
// Get the platform based on CLI args
let platform: &dyn Platform = match args.platform {
PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc =>
&revive_dt_core::KitchensinkPolkavmResolcPlatform,
PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
PlatformIdentifier::ReviveDevNodePolkavmResolc =>
&revive_dt_core::ReviveDevNodePolkavmResolcPlatform,
PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
PlatformIdentifier::ZombienetPolkavmResolc =>
&revive_dt_core::ZombienetPolkavmResolcPlatform,
PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
};
let test_context = TestExecutionContext::default();
let context = revive_dt_config::Context::Test(Box::new(test_context));
let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
info!("Starting blockchain node...");
let node_handle =
platform.new_node(context.clone()).context("Failed to spawn node thread")?;
info!("Waiting for node to start...");
let node = node_handle
.join()
.map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
.context("Failed to start node")?;
info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
let node = Box::leak(node);
info!("Running pre-transactions...");
node.pre_transactions().await.context("Failed to run pre-transactions")?;
info!("Pre-transactions completed");
node
} else {
info!("Using existing node at port {}", args.rpc_port);
// Wait for the HTTP server to be ready
info!("Waiting for HTTP server to be ready on port {}...", args.rpc_port);
wait_for_http_server(args.rpc_port).await?;
info!("HTTP server is ready");
let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc =>
Box::new(
revive_dt_node::node_implementations::geth::GethNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
PlatformIdentifier::KitchensinkPolkavmResolc |
PlatformIdentifier::KitchensinkRevmSolc |
PlatformIdentifier::ReviveDevNodePolkavmResolc |
PlatformIdentifier::ReviveDevNodeRevmSolc |
PlatformIdentifier::ZombienetPolkavmResolc |
PlatformIdentifier::ZombienetRevmSolc => Box::new(
revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
};
Box::leak(existing_node)
};
let mut passed_files = 0;
let mut failed_files = 0;
let mut skipped_files = 0;
let mut failures = Vec::new();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const YELLOW: &str = "\x1B[33m";
const COLOUR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
for test_file in test_files {
let file_display = test_file.display().to_string();
info!("\n\n == Executing test file: {file_display} == \n\n");
// Check if already passed
{
let cache = cached_passed.lock().await;
if cache.contains(&file_display) {
if args.verbose {
println!("test {file_display} ... {YELLOW}cached{COLOUR_RESET}");
}
skipped_files += 1;
continue;
}
}
info!("Loading metadata from: {}", test_file.display());
let metadata_file = match load_metadata_file(&test_file) {
Ok(mf) => {
info!("Loaded metadata with {} case(s)", mf.cases.len());
mf
},
Err(e) => {
// Skip files without metadata instead of treating them as failures
info!("Skipping {} (no metadata): {}", file_display, e);
skipped_files += 1;
continue;
},
};
// Execute test with a 20 second timeout
let test_result = tokio::time::timeout(
Duration::from_secs(20),
execute_test_file(&metadata_file, platform, node, &context),
)
.await;
let result = match test_result {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => Err(e),
Err(_) => Err(anyhow::anyhow!("Test timed out after 20 seconds")),
};
match result {
Ok(_) => {
println!("test {file_display} ... {GREEN}ok{COLOUR_RESET}");
passed_files += 1;
// Update cache
if let Some(cache_file) = &args.cached_passed {
let mut cache = cached_passed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_passed(cache_file, &cache) {
info!("Failed to save cache: {}", e);
}
}
},
Err(e) => {
println!("test {file_display} ... {RED}FAILED{COLOUR_RESET}");
failed_files += 1;
let error_detail = if args.verbose { format!("{:?}", e) } else { format!("{}", e) };
failures.push((file_display.clone(), error_detail));
// Update cached-failed
{
let mut cache = cached_failed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_failed(&cached_failed_path, &cache) {
info!("Failed to save cached-failed: {}", e);
}
}
if args.bail {
info!("Bailing after first failure");
break;
}
},
}
}
// Print summary
println!();
if !failures.is_empty() && args.verbose {
println!("{BOLD}failures:{BOLD_RESET}");
println!();
for (file, error) in &failures {
println!("---- {} ----", file);
println!("{}", error);
println!();
}
}
let elapsed = start_time.elapsed();
println!(
"test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
if failed_files == 0 {
format!("{GREEN}ok{COLOUR_RESET}")
} else {
format!("{RED}FAILED{COLOUR_RESET}")
},
passed_files,
failed_files,
skipped_files,
elapsed.as_secs_f64()
);
if failed_files > 0 {
std::process::exit(1);
}
Ok(())
}
/// Discover test files from the given path
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
if !path.exists() {
anyhow::bail!("Path does not exist: {}", path.display());
}
let mut files = Vec::new();
if path.is_file() {
let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");
match extension {
"sol" => {
// Single .sol file
files.push(path.to_path_buf());
},
"json" => {
// Corpus file - enumerate its tests
let corpus = Corpus::try_from_path(path)?;
let metadata_files = corpus.enumerate_tests();
for metadata in metadata_files {
files.push(metadata.metadata_file_path);
}
},
_ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension),
}
} else if path.is_dir() {
// First, find all test.json files
let mut test_json_dirs = HashSet::new();
for json_file in FilesWithExtensionIterator::new(path)
.with_allowed_extension("json")
.with_use_cached_fs(true)
{
if json_file.file_name().and_then(|s| s.to_str()) == Some("test.json") {
if let Some(parent) = json_file.parent() {
test_json_dirs.insert(parent.to_path_buf());
}
// Try to parse as corpus file first, then as metadata file
if let Ok(corpus) = Corpus::try_from_path(&json_file) {
// It's a corpus file - enumerate its tests
let metadata_files = corpus.enumerate_tests();
for metadata in metadata_files {
files.push(metadata.metadata_file_path);
}
} else {
// It's a metadata file - use it directly
files.push(json_file);
}
}
}
// Then, find .sol files that are NOT in directories with test.json
for sol_file in FilesWithExtensionIterator::new(path)
.with_allowed_extension("sol")
.with_use_cached_fs(true)
{
if let Some(parent) = sol_file.parent() {
if !test_json_dirs.contains(parent) {
files.push(sol_file);
}
} else {
files.push(sol_file);
}
}
} else {
anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
}
Ok(files)
}
/// Load metadata from a test file
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
let metadata = Metadata::try_from_file(path)
.ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;
Ok(MetadataFile {
metadata_file_path: path.to_path_buf(),
corpus_file_path: path.to_path_buf(),
content: metadata,
})
}
/// Execute all test cases in a metadata file
async fn execute_test_file(
metadata_file: &MetadataFile,
platform: &dyn Platform,
node: &'static dyn revive_dt_node_interaction::EthereumNode,
context: &revive_dt_config::Context,
) -> anyhow::Result<()> {
if metadata_file.cases.is_empty() {
anyhow::bail!("No test cases found in file");
}
info!("Processing {} test case(s)", metadata_file.cases.len());
let temp_dir = TempDir::new()?;
info!("Created temporary directory: {}", temp_dir.path().display());
info!("Initializing cached compiler");
let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
.await
.map(Arc::new)
.context("Failed to create cached compiler")?;
let private_key_allocator =
Arc::new(Mutex::new(PrivateKeyAllocator::new(alloy::primitives::U256::from(100))));
let (reporter, report_task) =
revive_dt_report::ReportAggregator::new(context.clone()).into_task();
tokio::spawn(report_task);
info!("Building test definitions for {} case(s)", metadata_file.cases.len());
let mut test_definitions = Vec::new();
for (case_idx, case) in metadata_file.cases.iter().enumerate() {
info!("Building test definition for case {}", case_idx);
let test_def = build_test_definition(
metadata_file,
case,
case_idx,
platform,
node,
&context,
&reporter,
)
.await?;
if let Some(test_def) = test_def {
info!("Test definition for case {} created successfully", case_idx);
test_definitions.push(test_def);
}
}
info!("Executing {} test definition(s)", test_definitions.len());
for (idx, test_definition) in test_definitions.iter().enumerate() {
info!("─────────────────────────────────────────────────────────────────");
info!(
"Executing case {}/{}: case_idx={}, mode={}, steps={}",
idx + 1,
test_definitions.len(),
test_definition.case_idx,
test_definition.mode,
test_definition.case.steps.len()
);
info!("Creating driver for case {}", test_definition.case_idx);
let driver = revive_dt_core::differential_tests::Driver::new_root(
test_definition,
private_key_allocator.clone(),
&cached_compiler,
)
.await
.context("Failed to create driver")?;
info!(
"Running {} step(s) for case {}",
test_definition.case.steps.len(),
test_definition.case_idx
);
let steps_executed = driver
.execute_all()
.await
.context(format!("Failed to execute case {}", test_definition.case_idx))?;
info!(
"✓ Case {} completed successfully, executed {} step(s)",
test_definition.case_idx, steps_executed
);
}
info!("─────────────────────────────────────────────────────────────────");
info!("All {} test case(s) executed successfully", test_definitions.len());
Ok(())
}
/// Build a test definition for a single test case
async fn build_test_definition<'a>(
metadata_file: &'a MetadataFile,
case: &'a revive_dt_format::case::Case,
case_idx: usize,
platform: &'a dyn Platform,
node: &'a dyn revive_dt_node_interaction::EthereumNode,
context: &revive_dt_config::Context,
reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
let mode = case
.modes
.as_ref()
.or(metadata_file.modes.as_ref())
.and_then(|modes| modes.first())
.and_then(|parsed_mode| parsed_mode.to_modes().next())
.map(Cow::Owned)
.or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
.unwrap();
let compiler = platform
.new_compiler(context.clone(), mode.version.clone().map(Into::into))
.await
.context("Failed to create compiler")?;
let test_reporter =
reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
}));
let execution_reporter =
test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
let mut platforms = BTreeMap::new();
platforms.insert(
platform.platform_identifier(),
TestPlatformInformation { platform, node, compiler, reporter: execution_reporter },
);
let test_definition = TestDefinition {
metadata: metadata_file,
metadata_file_path: &metadata_file.metadata_file_path,
mode,
case_idx: CaseIdx::new(case_idx),
case,
platforms,
reporter: test_reporter,
};
if let Err((reason, _)) = test_definition.check_compatibility() {
info!("Skipping case {}: {}", case_idx, reason);
return Ok(None);
}
Ok(Some(test_definition))
}
/// Load cached passed tests from file
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
if !path.exists() {
return Ok(HashSet::new());
}
let file = File::open(path).context("Failed to open cached-passed file")?;
let reader = BufReader::new(file);
let mut cache = HashSet::new();
for line in reader.lines() {
let line = line?;
let trimmed = line.trim();
if !trimmed.is_empty() {
cache.insert(trimmed.to_string());
}
}
Ok(cache)
}
/// Save cached passed tests to file
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
let file = File::create(path).context("Failed to create cached-passed file")?;
let mut writer = BufWriter::new(file);
let mut entries: Vec<_> = cache.iter().collect();
entries.sort();
for entry in entries {
writeln!(writer, "{}", entry)?;
}
writer.flush()?;
Ok(())
}
/// Save cached failed tests to file
fn save_cached_failed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
let file = File::create(path).context("Failed to create cached-failed file")?;
let mut writer = BufWriter::new(file);
let mut entries: Vec<_> = cache.iter().collect();
entries.sort();
for entry in entries {
writeln!(writer, "{}", entry)?;
}
writer.flush()?;
Ok(())
}
+69 -62
View File
@@ -1,11 +1,14 @@
//! This crate implements all node interactions. //! This crate implements all node interactions.
use std::pin::Pin; use std::{pin::Pin, sync::Arc};
use std::sync::Arc;
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}; use alloy::{
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace}; primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest}; rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
},
};
use anyhow::Result; use anyhow::Result;
use futures::Stream; use futures::Stream;
@@ -15,81 +18,85 @@ use revive_dt_format::traits::ResolverApi;
/// An interface for all interactions with Ethereum compatible nodes. /// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub trait EthereumNode { pub trait EthereumNode {
/// A function run after the node is spawned and before any transactions are run on the node. /// A function run after the node is spawned and before any transactions are run on the node.
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>; fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;
fn id(&self) -> usize; fn id(&self) -> usize;
/// Returns the nodes connection string. /// Returns the nodes connection string.
fn connection_string(&self) -> &str; fn connection_string(&self) -> &str;
fn submit_transaction( fn submit_transaction(
&self, &self,
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;
fn get_receipt( fn get_receipt(
&self, &self,
tx_hash: TxHash, tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
/// Execute the [TransactionRequest] and return a [TransactionReceipt]. /// Execute the [TransactionRequest] and return a [TransactionReceipt].
fn execute_transaction( fn execute_transaction(
&self, &self,
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
/// Trace the transaction in the [TransactionReceipt] and return a [GethTrace]. /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
fn trace_transaction( fn trace_transaction(
&self, &self,
tx_hash: TxHash, tx_hash: TxHash,
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;
/// Returns the state diff of the transaction hash in the [TransactionReceipt]. /// Returns the state diff of the transaction hash in the [TransactionReceipt].
fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>; fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;
/// Returns the balance of the provided [`Address`]. /// Returns the balance of the provided [`Address`].
fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>; fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
/// Returns the latest storage proof of the provided [`Address`] /// Returns the latest storage proof of the provided [`Address`]
fn latest_state_proof( fn latest_state_proof(
&self, &self,
address: Address, address: Address,
keys: Vec<StorageKey>, keys: Vec<StorageKey>,
) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>; ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;
/// Returns the resolver that is to be used with this Ethereum node. /// Returns the resolver that is to be used with this Ethereum node.
fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>; fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;
/// Returns the EVM version of the node. /// Returns the EVM version of the node.
fn evm_version(&self) -> EVMVersion; fn evm_version(&self) -> EVMVersion;
/// Returns a stream of the blocks that were mined by the node. /// Returns a stream of the blocks that were mined by the node.
fn subscribe_to_full_blocks_information( fn subscribe_to_full_blocks_information(
&self, &self,
) -> Pin< ) -> Pin<
Box< Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>> dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_, + '_,
>, >,
>; >;
/// Checks if the provided address is in the wallet. If it is, returns the address.
/// Otherwise, returns the default signer's address.
fn resolve_signer_or_default(&self, address: Address) -> Address;
} }
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MinedBlockInformation { pub struct MinedBlockInformation {
/// The block number. /// The block number.
pub block_number: BlockNumber, pub block_number: BlockNumber,
/// The block timestamp. /// The block timestamp.
pub block_timestamp: BlockTimestamp, pub block_timestamp: BlockTimestamp,
/// The amount of gas mined in the block. /// The amount of gas mined in the block.
pub mined_gas: u128, pub mined_gas: u128,
/// The gas limit of the block. /// The gas limit of the block.
pub block_gas_limit: u128, pub block_gas_limit: u128,
/// The hashes of the transactions that were mined as part of the block. /// The hashes of the transactions that were mined as part of the block.
pub transaction_hashes: Vec<TxHash>, pub transaction_hashes: Vec<TxHash>,
} }
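Every method on this trait returns a boxed future, so it stays object-safe and can be driven through a plain `&dyn EthereumNode`. A minimal, hypothetical sketch (the receipt field access assumes `alloy`'s `TransactionReceipt` layout):

```rust
use alloy::rpc::types::TransactionRequest;

async fn submit_and_trace(
    node: &dyn EthereumNode,
    tx: TransactionRequest,
) -> anyhow::Result<()> {
    // Submit the transaction, wait for inclusion, and fetch the receipt.
    let receipt = node.execute_transaction(tx).await?;

    // Trace the now-mined transaction with default tracing options.
    let _trace = node
        .trace_transaction(receipt.transaction_hash, Default::default())
        .await?;
    Ok(())
}
```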
+132 -151
View File
@@ -1,9 +1,9 @@
use std::{ use std::{
fs::{File, OpenOptions}, fs::{File, OpenOptions},
io::{BufRead, BufReader, Write}, io::{BufRead, BufReader, Write},
path::Path, path::Path,
process::{Child, Command}, process::{Child, Command},
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use anyhow::{Context, Result, bail}; use anyhow::{Context, Result, bail};
@@ -12,180 +12,161 @@ use anyhow::{Context, Result, bail};
/// when the process is dropped. /// when the process is dropped.
#[derive(Debug)] #[derive(Debug)]
pub struct Process { pub struct Process {
/// The handle of the child process. /// The handle of the child process.
child: Child, child: Child,
/// The file that stdout is being logged to. /// The file that stdout is being logged to.
stdout_logs_file: File, stdout_logs_file: File,
/// The file that stderr is being logged to. /// The file that stderr is being logged to.
stderr_logs_file: File, stderr_logs_file: File,
} }
impl Process { impl Process {
pub fn new( pub fn new(
log_file_prefix: impl Into<Option<&'static str>>, log_file_prefix: impl Into<Option<&'static str>>,
logs_directory: impl AsRef<Path>, logs_directory: impl AsRef<Path>,
binary_path: impl AsRef<Path>, binary_path: impl AsRef<Path>,
command_building_callback: impl FnOnce(&mut Command, File, File), command_building_callback: impl FnOnce(&mut Command, File, File),
process_readiness_wait_behavior: ProcessReadinessWaitBehavior, process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
) -> Result<Self> { ) -> Result<Self> {
let log_file_prefix = log_file_prefix.into(); let log_file_prefix = log_file_prefix.into();
let (stdout_file_name, stderr_file_name) = match log_file_prefix { let (stdout_file_name, stderr_file_name) = match log_file_prefix {
Some(prefix) => ( Some(prefix) => (format!("{prefix}_stdout.log"), format!("{prefix}_stderr.log")),
format!("{prefix}_stdout.log"), None => ("stdout.log".to_string(), "stderr.log".to_string()),
format!("{prefix}_stderr.log"), };
),
None => ("stdout.log".to_string(), "stderr.log".to_string()),
};
let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name); let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name); let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);
let stdout_logs_file = OpenOptions::new() let stdout_logs_file = OpenOptions::new()
.write(true) .write(true)
.truncate(true) .truncate(true)
.create(true) .create(true)
.open(stdout_logs_file_path.as_path()) .open(stdout_logs_file_path.as_path())
.context("Failed to open the stdout logs file")?; .context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new() let stderr_logs_file = OpenOptions::new()
.write(true) .write(true)
.truncate(true) .truncate(true)
.create(true) .create(true)
.open(stderr_logs_file_path.as_path()) .open(stderr_logs_file_path.as_path())
.context("Failed to open the stderr logs file")?; .context("Failed to open the stderr logs file")?;
let mut command = { let mut command = {
let stdout_logs_file = stdout_logs_file let stdout_logs_file =
.try_clone() stdout_logs_file.try_clone().context("Failed to clone the stdout logs file")?;
.context("Failed to clone the stdout logs file")?; let stderr_logs_file =
let stderr_logs_file = stderr_logs_file stderr_logs_file.try_clone().context("Failed to clone the stderr logs file")?;
.try_clone()
.context("Failed to clone the stderr logs file")?;
let mut command = Command::new(binary_path.as_ref()); let mut command = Command::new(binary_path.as_ref());
command_building_callback(&mut command, stdout_logs_file, stderr_logs_file); command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
command command
}; };
let mut child = command let mut child = command.spawn().context("Failed to spawn the built command")?;
.spawn()
.context("Failed to spawn the built command")?;
match process_readiness_wait_behavior { match process_readiness_wait_behavior {
ProcessReadinessWaitBehavior::NoStartupWait => {} ProcessReadinessWaitBehavior::NoStartupWait => {},
ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration), ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration, max_wait_duration,
mut check_function, mut check_function,
} => { } => {
let spawn_time = Instant::now(); let spawn_time = Instant::now();
let stdout_logs_file = OpenOptions::new() let stdout_logs_file = OpenOptions::new()
.read(true) .read(true)
.open(stdout_logs_file_path) .open(stdout_logs_file_path)
.context("Failed to open the stdout logs file")?; .context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new() let stderr_logs_file = OpenOptions::new()
.read(true) .read(true)
.open(stderr_logs_file_path) .open(stderr_logs_file_path)
.context("Failed to open the stderr logs file")?; .context("Failed to open the stderr logs file")?;
let mut stdout_lines = BufReader::new(stdout_logs_file).lines(); let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
let mut stderr_lines = BufReader::new(stderr_logs_file).lines(); let mut stderr_lines = BufReader::new(stderr_logs_file).lines();
let mut stdout = String::new(); let mut stdout = String::new();
let mut stderr = String::new(); let mut stderr = String::new();
loop { loop {
let stdout_line = stdout_lines.next().and_then(Result::ok); let stdout_line = stdout_lines.next().and_then(Result::ok);
let stderr_line = stderr_lines.next().and_then(Result::ok); let stderr_line = stderr_lines.next().and_then(Result::ok);
if let Some(stdout_line) = stdout_line.as_ref() { if let Some(stdout_line) = stdout_line.as_ref() {
stdout.push_str(stdout_line); stdout.push_str(stdout_line);
stdout.push('\n'); stdout.push('\n');
} }
if let Some(stderr_line) = stderr_line.as_ref() { if let Some(stderr_line) = stderr_line.as_ref() {
stderr.push_str(stderr_line); stderr.push_str(stderr_line);
stderr.push('\n'); stderr.push('\n');
} }
let check_result = let check_result =
check_function(stdout_line.as_deref(), stderr_line.as_deref()).context( check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
format!( format!(
"Failed to wait for the process to be ready - {stdout} - {stderr}" "Failed to wait for the process to be ready - {stdout} - {stderr}"
), ),
)?; )?;
if check_result { if check_result {
break; break;
} }
if Instant::now().duration_since(spawn_time) > max_wait_duration { if Instant::now().duration_since(spawn_time) > max_wait_duration {
bail!( bail!(
"Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}" "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
) )
} }
} }
} },
ProcessReadinessWaitBehavior::WaitForCommandToExit => { ProcessReadinessWaitBehavior::WaitForCommandToExit => {
if !child if !child.wait().context("Failed waiting for process to finish")?.success() {
.wait() anyhow::bail!("Failed to spawn command");
.context("Failed waiting for process to finish")? }
.success() },
{ }
anyhow::bail!("Failed to spawn command");
}
}
}
Ok(Self { Ok(Self { child, stdout_logs_file, stderr_logs_file })
child, }
stdout_logs_file,
stderr_logs_file,
})
}
} }
impl Drop for Process { impl Drop for Process {
fn drop(&mut self) { fn drop(&mut self) {
self.child.kill().expect("Failed to kill the process"); self.child.kill().expect("Failed to kill the process");
self.stdout_logs_file self.stdout_logs_file.flush().expect("Failed to flush the stdout logs file");
.flush() self.stderr_logs_file.flush().expect("Failed to flush the stderr logs file");
.expect("Failed to flush the stdout logs file"); }
self.stderr_logs_file
.flush()
.expect("Failed to flush the stderr logs file");
}
} }
pub enum ProcessReadinessWaitBehavior { pub enum ProcessReadinessWaitBehavior {
/// The process does not require any kind of wait after it's been spawned and can be used /// The process does not require any kind of wait after it's been spawned and can be used
/// straight away. /// straight away.
NoStartupWait, NoStartupWait,
/// Waits for the command to exit. /// Waits for the command to exit.
WaitForCommandToExit, WaitForCommandToExit,
/// The process does require some amount of wait duration after it's been started. /// The process does require some amount of wait duration after it's been started.
WaitDuration(Duration), WaitDuration(Duration),
/// The process requires a time bounded wait function which is a function of the lines that /// The process requires a time bounded wait function which is a function of the lines that
/// appear in the log files. /// appear in the log files.
TimeBoundedWaitFunction { TimeBoundedWaitFunction {
/// The maximum amount of time to wait for the check function to return true. /// The maximum amount of time to wait for the check function to return true.
max_wait_duration: Duration, max_wait_duration: Duration,
/// The function to use to check if the process spawned is ready to use or not. This /// The function to use to check if the process spawned is ready to use or not. This
/// function should return the following values in the listed cases: /// function should return the following values in the listed cases:
/// ///
/// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
/// and the wait is completed. /// and the wait is completed.
/// - `Ok(false)`: The process is not ready yet but it might be ready in the future. /// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
/// - `Err`: The process is not ready yet and will not be ready in the future as it appears /// - `Err`: The process is not ready yet and will not be ready in the future as it appears
/// that it has encountered an error when it was being spawned. /// that it has encountered an error when it was being spawned.
/// ///
/// The first argument is a line from stdout and the second argument is a line from stderr. /// The first argument is a line from stdout and the second argument is a line from stderr.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>, check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
}, },
} }
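As an illustration of the time-bounded variant, here is a minimal sketch; the log prefix, directory, binary path, flag, and readiness marker are all assumptions, not values taken from this repository:

```rust
use std::time::Duration;

fn spawn_node_process() -> anyhow::Result<Process> {
    Process::new(
        "node",                     // logs become node_stdout.log / node_stderr.log
        "/tmp/logs",                // hypothetical logs directory (must already exist)
        "/usr/local/bin/some-node", // hypothetical binary path
        |command, stdout, stderr| {
            // Wire the child's stdio into the prepared log files.
            command.arg("--dev").stdout(stdout).stderr(stderr);
        },
        ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
            max_wait_duration: Duration::from_secs(30),
            check_function: Box::new(|stdout_line, _stderr_line| {
                // Ready once the (assumed) readiness marker shows up on stdout.
                Ok(stdout_line.is_some_and(|line| line.contains("RPC listening")))
            }),
        },
    )
}
```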
+10 -10
View File
@@ -10,16 +10,16 @@ pub mod provider_utils;
/// An abstract interface for testing nodes. /// An abstract interface for testing nodes.
pub trait Node: EthereumNode { pub trait Node: EthereumNode {
/// Spawns a node configured according to the genesis json. /// Spawns a node configured according to the genesis json.
/// ///
/// Blocking until it's ready to accept transactions. /// Blocking until it's ready to accept transactions.
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>; fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
/// Prune the node instance and related data. /// Prune the node instance and related data.
/// ///
/// Blocking until it's completely stopped. /// Blocking until it's completely stopped.
fn shutdown(&mut self) -> anyhow::Result<()>; fn shutdown(&mut self) -> anyhow::Result<()>;
/// Returns the node version. /// Returns the node version.
fn version(&self) -> anyhow::Result<String>; fn version(&self) -> anyhow::Result<String>;
} }
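The implied lifecycle, as a minimal sketch with the `Genesis` value supplied by the caller:

```rust
fn run_node_lifecycle(node: &mut dyn Node, genesis: Genesis) -> anyhow::Result<()> {
    node.spawn(genesis)?; // blocks until the node accepts transactions
    let version = node.version()?;
    println!("node version: {version}");
    node.shutdown() // blocks until the node has fully stopped
}
```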
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -6,64 +6,56 @@ use tower::{Layer, Service};
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer { pub struct ConcurrencyLimiterLayer {
semaphore: Arc<Semaphore>, semaphore: Arc<Semaphore>,
} }
impl ConcurrencyLimiterLayer { impl ConcurrencyLimiterLayer {
pub fn new(permit_count: usize) -> Self { pub fn new(permit_count: usize) -> Self {
Self { Self { semaphore: Arc::new(Semaphore::new(permit_count)) }
semaphore: Arc::new(Semaphore::new(permit_count)), }
}
}
} }
impl<S> Layer<S> for ConcurrencyLimiterLayer { impl<S> Layer<S> for ConcurrencyLimiterLayer {
type Service = ConcurrencyLimiterService<S>; type Service = ConcurrencyLimiterService<S>;
fn layer(&self, inner: S) -> Self::Service { fn layer(&self, inner: S) -> Self::Service {
ConcurrencyLimiterService { ConcurrencyLimiterService { service: inner, semaphore: self.semaphore.clone() }
service: inner, }
semaphore: self.semaphore.clone(),
}
}
} }
#[derive(Clone)] #[derive(Clone)]
pub struct ConcurrencyLimiterService<S> { pub struct ConcurrencyLimiterService<S> {
service: S, service: S,
semaphore: Arc<Semaphore>, semaphore: Arc<Semaphore>,
} }
impl<S, Request> Service<Request> for ConcurrencyLimiterService<S> impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where where
S: Service<Request> + Send, S: Service<Request> + Send,
S::Future: Send + 'static, S::Future: Send + 'static,
{ {
type Response = S::Response; type Response = S::Response;
type Error = S::Error; type Error = S::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready( fn poll_ready(
&mut self, &mut self,
cx: &mut std::task::Context<'_>, cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> { ) -> std::task::Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx) self.service.poll_ready(cx)
} }
fn call(&mut self, req: Request) -> Self::Future { fn call(&mut self, req: Request) -> Self::Future {
let semaphore = self.semaphore.clone(); let semaphore = self.semaphore.clone();
let future = self.service.call(req); let future = self.service.call(req);
Box::pin(async move { Box::pin(async move {
let _permit = semaphore let _permit = semaphore.acquire().await.expect("Semaphore has been closed");
.acquire() tracing::debug!(
.await available_permits = semaphore.available_permits(),
.expect("Semaphore has been closed"); "Acquired Semaphore Permit"
tracing::debug!( );
available_permits = semaphore.available_permits(), future.await
"Acquired Semaphore Permit" })
); }
future.await
})
}
} }
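A minimal sketch of applying the layer to a plain `tower` service; it assumes tower's `util` feature (for `service_fn` and `ServiceExt::ready`), and the permit count and doubling service are illustrative only:

```rust
use tower::{Layer, Service, ServiceExt, service_fn};

async fn limited_call() -> anyhow::Result<()> {
    // A trivial service that doubles its input.
    let svc = service_fn(|x: u64| async move { Ok::<_, anyhow::Error>(x * 2) });

    // At most 4 requests may hold a permit at any one time.
    let mut limited = ConcurrencyLimiterLayer::new(4).layer(svc);

    let doubled = limited.ready().await?.call(21).await?;
    assert_eq!(doubled, 42);
    Ok(())
}
```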
@@ -1,84 +1,76 @@
use alloy::{ use alloy::{
network::{Network, TransactionBuilder}, network::{Network, TransactionBuilder},
providers::{ providers::{
Provider, SendableTx, Provider, SendableTx,
fillers::{GasFiller, TxFiller}, fillers::{GasFiller, TxFiller},
}, },
transports::TransportResult, transports::TransportResult,
}; };
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct FallbackGasFiller { pub struct FallbackGasFiller {
inner: GasFiller, inner: GasFiller,
default_gas_limit: u64, default_gas_limit: u64,
default_max_fee_per_gas: u128, default_max_fee_per_gas: u128,
default_priority_fee: u128, default_priority_fee: u128,
} }
impl FallbackGasFiller { impl FallbackGasFiller {
pub fn new( pub fn new(
default_gas_limit: u64, default_gas_limit: u64,
default_max_fee_per_gas: u128, default_max_fee_per_gas: u128,
default_priority_fee: u128, default_priority_fee: u128,
) -> Self { ) -> Self {
Self { Self { inner: GasFiller, default_gas_limit, default_max_fee_per_gas, default_priority_fee }
inner: GasFiller, }
default_gas_limit,
default_max_fee_per_gas,
default_priority_fee,
}
}
} }
impl Default for FallbackGasFiller { impl Default for FallbackGasFiller {
fn default() -> Self { fn default() -> Self {
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000) FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000)
} }
} }
impl<N> TxFiller<N> for FallbackGasFiller impl<N> TxFiller<N> for FallbackGasFiller
where where
N: Network, N: Network,
{ {
type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>; type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
fn status( fn status(
&self, &self,
tx: &<N as Network>::TransactionRequest, tx: &<N as Network>::TransactionRequest,
) -> alloy::providers::fillers::FillerControlFlow { ) -> alloy::providers::fillers::FillerControlFlow {
<GasFiller as TxFiller<N>>::status(&self.inner, tx) <GasFiller as TxFiller<N>>::status(&self.inner, tx)
} }
fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {} fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
async fn prepare<P: Provider<N>>( async fn prepare<P: Provider<N>>(
&self, &self,
provider: &P, provider: &P,
tx: &<N as Network>::TransactionRequest, tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> { ) -> TransportResult<Self::Fillable> {
// Try to fetch GasFillers fillable (gas_price, base_fee, estimate_gas, …) // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, …)
// If it errors (i.e. tx would revert under eth_estimateGas), swallow it. // Propagate errors so caller can handle them appropriately
match self.inner.prepare(provider, tx).await { self.inner.prepare(provider, tx).await.map(Some)
Ok(fill) => Ok(Some(fill)), }
Err(_) => Ok(None),
}
}
async fn fill( async fn fill(
&self, &self,
fillable: Self::Fillable, fillable: Self::Fillable,
mut tx: alloy::providers::SendableTx<N>, mut tx: alloy::providers::SendableTx<N>,
) -> TransportResult<SendableTx<N>> { ) -> TransportResult<SendableTx<N>> {
if let Some(fill) = fillable { if let Some(fill) = fillable {
// our inner GasFiller succeeded — use it // our inner GasFiller succeeded — use it
self.inner.fill(fill, tx).await self.inner.fill(fill, tx).await
} else { } else {
if let Some(builder) = tx.as_mut_builder() { if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit); builder.set_gas_limit(self.default_gas_limit);
builder.set_max_fee_per_gas(self.default_max_fee_per_gas); builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
builder.set_max_priority_fee_per_gas(self.default_priority_fee); builder.set_max_priority_fee_per_gas(self.default_priority_fee);
} }
Ok(tx) Ok(tx)
} }
} }
} }
+91 -95
View File
@@ -1,12 +1,12 @@
use std::{ops::ControlFlow, sync::LazyLock, time::Duration}; use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
use alloy::{ use alloy::{
network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844}, network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
providers::{ providers::{
Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider, Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller}, fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
}, },
rpc::client::ClientBuilder, rpc::client::ClientBuilder,
}; };
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll}; use revive_dt_common::futures::{PollingWaitBehavior, poll};
@@ -15,114 +15,110 @@ use tracing::debug;
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller}; use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
pub type ConcreteProvider<N, W> = FillProvider< pub type ConcreteProvider<N, W> = FillProvider<
JoinFill< JoinFill<
JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>, JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
WalletFiller<W>, WalletFiller<W>,
>, >,
RootProvider<N>, RootProvider<N>,
N, N,
>; >;
pub async fn construct_concurrency_limited_provider<N, W>( pub async fn construct_concurrency_limited_provider<N, W>(
rpc_url: &str, rpc_url: &str,
fallback_gas_filler: FallbackGasFiller, fallback_gas_filler: FallbackGasFiller,
chain_id_filler: ChainIdFiller, chain_id_filler: ChainIdFiller,
nonce_filler: NonceFiller, nonce_filler: NonceFiller,
wallet: W, wallet: W,
) -> Result<ConcreteProvider<N, W>> ) -> Result<ConcreteProvider<N, W>>
where where
N: Network<TransactionRequest: TransactionBuilder4844>, N: Network<TransactionRequest: TransactionBuilder4844>,
W: NetworkWallet<N>, W: NetworkWallet<N>,
Identity: TxFiller<N>, Identity: TxFiller<N>,
FallbackGasFiller: TxFiller<N>, FallbackGasFiller: TxFiller<N>,
ChainIdFiller: TxFiller<N>, ChainIdFiller: TxFiller<N>,
NonceFiller: TxFiller<N>, NonceFiller: TxFiller<N>,
WalletFiller<W>: TxFiller<N>, WalletFiller<W>: TxFiller<N>,
{ {
// This is a global limit on the RPC concurrency that applies to all of the providers created // This is a global limit on the RPC concurrency that applies to all of the providers created
// by the framework. With this limit we can have a maximum of N concurrent requests at any // by the framework. With this limit we can have a maximum of N concurrent requests at any
// point in time and no more than that. This is done in an effort to shield the framework // point in time and no more than that. This is done in an effort to shield the framework
// from some of the intermittent issues that we've been seeing related to RPC calls. // from some of the intermittent issues that we've been seeing related to RPC calls.
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> = static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
LazyLock::new(|| ConcurrencyLimiterLayer::new(10)); LazyLock::new(|| ConcurrencyLimiterLayer::new(10));
let client = ClientBuilder::default() let client = ClientBuilder::default()
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone()) .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
.connect(rpc_url) .connect(rpc_url)
.await .await
.context("Failed to construct the RPC client")?; .context("Failed to construct the RPC client")?;
let provider = ProviderBuilder::new() let provider = ProviderBuilder::new()
.disable_recommended_fillers() .disable_recommended_fillers()
.network::<N>() .network::<N>()
.filler(fallback_gas_filler) .filler(fallback_gas_filler)
.filler(chain_id_filler) .filler(chain_id_filler)
.filler(nonce_filler) .filler(nonce_filler)
.wallet(wallet) .wallet(wallet)
.connect_client(client); .connect_client(client);
Ok(provider) Ok(provider)
} }
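A hedged usage sketch of the constructor above, assuming an `Ethereum` network and alloy's `EthereumWallet`; the signer and RPC URL are placeholders:

use alloy::network::{Ethereum, EthereumWallet};
use alloy::providers::fillers::{ChainIdFiller, NonceFiller};
use alloy::signers::local::PrivateKeySigner;

let signer = PrivateKeySigner::random(); // placeholder signer
let wallet = EthereumWallet::from(signer);
let provider = construct_concurrency_limited_provider::<Ethereum, _>(
    "http://127.0.0.1:8545", // assumed local node URL
    fallback,                // the FallbackGasFiller sketched earlier
    ChainIdFiller::default(),
    NonceFiller::default(),
    wallet,
)
.await?;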
pub async fn execute_transaction<N, W>( pub async fn execute_transaction<N, W>(
provider: ConcreteProvider<N, W>, provider: ConcreteProvider<N, W>,
transaction: N::TransactionRequest, transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse> ) -> Result<N::ReceiptResponse>
where where
N: Network< N: Network<
TransactionRequest: TransactionBuilder4844, TransactionRequest: TransactionBuilder4844,
TxEnvelope = <Ethereum as Network>::TxEnvelope, TxEnvelope = <Ethereum as Network>::TxEnvelope,
>, >,
W: NetworkWallet<N>, W: NetworkWallet<N>,
Identity: TxFiller<N>, Identity: TxFiller<N>,
FallbackGasFiller: TxFiller<N>, FallbackGasFiller: TxFiller<N>,
ChainIdFiller: TxFiller<N>, ChainIdFiller: TxFiller<N>,
NonceFiller: TxFiller<N>, NonceFiller: TxFiller<N>,
WalletFiller<W>: TxFiller<N>, WalletFiller<W>: TxFiller<N>,
{ {
let sendable_transaction = provider let sendable_transaction =
.fill(transaction) provider.fill(transaction).await.context("Failed to fill transaction")?;
.await
.context("Failed to fill transaction")?;
let transaction_envelope = sendable_transaction let transaction_envelope = sendable_transaction
.try_into_envelope() .try_into_envelope()
.context("Failed to convert transaction into an envelope")?; .context("Failed to convert transaction into an envelope")?;
let tx_hash = *transaction_envelope.tx_hash(); let tx_hash = *transaction_envelope.tx_hash();
let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await { let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
Ok(pending_transaction) => pending_transaction, Ok(pending_transaction) => pending_transaction,
Err(error) => { Err(error) => {
let error_string = error.to_string(); let error_string = error.to_string();
if error_string.contains("Transaction Already Imported") { if error_string.contains("Transaction Already Imported") {
PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash) PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
} else { } else {
return Err(error).context(format!("Failed to submit transaction {tx_hash}")); return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
} }
} },
}; };
debug!(%tx_hash, "Submitted Transaction"); debug!(%tx_hash, "Submitted Transaction");
pending_transaction.set_timeout(Some(Duration::from_secs(120))); pending_transaction.set_timeout(Some(Duration::from_secs(120)));
let tx_hash = pending_transaction.watch().await.context(format!( let tx_hash = pending_transaction
"Transaction inclusion watching timeout for {tx_hash}" .watch()
))?; .await
.context(format!("Transaction inclusion watching timeout for {tx_hash}"))?;
poll( debug!(%tx_hash, "Transaction included, polling for receipt");
Duration::from_secs(60),
PollingWaitBehavior::Constant(Duration::from_secs(3)),
|| {
let provider = provider.clone();
async move { poll(Duration::from_secs(30), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
match provider.get_transaction_receipt(tx_hash).await { let provider = provider.clone();
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), async move {
_ => Ok(ControlFlow::Continue(())), match provider.get_transaction_receipt(tx_hash).await {
} Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
} _ => Ok(ControlFlow::Continue(())),
}, }
) }
.await })
.context(format!("Polling for receipt failed for {tx_hash}")) .await
.context(format!("Polling for receipt timed out for {tx_hash}"))
} }
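A sketch of driving `execute_transaction` with the provider above; the recipient and value are illustrative:

use alloy::network::TransactionBuilder;
use alloy::primitives::{Address, U256};
use alloy::rpc::types::TransactionRequest;

let tx = TransactionRequest::default()
    .with_to(Address::ZERO) // illustrative recipient
    .with_value(U256::from(1u64));
// Returns only once the submit/watch/poll sequence above has produced a receipt.
let receipt = execute_transaction(provider.clone(), tx).await?;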
+399 -448
View File
@@ -2,10 +2,10 @@
//! reporters and combines them into a single unified report. //! reporters and combines them into a single unified report.
use std::{ use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet}, collections::{BTreeMap, BTreeSet, HashMap, HashSet},
fs::OpenOptions, fs::OpenOptions,
path::PathBuf, path::PathBuf,
time::{SystemTime, UNIX_EPOCH}, time::{SystemTime, UNIX_EPOCH},
}; };
use alloy::primitives::Address; use alloy::primitives::Address;
@@ -19,434 +19,385 @@ use semver::Version;
use serde::Serialize; use serde::Serialize;
use serde_with::{DisplayFromStr, serde_as}; use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{ use tokio::sync::{
broadcast::{Sender, channel}, broadcast::{Sender, channel},
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
}; };
use tracing::debug; use tracing::debug;
use crate::*; use crate::*;
pub struct ReportAggregator { pub struct ReportAggregator {
/* Internal Report State */ /* Internal Report State */
report: Report, report: Report,
remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>, remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
/* Channels */ /* Channels */
runner_tx: Option<UnboundedSender<RunnerEvent>>, runner_tx: Option<UnboundedSender<RunnerEvent>>,
runner_rx: UnboundedReceiver<RunnerEvent>, runner_rx: UnboundedReceiver<RunnerEvent>,
listener_tx: Sender<ReporterEvent>, listener_tx: Sender<ReporterEvent>,
} }
impl ReportAggregator { impl ReportAggregator {
pub fn new(context: Context) -> Self { pub fn new(context: Context) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>(); let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024); let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self { Self {
report: Report::new(context), report: Report::new(context),
remaining_cases: Default::default(), remaining_cases: Default::default(),
runner_tx: Some(runner_tx), runner_tx: Some(runner_tx),
runner_rx, runner_rx,
listener_tx, listener_tx,
} }
} }
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) { pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
let reporter = self let reporter = self
.runner_tx .runner_tx
.take() .take()
.map(Into::into) .map(Into::into)
.expect("Can't fail since this can only be called once"); .expect("Can't fail since this can only be called once");
(reporter, async move { self.aggregate().await }) (reporter, async move { self.aggregate().await })
} }
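`into_task` hands back the cloneable `Reporter` handle plus the future that drains the channel; the two are used separately. A minimal sketch, assuming a `Context` value is already in scope:

let (reporter, task) = ReportAggregator::new(context).into_task();
let handle = tokio::spawn(task);
// ...hand `reporter` to the runners; they send RunnerEvents through it...
// Once a Completion event arrives, the future writes the JSON report and resolves.
handle.await??;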
async fn aggregate(mut self) -> Result<()> { async fn aggregate(mut self) -> Result<()> {
debug!("Starting to aggregate report"); debug!("Starting to aggregate report");
while let Some(event) = self.runner_rx.recv().await { while let Some(event) = self.runner_rx.recv().await {
debug!(?event, "Received Event"); debug!(?event, "Received Event");
match event { match event {
RunnerEvent::SubscribeToEvents(event) => { RunnerEvent::SubscribeToEvents(event) => {
self.handle_subscribe_to_events_event(*event); self.handle_subscribe_to_events_event(*event);
} },
RunnerEvent::CorpusFileDiscovery(event) => { RunnerEvent::CorpusFileDiscovery(event) =>
self.handle_corpus_file_discovered_event(*event) self.handle_corpus_file_discovered_event(*event),
} RunnerEvent::MetadataFileDiscovery(event) => {
RunnerEvent::MetadataFileDiscovery(event) => { self.handle_metadata_file_discovery_event(*event);
self.handle_metadata_file_discovery_event(*event); },
} RunnerEvent::TestCaseDiscovery(event) => {
RunnerEvent::TestCaseDiscovery(event) => { self.handle_test_case_discovery(*event);
self.handle_test_case_discovery(*event); },
} RunnerEvent::TestSucceeded(event) => {
RunnerEvent::TestSucceeded(event) => { self.handle_test_succeeded_event(*event);
self.handle_test_succeeded_event(*event); },
} RunnerEvent::TestFailed(event) => {
RunnerEvent::TestFailed(event) => { self.handle_test_failed_event(*event);
self.handle_test_failed_event(*event); },
} RunnerEvent::TestIgnored(event) => {
RunnerEvent::TestIgnored(event) => { self.handle_test_ignored_event(*event);
self.handle_test_ignored_event(*event); },
} RunnerEvent::NodeAssigned(event) => {
RunnerEvent::NodeAssigned(event) => { self.handle_node_assigned_event(*event);
self.handle_node_assigned_event(*event); },
} RunnerEvent::PreLinkContractsCompilationSucceeded(event) =>
RunnerEvent::PreLinkContractsCompilationSucceeded(event) => { self.handle_pre_link_contracts_compilation_succeeded_event(*event),
self.handle_pre_link_contracts_compilation_succeeded_event(*event) RunnerEvent::PostLinkContractsCompilationSucceeded(event) =>
} self.handle_post_link_contracts_compilation_succeeded_event(*event),
RunnerEvent::PostLinkContractsCompilationSucceeded(event) => { RunnerEvent::PreLinkContractsCompilationFailed(event) =>
self.handle_post_link_contracts_compilation_succeeded_event(*event) self.handle_pre_link_contracts_compilation_failed_event(*event),
} RunnerEvent::PostLinkContractsCompilationFailed(event) =>
RunnerEvent::PreLinkContractsCompilationFailed(event) => { self.handle_post_link_contracts_compilation_failed_event(*event),
self.handle_pre_link_contracts_compilation_failed_event(*event) RunnerEvent::LibrariesDeployed(event) => {
} self.handle_libraries_deployed_event(*event);
RunnerEvent::PostLinkContractsCompilationFailed(event) => { },
self.handle_post_link_contracts_compilation_failed_event(*event) RunnerEvent::ContractDeployed(event) => {
} self.handle_contract_deployed_event(*event);
RunnerEvent::LibrariesDeployed(event) => { },
self.handle_libraries_deployed_event(*event); RunnerEvent::Completion(event) => {
} self.handle_completion(*event);
RunnerEvent::ContractDeployed(event) => { break;
self.handle_contract_deployed_event(*event); },
} }
RunnerEvent::Completion(event) => { }
self.handle_completion(*event); debug!("Report aggregation completed");
break;
}
}
}
debug!("Report aggregation completed");
let file_name = { let file_name = {
let current_timestamp = SystemTime::now() let current_timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")? .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
.as_secs(); .as_secs();
let mut file_name = current_timestamp.to_string(); let mut file_name = current_timestamp.to_string();
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_path = self let file_path =
.report self.report.context.working_directory_configuration().as_path().join(file_name);
.context let file = OpenOptions::new()
.working_directory_configuration() .create(true)
.as_path() .write(true)
.join(file_name); .truncate(true)
let file = OpenOptions::new() .read(false)
.create(true) .open(&file_path)
.write(true) .with_context(|| {
.truncate(true) format!("Failed to open report file for writing: {}", file_path.display())
.read(false) })?;
.open(&file_path) serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
.with_context(|| { format!("Failed to serialize report JSON to {}", file_path.display())
format!( })?;
"Failed to open report file for writing: {}",
file_path.display()
)
})?;
serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
format!("Failed to serialize report JSON to {}", file_path.display())
})?;
Ok(()) Ok(())
} }
fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) { fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
let _ = event.tx.send(self.listener_tx.subscribe()); let _ = event.tx.send(self.listener_tx.subscribe());
} }
fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) { fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
self.report.corpora.push(event.corpus); self.report.corpora.push(event.corpus);
} }
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) { fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
self.report.metadata_files.insert(event.path.clone()); self.report.metadata_files.insert(event.path.clone());
} }
fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) { fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
self.remaining_cases self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into()) .entry(event.test_specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(event.test_specifier.solc_mode.clone()) .entry(event.test_specifier.solc_mode.clone())
.or_default() .or_default()
.insert(event.test_specifier.case_idx); .insert(event.test_specifier.case_idx);
} }
fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) { fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
// Remove this from the set of cases we're tracking since it has completed. // Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into()) .entry(event.test_specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(event.test_specifier.solc_mode.clone()) .entry(event.test_specifier.solc_mode.clone())
.or_default() .or_default()
.remove(&event.test_specifier.case_idx); .remove(&event.test_specifier.case_idx);
// Add information on the fact that the case succeeded to the report. // Add information on the fact that the case succeeded to the report.
let test_case_report = self.test_case_report(&event.test_specifier); let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Succeeded { test_case_report.status =
steps_executed: event.steps_executed, Some(TestCaseStatus::Succeeded { steps_executed: event.steps_executed });
}); self.handle_post_test_case_status_update(&event.test_specifier);
self.handle_post_test_case_status_update(&event.test_specifier); }
}
fn handle_test_failed_event(&mut self, event: TestFailedEvent) { fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
// Remove this from the set of cases we're tracking since it has completed. // Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into()) .entry(event.test_specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(event.test_specifier.solc_mode.clone()) .entry(event.test_specifier.solc_mode.clone())
.or_default() .or_default()
.remove(&event.test_specifier.case_idx); .remove(&event.test_specifier.case_idx);
// Add information on the fact that the case failed to the report. // Add information on the fact that the case failed to the report.
let test_case_report = self.test_case_report(&event.test_specifier); let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Failed { test_case_report.status = Some(TestCaseStatus::Failed { reason: event.reason });
reason: event.reason, self.handle_post_test_case_status_update(&event.test_specifier);
}); }
self.handle_post_test_case_status_update(&event.test_specifier);
}
fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) { fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
// Remove this from the set of cases we're tracking since it has completed. // Remove this from the set of cases we're tracking since it has completed.
self.remaining_cases self.remaining_cases
.entry(event.test_specifier.metadata_file_path.clone().into()) .entry(event.test_specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(event.test_specifier.solc_mode.clone()) .entry(event.test_specifier.solc_mode.clone())
.or_default() .or_default()
.remove(&event.test_specifier.case_idx); .remove(&event.test_specifier.case_idx);
// Add information on the fact that the case was ignored to the report. // Add information on the fact that the case was ignored to the report.
let test_case_report = self.test_case_report(&event.test_specifier); let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Ignored { test_case_report.status = Some(TestCaseStatus::Ignored {
reason: event.reason, reason: event.reason,
additional_fields: event.additional_fields, additional_fields: event.additional_fields,
}); });
self.handle_post_test_case_status_update(&event.test_specifier); self.handle_post_test_case_status_update(&event.test_specifier);
} }
fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) { fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
let remaining_cases = self let remaining_cases = self
.remaining_cases .remaining_cases
.entry(specifier.metadata_file_path.clone().into()) .entry(specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(specifier.solc_mode.clone()) .entry(specifier.solc_mode.clone())
.or_default(); .or_default();
if !remaining_cases.is_empty() { if !remaining_cases.is_empty() {
return; return;
} }
let case_status = self let case_status = self
.report .report
.test_case_information .test_case_information
.entry(specifier.metadata_file_path.clone().into()) .entry(specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(specifier.solc_mode.clone()) .entry(specifier.solc_mode.clone())
.or_default() .or_default()
.iter() .iter()
.map(|(case_idx, case_report)| { .map(|(case_idx, case_report)| {
( (*case_idx, case_report.status.clone().expect("Can't be uninitialized"))
*case_idx, })
case_report.status.clone().expect("Can't be uninitialized"), .collect::<BTreeMap<_, _>>();
) let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
}) metadata_file_path: specifier.metadata_file_path.clone().into(),
.collect::<BTreeMap<_, _>>(); mode: specifier.solc_mode.clone(),
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { case_status,
metadata_file_path: specifier.metadata_file_path.clone().into(), };
mode: specifier.solc_mode.clone(),
case_status,
};
// According to the documentation on send, the sending fails if there are no more receiver // According to the documentation on send, the sending fails if there are no more receiver
// handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail
// to send then we ignore the error. // to send then we ignore the error.
let _ = self.listener_tx.send(event); let _ = self.listener_tx.send(event);
} }
fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) { fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
let execution_information = self.execution_information(&ExecutionSpecifier { let execution_information = self.execution_information(&ExecutionSpecifier {
test_specifier: event.test_specifier, test_specifier: event.test_specifier,
node_id: event.id, node_id: event.id,
platform_identifier: event.platform_identifier, platform_identifier: event.platform_identifier,
}); });
execution_information.node = Some(TestCaseNodeInformation { execution_information.node = Some(TestCaseNodeInformation {
id: event.id, id: event.id,
platform_identifier: event.platform_identifier, platform_identifier: event.platform_identifier,
connection_string: event.connection_string, connection_string: event.connection_string,
}); });
} }
fn handle_pre_link_contracts_compilation_succeeded_event( fn handle_pre_link_contracts_compilation_succeeded_event(
&mut self, &mut self,
event: PreLinkContractsCompilationSucceededEvent, event: PreLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.context.report_configuration().include_compiler_input;
.report let include_output = self.report.context.report_configuration().include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input { let compiler_input = if include_input { event.compiler_input } else { None };
event.compiler_input let compiler_output = if include_output { Some(event.compiler_output) } else { None };
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success { execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached, is_cached: event.is_cached,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input, compiler_input,
compiler_output, compiler_output,
}); });
} }
fn handle_post_link_contracts_compilation_succeeded_event( fn handle_post_link_contracts_compilation_succeeded_event(
&mut self, &mut self,
event: PostLinkContractsCompilationSucceededEvent, event: PostLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.context.report_configuration().include_compiler_input;
.report let include_output = self.report.context.report_configuration().include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input { let compiler_input = if include_input { event.compiler_input } else { None };
event.compiler_input let compiler_output = if include_output { Some(event.compiler_output) } else { None };
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Success { execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached, is_cached: event.is_cached,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input, compiler_input,
compiler_output, compiler_output,
}); });
} }
fn handle_pre_link_contracts_compilation_failed_event( fn handle_pre_link_contracts_compilation_failed_event(
&mut self, &mut self,
event: PreLinkContractsCompilationFailedEvent, event: PreLinkContractsCompilationFailedEvent,
) { ) {
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure { execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason, reason: event.reason,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input: event.compiler_input, compiler_input: event.compiler_input,
}); });
} }
fn handle_post_link_contracts_compilation_failed_event( fn handle_post_link_contracts_compilation_failed_event(
&mut self, &mut self,
event: PostLinkContractsCompilationFailedEvent, event: PostLinkContractsCompilationFailedEvent,
) { ) {
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
execution_information.post_link_compilation_status = Some(CompilationStatus::Failure { execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
reason: event.reason, reason: event.reason,
compiler_version: event.compiler_version, compiler_version: event.compiler_version,
compiler_path: event.compiler_path, compiler_path: event.compiler_path,
compiler_input: event.compiler_input, compiler_input: event.compiler_input,
}); });
} }
fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) { fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
self.execution_information(&event.execution_specifier) self.execution_information(&event.execution_specifier).deployed_libraries =
.deployed_libraries = Some(event.libraries); Some(event.libraries);
} }
fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) { fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
self.execution_information(&event.execution_specifier) self.execution_information(&event.execution_specifier)
.deployed_contracts .deployed_contracts
.get_or_insert_default() .get_or_insert_default()
.insert(event.contract_instance, event.address); .insert(event.contract_instance, event.address);
} }
fn handle_completion(&mut self, _: CompletionEvent) { fn handle_completion(&mut self, _: CompletionEvent) {
self.runner_rx.close(); self.runner_rx.close();
} }
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport { fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
self.report self.report
.test_case_information .test_case_information
.entry(specifier.metadata_file_path.clone().into()) .entry(specifier.metadata_file_path.clone().into())
.or_default() .or_default()
.entry(specifier.solc_mode.clone()) .entry(specifier.solc_mode.clone())
.or_default() .or_default()
.entry(specifier.case_idx) .entry(specifier.case_idx)
.or_default() .or_default()
} }
fn execution_information( fn execution_information(
&mut self, &mut self,
specifier: &ExecutionSpecifier, specifier: &ExecutionSpecifier,
) -> &mut ExecutionInformation { ) -> &mut ExecutionInformation {
let test_case_report = self.test_case_report(&specifier.test_specifier); let test_case_report = self.test_case_report(&specifier.test_specifier);
test_case_report test_case_report
.platform_execution .platform_execution
.entry(specifier.platform_identifier) .entry(specifier.platform_identifier)
.or_default() .or_default()
.get_or_insert_default() .get_or_insert_default()
} }
} }
#[serde_as] #[serde_as]
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct Report { pub struct Report {
/// The context that the tool was started up with. /// The context that the tool was started up with.
pub context: Context, pub context: Context,
/// The list of corpus files that the tool found. /// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>, pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool. /// The list of metadata files that were found by the tool.
pub metadata_files: BTreeSet<MetadataFilePath>, pub metadata_files: BTreeSet<MetadataFilePath>,
/// Information relating to each test case. /// Information relating to each test case.
#[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")] #[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
pub test_case_information: pub test_case_information:
BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>, BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
} }
impl Report { impl Report {
pub fn new(context: Context) -> Self { pub fn new(context: Context) -> Self {
Self { Self {
context, context,
corpora: Default::default(), corpora: Default::default(),
metadata_files: Default::default(), metadata_files: Default::default(),
test_case_information: Default::default(), test_case_information: Default::default(),
} }
} }
} }
#[derive(Clone, Debug, Serialize, Default)] #[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport { pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored. /// Information on the status of the test case and whether it succeeded, failed, or was
#[serde(skip_serializing_if = "Option::is_none")] /// ignored.
pub status: Option<TestCaseStatus>, #[serde(skip_serializing_if = "Option::is_none")]
/// Information related to the execution on one of the platforms. pub status: Option<TestCaseStatus>,
pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>, /// Information related to the execution on one of the platforms.
pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
} }
/// Information related to the status of the test. Could be that the test succeeded, failed, or that /// Information related to the status of the test. Could be that the test succeeded, failed, or that
@@ -454,93 +405,93 @@ pub struct TestCaseReport {
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")] #[serde(tag = "status")]
pub enum TestCaseStatus { pub enum TestCaseStatus {
/// The test case succeeded. /// The test case succeeded.
Succeeded { Succeeded {
/// The number of steps of the case that were executed. /// The number of steps of the case that were executed.
steps_executed: usize, steps_executed: usize,
}, },
/// The test case failed. /// The test case failed.
Failed { Failed {
/// The reason for the failure of the test case. /// The reason for the failure of the test case.
reason: String, reason: String,
}, },
/// The test case was ignored. This variant carries information related to why it was ignored. /// The test case was ignored. This variant carries information related to why it was ignored.
Ignored { Ignored {
/// The reason behind the test case being ignored. /// The reason behind the test case being ignored.
reason: String, reason: String,
/// Additional fields that describe more information on why the test case is ignored. /// Additional fields that describe more information on why the test case is ignored.
#[serde(flatten)] #[serde(flatten)]
additional_fields: IndexMap<String, serde_json::Value>, additional_fields: IndexMap<String, serde_json::Value>,
}, },
} }
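Because of `#[serde(tag = "status")]`, the variant name is serialized as a `status` field alongside the variant's own fields (and, for `Ignored`, the `additional_fields` map is flattened into the same object). For example:

let status = TestCaseStatus::Failed { reason: "step 3 reverted".into() };
assert_eq!(
    serde_json::to_string(&status)?,
    r#"{"status":"Failed","reason":"step 3 reverted"}"#
);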
/// Information related to the platform node that's being used to execute the step. /// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct TestCaseNodeInformation { pub struct TestCaseNodeInformation {
/// The ID of the node that this case is being executed on. /// The ID of the node that this case is being executed on.
pub id: usize, pub id: usize,
/// The platform of the node. /// The platform of the node.
pub platform_identifier: PlatformIdentifier, pub platform_identifier: PlatformIdentifier,
/// The connection string of the node. /// The connection string of the node.
pub connection_string: String, pub connection_string: String,
} }
/// Execution information tied to the platform. /// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize)] #[derive(Clone, Debug, Default, Serialize)]
pub struct ExecutionInformation { pub struct ExecutionInformation {
/// Information related to the node assigned to this test case. /// Information related to the node assigned to this test case.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub node: Option<TestCaseNodeInformation>, pub node: Option<TestCaseNodeInformation>,
/// Information on the pre-link compiled contracts. /// Information on the pre-link compiled contracts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub pre_link_compilation_status: Option<CompilationStatus>, pub pre_link_compilation_status: Option<CompilationStatus>,
/// Information on the post-link compiled contracts. /// Information on the post-link compiled contracts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub post_link_compilation_status: Option<CompilationStatus>, pub post_link_compilation_status: Option<CompilationStatus>,
/// Information on the deployed libraries. /// Information on the deployed libraries.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>, pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
/// Information on the deployed contracts. /// Information on the deployed contracts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>, pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
} }
/// Information related to compilation /// Information related to compilation
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")] #[serde(tag = "status")]
pub enum CompilationStatus { pub enum CompilationStatus {
/// The compilation was successful. /// The compilation was successful.
Success { Success {
/// A flag with information on whether the compilation artifacts were cached or not. /// A flag with information on whether the compilation artifacts were cached or not.
is_cached: bool, is_cached: bool,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
compiler_version: Version, compiler_version: Version,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the /// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI contexts. /// CLI contexts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>, compiler_output: Option<CompilerOutput>,
}, },
/// The compilation failed. /// The compilation failed.
Failure { Failure {
/// The failure reason. /// The failure reason.
reason: String, reason: String,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_version: Option<Version>, compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI context and if the contracts were not cached and /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
}, },
} }
+11 -11
View File
@@ -8,30 +8,30 @@ use revive_dt_format::{case::CaseIdx, steps::StepPath};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
define_wrapper_type!( define_wrapper_type!(
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(transparent)] #[serde(transparent)]
pub struct MetadataFilePath(PathBuf); pub struct MetadataFilePath(PathBuf);
); );
/// An absolute specifier for a test. /// An absolute specifier for a test.
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestSpecifier { pub struct TestSpecifier {
pub solc_mode: Mode, pub solc_mode: Mode,
pub metadata_file_path: PathBuf, pub metadata_file_path: PathBuf,
pub case_idx: CaseIdx, pub case_idx: CaseIdx,
} }
/// An absolute specifier for a test that also includes information about the node that it's assigned to /// An absolute specifier for a test that also includes information about the node that it's assigned to
/// and what platform it belongs to. /// and what platform it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExecutionSpecifier { pub struct ExecutionSpecifier {
pub test_specifier: Arc<TestSpecifier>, pub test_specifier: Arc<TestSpecifier>,
pub node_id: usize, pub node_id: usize,
pub platform_identifier: PlatformIdentifier, pub platform_identifier: PlatformIdentifier,
} }
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier { pub struct StepExecutionSpecifier {
pub execution_specifier: Arc<ExecutionSpecifier>, pub execution_specifier: Arc<ExecutionSpecifier>,
pub step_idx: StepPath, pub step_idx: StepPath,
} }
+10 -10
View File
@@ -9,14 +9,14 @@ use crate::{MetadataFilePath, TestCaseStatus};
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum ReporterEvent { pub enum ReporterEvent {
/// An event sent by the reporter once an entire metadata file and solc mode combination has /// An event sent by the reporter once an entire metadata file and solc mode combination has
/// finished execution. /// finished execution.
MetadataFileSolcModeCombinationExecutionCompleted { MetadataFileSolcModeCombinationExecutionCompleted {
/// The path of the metadata file. /// The path of the metadata file.
metadata_file_path: MetadataFilePath, metadata_file_path: MetadataFilePath,
/// The Solc mode that this metadata file was executed in. /// The Solc mode that this metadata file was executed in.
mode: Mode, mode: Mode,
/// The status of each one of the cases. /// The status of each one of the cases.
case_status: BTreeMap<CaseIdx, TestCaseStatus>, case_status: BTreeMap<CaseIdx, TestCaseStatus>,
}, },
} }
+154 -152
View File
@@ -8,8 +8,10 @@ use anyhow::Context as _;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier; use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput}; use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::metadata::Metadata; use revive_dt_format::{
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance}; corpus::Corpus,
metadata::{ContractInstance, Metadata},
};
use semver::Version; use semver::Version;
use tokio::sync::{broadcast, oneshot}; use tokio::sync::{broadcast, oneshot};
@@ -472,160 +474,160 @@ macro_rules! define_event {
} }
define_event! { define_event! {
/// An event type that's sent by the test runners/drivers to the report aggregator. /// An event type that's sent by the test runners/drivers to the report aggregator.
pub(crate) enum RunnerEvent { pub(crate) enum RunnerEvent {
/// An event emitted by the reporter when it wishes to listen to events emitted by the /// An event emitted by the reporter when it wishes to listen to events emitted by the
/// aggregator. /// aggregator.
SubscribeToEvents { SubscribeToEvents {
/// The channel that the aggregator is to send the receive side of the channel on. /// The channel that the aggregator is to send the receive side of the channel on.
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>> tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
}, },
/// An event emitted by runners when they've discovered a corpus file. /// An event emitted by runners when they've discovered a corpus file.
CorpusFileDiscovery { CorpusFileDiscovery {
/// The contents of the corpus file. /// The contents of the corpus file.
corpus: Corpus corpus: Corpus
}, },
/// An event emitted by runners when they've discovered a metadata file. /// An event emitted by runners when they've discovered a metadata file.
MetadataFileDiscovery { MetadataFileDiscovery {
/// The path of the metadata file discovered. /// The path of the metadata file discovered.
path: MetadataFilePath, path: MetadataFilePath,
/// The content of the metadata file. /// The content of the metadata file.
metadata: Metadata metadata: Metadata
}, },
/// An event emitted by the runners when they discover a test case. /// An event emitted by the runners when they discover a test case.
TestCaseDiscovery { TestCaseDiscovery {
/// A specifier for the test that was discovered. /// A specifier for the test that was discovered.
test_specifier: Arc<TestSpecifier>, test_specifier: Arc<TestSpecifier>,
}, },
/// An event emitted by the runners when a test case is ignored. /// An event emitted by the runners when a test case is ignored.
TestIgnored { TestIgnored {
/// A specifier for the test that's been ignored. /// A specifier for the test that's been ignored.
test_specifier: Arc<TestSpecifier>, test_specifier: Arc<TestSpecifier>,
/// A reason for the test to be ignored. /// A reason for the test to be ignored.
reason: String, reason: String,
/// Additional fields that describe more information on why the test was ignored. /// Additional fields that describe more information on why the test was ignored.
additional_fields: IndexMap<String, serde_json::Value> additional_fields: IndexMap<String, serde_json::Value>
}, },
/// An event emitted by the runners when a test case has succeeded. /// An event emitted by the runners when a test case has succeeded.
TestSucceeded { TestSucceeded {
/// A specifier for the test that succeeded. /// A specifier for the test that succeeded.
test_specifier: Arc<TestSpecifier>, test_specifier: Arc<TestSpecifier>,
/// The number of steps of the case that were executed by the driver. /// The number of steps of the case that were executed by the driver.
steps_executed: usize, steps_executed: usize,
}, },
/// An event emitted by the runners when a test case has failed. /// An event emitted by the runners when a test case has failed.
TestFailed { TestFailed {
/// A specifier for the test that succeeded. /// A specifier for the test that succeeded.
test_specifier: Arc<TestSpecifier>, test_specifier: Arc<TestSpecifier>,
/// A reason for the failure of the test. /// A reason for the failure of the test.
reason: String, reason: String,
}, },
/// An event emitted when the test case is assigned a platform node. /// An event emitted when the test case is assigned a platform node.
NodeAssigned { NodeAssigned {
/// A specifier for the test that the assignment is for. /// A specifier for the test that the assignment is for.
test_specifier: Arc<TestSpecifier>, test_specifier: Arc<TestSpecifier>,
/// The ID of the node that this case is being executed on. /// The ID of the node that this case is being executed on.
id: usize, id: usize,
/// The identifier of the platform used. /// The identifier of the platform used.
platform_identifier: PlatformIdentifier, platform_identifier: PlatformIdentifier,
/// The connection string of the node. /// The connection string of the node.
connection_string: String, connection_string: String,
}, },
/// An event emitted by the runners when the compilation of the contracts has succeeded /// An event emitted by the runners when the compilation of the contracts has succeeded
/// on the pre-link contracts. /// on the pre-link contracts.
PreLinkContractsCompilationSucceeded { PreLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
compiler_version: Version, compiler_version: Version,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled /// A flag indicating whether the contract bytecode and ABI were cached or compiled
/// anew. /// anew.
is_cached: bool, is_cached: bool,
/// The input provided to the compiler - this is optional and not provided if the /// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache. /// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. /// The output of the compiler.
compiler_output: CompilerOutput compiler_output: CompilerOutput
}, },
/// An event emitted by the runners when the compilation of the contracts has succeeded /// An event emitted by the runners when the compilation of the contracts has succeeded
/// on the post-link contracts. /// on the post-link contracts.
PostLinkContractsCompilationSucceeded { PostLinkContractsCompilationSucceeded {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
compiler_version: Version, compiler_version: Version,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// A flag indicating whether the contract bytecode and ABI were cached or compiled /// A flag indicating whether the contract bytecode and ABI were cached or compiled
/// anew. /// anew.
is_cached: bool, is_cached: bool,
/// The input provided to the compiler - this is optional and not provided if the /// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache. /// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. /// The output of the compiler.
compiler_output: CompilerOutput compiler_output: CompilerOutput
}, },
/// An event emitted by the runners when the compilation of the pre-link contract has /// An event emitted by the runners when the compilation of the pre-link contract has
/// failed. /// failed.
PreLinkContractsCompilationFailed { PreLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>, compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler - this is optional and not provided if the /// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache. /// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The failure reason. /// The failure reason.
reason: String, reason: String,
}, },
/// An event emitted by the runners when the compilation of the post-link contract has /// An event emitted by the runners when the compilation of the post-link contract has
/// failed. /// failed.
PostLinkContractsCompilationFailed { PostLinkContractsCompilationFailed {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The version of the compiler used to compile the contracts. /// The version of the compiler used to compile the contracts.
compiler_version: Option<Version>, compiler_version: Option<Version>,
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler - this is optional and not provided if the /// The input provided to the compiler - this is optional and not provided if the
/// contracts were obtained from the cache. /// contracts were obtained from the cache.
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The failure reason. /// The failure reason.
reason: String, reason: String,
}, },
/// An event emitted by the runners when a library has been deployed. /// An event emitted by the runners when a library has been deployed.
LibrariesDeployed { LibrariesDeployed {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The addresses of the libraries that were deployed. /// The addresses of the libraries that were deployed.
libraries: BTreeMap<ContractInstance, Address> libraries: BTreeMap<ContractInstance, Address>
}, },
/// An event emitted by the runners when they've deployed a new contract. /// An event emitted by the runners when they've deployed a new contract.
ContractDeployed { ContractDeployed {
/// A specifier for the execution that's taking place. /// A specifier for the execution that's taking place.
execution_specifier: Arc<ExecutionSpecifier>, execution_specifier: Arc<ExecutionSpecifier>,
/// The instance name of the contract. /// The instance name of the contract.
contract_instance: ContractInstance, contract_instance: ContractInstance,
/// The address of the contract. /// The address of the contract.
address: Address address: Address
}, },
/// Reports the completion of the run. /// Reports the completion of the run.
Completion {} Completion {}
} }
} }
/// An extension to the [`Reporter`] implemented by the macro. /// An extension to the [`Reporter`] implemented by the macro.
impl RunnerEventReporter { impl RunnerEventReporter {
pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> { pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>(); let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
self.report_subscribe_to_events_event(tx) self.report_subscribe_to_events_event(tx)
.context("Failed to send subscribe request to reporter task")?; .context("Failed to send subscribe request to reporter task")?;
rx.await.map_err(Into::into) rx.await.map_err(Into::into)
} }
} }
pub type Reporter = RunnerEventReporter; pub type Reporter = RunnerEventReporter;
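A sketch of consuming the broadcast side from a listener; `recv` returns `Err` once the aggregator drops its sender (or on lag), which ends the loop:

let mut rx = reporter.subscribe().await?;
while let Ok(event) = rx.recv().await {
    match event {
        ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } => println!("{metadata_file_path:?} [{mode:?}]: {} cases done", case_status.len()),
    }
}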
+62 -81
View File
@@ -1,12 +1,12 @@
//! Helper for caching the solc binaries. //! Helper for caching the solc binaries.
use std::{ use std::{
collections::HashSet, collections::HashSet,
fs::{File, create_dir_all}, fs::{File, create_dir_all},
io::{BufWriter, Write}, io::{BufWriter, Write},
os::unix::fs::PermissionsExt, os::unix::fs::PermissionsExt,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::LazyLock, sync::LazyLock,
}; };
use semver::Version; use semver::Version;
@@ -19,90 +19,71 @@ pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default); pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
pub(crate) async fn get_or_download( pub(crate) async fn get_or_download(
working_directory: &Path, working_directory: &Path,
downloader: &SolcDownloader, downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> { ) -> anyhow::Result<(Version, PathBuf)> {
let target_directory = working_directory let target_directory = working_directory
.join(SOLC_CACHE_DIRECTORY) .join(SOLC_CACHE_DIRECTORY)
.join(downloader.version.to_string()); .join(downloader.version.to_string());
let target_file = target_directory.join(downloader.target); let target_file = target_directory.join(downloader.target);
let mut cache = SOLC_CACHER.lock().await; let mut cache = SOLC_CACHER.lock().await;
if cache.contains(&target_file) { if cache.contains(&target_file) {
tracing::debug!("using cached solc: {}", target_file.display()); tracing::debug!("using cached solc: {}", target_file.display());
return Ok((downloader.version.clone(), target_file)); return Ok((downloader.version.clone(), target_file));
} }
create_dir_all(&target_directory).with_context(|| { create_dir_all(&target_directory).with_context(|| {
format!( format!("Failed to create solc cache directory: {}", target_directory.display())
"Failed to create solc cache directory: {}", })?;
target_directory.display() download_to_file(&target_file, downloader)
) .await
})?; .with_context(|| format!("Failed to write downloaded solc to {}", target_file.display()))?;
download_to_file(&target_file, downloader) cache.insert(target_file.clone());
.await
.with_context(|| {
format!(
"Failed to write downloaded solc to {}",
target_file.display()
)
})?;
cache.insert(target_file.clone());
Ok((downloader.version.clone(), target_file)) Ok((downloader.version.clone(), target_file))
} }
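The cache is purely path-keyed: a binary lives at `<working_directory>/solc/<version>/<target>`, and the in-process `SOLC_CACHER` set only remembers which of those paths were already populated. An illustrative layout, with assumed values (the `SolcDownloader` constructor is not part of this diff):

use std::path::Path;

let expected = Path::new("/tmp/workdir")   // assumed working directory
    .join("solc")                          // SOLC_CACHE_DIRECTORY
    .join("0.8.26")                        // downloader.version, assumed
    .join("solc-linux-amd64");             // downloader.target, assumed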
async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> { async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
let Ok(file) = File::create_new(path) else { let Ok(file) = File::create_new(path) else {
return Ok(()); return Ok(());
}; };
#[cfg(unix)] #[cfg(unix)]
{ {
let mut permissions = file let mut permissions = file
.metadata() .metadata()
.with_context(|| format!("Failed to read metadata for {}", path.display()))? .with_context(|| format!("Failed to read metadata for {}", path.display()))?
.permissions(); .permissions();
permissions.set_mode(permissions.mode() | 0o111); permissions.set_mode(permissions.mode() | 0o111);
file.set_permissions(permissions).with_context(|| { file.set_permissions(permissions).with_context(|| {
format!("Failed to set executable permissions on {}", path.display()) format!("Failed to set executable permissions on {}", path.display())
})?; })?;
} }
let mut file = BufWriter::new(file); let mut file = BufWriter::new(file);
file.write_all( file.write_all(&downloader.download().await.context("Failed to download solc binary bytes")?)
&downloader .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
.download() file.flush()
.await .with_context(|| format!("Failed to flush file {}", path.display()))?;
.context("Failed to download solc binary bytes")?, drop(file);
)
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
file.flush()
.with_context(|| format!("Failed to flush file {}", path.display()))?;
drop(file);
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
std::process::Command::new("xattr") std::process::Command::new("xattr")
.arg("-d") .arg("-d")
.arg("com.apple.quarantine") .arg("com.apple.quarantine")
.arg(path) .arg(path)
.stderr(std::process::Stdio::null()) .stderr(std::process::Stdio::null())
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.spawn() .spawn()
.with_context(|| { .with_context(|| {
format!( format!("Failed to spawn xattr to remove quarantine attribute on {}", path.display())
"Failed to spawn xattr to remove quarantine attribute on {}", })?
path.display() .wait()
) .with_context(|| {
})? format!("Failed waiting for xattr operation to complete on {}", path.display())
.wait() })?;
.with_context(|| {
format!(
"Failed waiting for xattr operation to complete on {}",
path.display()
)
})?;
Ok(()) Ok(())
} }
+123 -164
@@ -1,8 +1,8 @@
//! This module downloads solc binaries.
use std::{
	collections::HashMap,
	sync::{LazyLock, Mutex},
};
use revive_dt_common::types::VersionOrRequirement;
@@ -14,199 +14,158 @@ use crate::list::List;
use anyhow::Context as _;

pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
	LazyLock::new(Default::default);

impl List {
	pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json";
	pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json";
	pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json";
	pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json";
	/// Tries to download the list from the given URL.
	///
	/// Caches the list retrieved from the `url` into [LIST_CACHE];
	/// subsequent calls with the same `url` return the cached list.
	pub async fn download(url: &'static str) -> anyhow::Result<Self> {
		if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
			return Ok(list.clone());
		}
		let body: List = reqwest::get(url)
			.await
			.with_context(|| format!("Failed to GET solc list from {url}"))?
			.json()
			.await
			.with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;
		LIST_CACHE.lock().unwrap().insert(url, body.clone());
		Ok(body)
	}
}

/// Download solc binaries from the official SolidityLang site
#[derive(Clone, Debug)]
pub struct SolcDownloader {
	pub version: Version,
	pub target: &'static str,
	pub list: &'static str,
}

impl SolcDownloader {
	pub const BASE_URL: &str = "https://binaries.soliditylang.org";
	pub const LINUX_NAME: &str = "linux-amd64";
	pub const MACOSX_NAME: &str = "macosx-amd64";
	pub const WINDOWS_NAME: &str = "windows-amd64";
	pub const WASM_NAME: &str = "wasm";

	async fn new(
		version: impl Into<VersionOrRequirement>,
		target: &'static str,
		list: &'static str,
	) -> anyhow::Result<Self> {
		let version_or_requirement = version.into();
		match version_or_requirement {
-			VersionOrRequirement::Version(version) => Ok(Self {
-				version,
-				target,
-				list,
-			}),
+			VersionOrRequirement::Version(version) => Ok(Self { version, target, list }),
			VersionOrRequirement::Requirement(requirement) => {
				let Some(version) = List::download(list)
					.await
					.with_context(|| format!("Failed to download solc builds list from {list}"))?
					.builds
					.into_iter()
					.map(|build| build.version)
					.filter(|version| requirement.matches(version))
					.max()
				else {
					anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
				};
-				Ok(Self {
-					version,
-					target,
-					list,
-				})
-			}
+				Ok(Self { version, target, list })
+			},
		}
	}
	pub async fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
		Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await
	}

	pub async fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
		Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await
	}

	pub async fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
		Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await
	}

	pub async fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
		Self::new(version, Self::WASM_NAME, List::WASM_URL).await
	}

	/// Download the solc binary.
	///
	/// Errors out if the download fails or the digest of the downloaded file
	/// mismatches the expected digest from the release [List].
	pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
		let builds = List::download(self.list)
			.await
			.with_context(|| format!("Failed to download solc builds list from {}", self.list))?
			.builds;
		let build = builds
			.iter()
			.find(|build| build.version == self.version)
			.ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
			.with_context(|| {
				format!(
					"Requested solc version {} was not found in builds list fetched from {}",
					self.version, self.list
				)
			})?;
		let path = build.path.clone();
-		let expected_digest = build
-			.sha256
-			.strip_prefix("0x")
-			.unwrap_or(&build.sha256)
-			.to_string();
+		let expected_digest = build.sha256.strip_prefix("0x").unwrap_or(&build.sha256).to_string();
		let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
		let file = reqwest::get(&url)
			.await
			.with_context(|| format!("Failed to GET solc binary from {url}"))?
			.bytes()
			.await
			.with_context(|| format!("Failed to read solc binary bytes from {url}"))?
			.to_vec();
		if hex::encode(Sha256::digest(&file)) != expected_digest {
			anyhow::bail!("sha256 mismatch for solc version {}", self.version);
		}
		Ok(file)
	}
}

#[cfg(test)]
mod tests {
	use crate::{download::SolcDownloader, list::List};
	#[tokio::test]
	async fn try_get_windows() {
-		let version = List::download(List::WINDOWS_URL)
-			.await
-			.unwrap()
-			.latest_release;
-		SolcDownloader::windows(version)
-			.await
-			.unwrap()
-			.download()
-			.await
-			.unwrap();
+		let version = List::download(List::WINDOWS_URL).await.unwrap().latest_release;
+		SolcDownloader::windows(version).await.unwrap().download().await.unwrap();
	}

	#[tokio::test]
	async fn try_get_macosx() {
-		let version = List::download(List::MACOSX_URL)
-			.await
-			.unwrap()
-			.latest_release;
-		SolcDownloader::macosx(version)
-			.await
-			.unwrap()
-			.download()
-			.await
-			.unwrap();
+		let version = List::download(List::MACOSX_URL).await.unwrap().latest_release;
+		SolcDownloader::macosx(version).await.unwrap().download().await.unwrap();
	}

	#[tokio::test]
	async fn try_get_linux() {
-		let version = List::download(List::LINUX_URL)
-			.await
-			.unwrap()
-			.latest_release;
-		SolcDownloader::linux(version)
-			.await
-			.unwrap()
-			.download()
-			.await
-			.unwrap();
+		let version = List::download(List::LINUX_URL).await.unwrap().latest_release;
+		SolcDownloader::linux(version).await.unwrap().download().await.unwrap();
	}

	#[tokio::test]
	async fn try_get_wasm() {
		let version = List::download(List::WASM_URL).await.unwrap().latest_release;
-		SolcDownloader::wasm(version)
-			.await
-			.unwrap()
-			.download()
-			.await
-			.unwrap();
+		SolcDownloader::wasm(version).await.unwrap().download().await.unwrap();
	}
}
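SolcDownloader::download refuses any payload whose SHA-256 digest disagrees with the builds list, stripping an optional 0x prefix first. A standalone sketch of that check, using the same sha2 and hex crates this module already depends on:

use sha2::{Digest, Sha256};

// Verifies `bytes` against a hex digest that may carry a leading "0x",
// mirroring the check in SolcDownloader::download.
fn verify_sha256(bytes: &[u8], expected: &str) -> anyhow::Result<()> {
	let expected = expected.strip_prefix("0x").unwrap_or(expected);
	let actual = hex::encode(Sha256::digest(bytes));
	anyhow::ensure!(actual == expected, "sha256 mismatch: got {actual}, expected {expected}");
	Ok(())
}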
+16 -16
@@ -22,22 +22,22 @@ pub mod list;
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
	cache_directory: &Path,
	version: impl Into<VersionOrRequirement>,
	wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> {
	let downloader = if wasm {
		SolcDownloader::wasm(version).await
	} else if cfg!(target_os = "linux") {
		SolcDownloader::linux(version).await
	} else if cfg!(target_os = "macos") {
		SolcDownloader::macosx(version).await
	} else if cfg!(target_os = "windows") {
		SolcDownloader::windows(version).await
	} else {
		unimplemented!()
	}
	.context("Failed to initialize the Solc Downloader")?;
	get_or_download(cache_directory, &downloader).await
}
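download_solc picks the downloader for the host OS with cfg! and accepts anything convertible into VersionOrRequirement. Assuming From impls exist for both semver::Version and semver::VersionReq (the impl Into bound suggests as much, though the impls are not shown in this diff), both call shapes would look like:

use std::path::Path;
use semver::{Version, VersionReq};

// Sketch under the assumption stated above; not part of the diff.
async fn demo(cache: &Path) -> anyhow::Result<()> {
	// Pin an exact release.
	let (_, pinned) = download_solc(cache, Version::new(0, 8, 26), false).await?;
	// Or let the builds list resolve the newest matching 0.8.x release.
	let (resolved, _) = download_solc(cache, VersionReq::parse("^0.8")?, false).await?;
	println!("pinned at {}, requirement resolved to {resolved}", pinned.display());
	Ok(())
}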
+12 -12
@@ -7,20 +7,20 @@ use serde::Deserialize;
#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct List {
	pub builds: Vec<Build>,
	pub releases: HashMap<Version, String>,
	#[serde(rename = "latestRelease")]
	pub latest_release: Version,
}

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct Build {
	pub path: PathBuf,
	pub version: Version,
	pub build: String,
	#[serde(rename = "longVersion")]
	pub long_version: String,
	pub keccak256: String,
	pub sha256: String,
	pub urls: Vec<String>,
}
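The #[serde(rename = ...)] attributes map list.json's camelCase keys onto snake_case fields. A toy deserialization with an abbreviated, made-up payload (the hashes and commit strings below are placeholders, and serde_json is assumed to be available as a dev-dependency):

// Abbreviated, invented list.json; real lists contain many builds.
let json = r#"{
	"builds": [{
		"path": "solc-linux-amd64-v0.8.26+commit.0000000",
		"version": "0.8.26",
		"build": "commit.0000000",
		"longVersion": "0.8.26+commit.0000000",
		"keccak256": "0x00",
		"sha256": "0x00",
		"urls": []
	}],
	"releases": { "0.8.26": "solc-linux-amd64-v0.8.26+commit.0000000" },
	"latestRelease": "0.8.26"
}"#;
let list: List = serde_json::from_str(json)?;
assert_eq!(list.latest_release, semver::Version::new(0, 8, 26));
assert_eq!(list.builds[0].long_version, "0.8.26+commit.0000000");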