Mirror of https://github.com/pezkuwichain/revive-differential-tests.git (synced 2026-04-22 10:17:56 +00:00)

Compare commits (29 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 1659164310 | |
| | 0a68800856 | |
| | 8303d789cd | |
| | 40bf44fe58 | |
| | ba8ad03290 | |
| | 3dd99f3ac8 | |
| | 6618463c68 | |
| | dffb80ac0a | |
| | 43a1114337 | |
| | 3a07ea042b | |
| | 9e2aa972db | |
| | 86f2173e8b | |
| | 6e658aec49 | |
| | 1aba74ec3e | |
| | 180bd64bc5 | |
| | 967cbac349 | |
| | a8d84c8360 | |
| | c83a755416 | |
| | 0711216539 | |
| | b40c17c0af | |
| | 8ae994f9de | |
| | 3f3cbfa934 | |
| | c676114fe1 | |
| | 92885351ed | |
| | e16f8ebf59 | |
| | d482808eb2 | |
| | 1f84ce6f61 | |
| | 765569a8b6 | |
| | 6e64f678ee | |
@@ -13,4 +13,3 @@ resolc-compiler-tests
workdir
!/schema.json
!/dev-genesis.json
@@ -0,0 +1,25 @@
+# Basic
+edition = "2024"
+hard_tabs = true
+max_width = 100
+use_small_heuristics = "Max"
+
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+
+# Consistency
+newline_style = "Unix"
+
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = false
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = false
+use_field_init_shorthand = true
+
+# Format comments
+comment_width = 100
+wrap_comments = true
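To make the less common knobs concrete, here is an illustrative snippet (not from the repository) in the shape this configuration asks rustfmt to produce: hard tabs, short expression-style match arms, and the binary operator kept at the end of the broken line (`binop_separator = "Back"`).

```rust
fn classify(n: u64) -> &'static str {
	// match_arm_blocks = false keeps short arms expression-style, while
	// match_block_trailing_comma = true adds a comma after block-bodied arms.
	match n {
		0 => "zero",
		n if n % 2 == 0 &&
			n > 10 => "large even",
		_ => "other",
	}
}
```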
Generated: +21
@@ -4526,6 +4526,27 @@ dependencies = [
 "windows-sys 0.59.0",
]

+[[package]]
+name = "ml-test-runner"
+version = "0.1.0"
+dependencies = [
+ "alloy",
+ "anyhow",
+ "clap",
+ "revive-dt-common",
+ "revive-dt-compiler",
+ "revive-dt-config",
+ "revive-dt-core",
+ "revive-dt-format",
+ "revive-dt-node",
+ "revive-dt-node-interaction",
+ "revive-dt-report",
+ "temp-dir",
+ "tokio",
+ "tracing",
+ "tracing-subscriber",
+]
+
[[package]]
name = "moka"
version = "0.12.10"
@@ -1,7 +1,7 @@
.PHONY: format clippy test machete

format:
-	cargo fmt --all -- --check
+	cargo +nightly fmt --all -- --check

clippy:
	cargo clippy --all-features --workspace -- --deny warnings
@@ -1,49 +1,48 @@
//! This module implements a cached file system allowing for results to be stored in-memory rather
//! rather being queried from the file system again.

-use std::fs;
-use std::io::{Error, Result};
-use std::path::{Path, PathBuf};
+use std::{
+    fs,
+    io::{Error, Result},
+    path::{Path, PathBuf},
+};

use moka::sync::Cache;
use once_cell::sync::Lazy;

pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
    static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_CACHE.get(path.as_path()) {
        Some(content) => Ok(content),
        None => {
            let content = fs::read(path.as_path())?;
            READ_CACHE.insert(path, content.clone());
            Ok(content)
-        }
+        },
    }
}

pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
    let content = read(path)?;
    String::from_utf8(content).map_err(|_| {
-        Error::new(
-            std::io::ErrorKind::InvalidData,
-            "The contents of the file are not valid UTF8",
-        )
+        Error::new(std::io::ErrorKind::InvalidData, "The contents of the file are not valid UTF8")
    })
}

pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
    static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_DIR_CACHE.get(path.as_path()) {
        Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
        None => {
            let entries = fs::read_dir(path.as_path())?
                .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
                .collect();
            READ_DIR_CACHE.insert(path.clone(), entries);
            Ok(read_dir(path).unwrap())
-        }
+        },
    }
}
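A minimal usage sketch of the cached wrappers above; the `revive_dt_common::cached_fs` module path is an assumption inferred from the crate names elsewhere in this change.

```rust
use revive_dt_common::cached_fs; // assumed module path

fn main() -> std::io::Result<()> {
    // The first read goes to disk and populates the moka cache.
    let first = cached_fs::read_to_string("Cargo.toml")?;
    // A repeat read of the same canonical path is served from memory.
    let second = cached_fs::read_to_string("Cargo.toml")?;
    assert_eq!(first, second);
    Ok(())
}
```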
@@ -1,6 +1,6 @@
use std::{
    fs::{read_dir, remove_dir_all, remove_file},
    path::Path,
};

use anyhow::{Context, Result};

@@ -8,24 +8,21 @@ use anyhow::{Context, Result};
/// This method clears the passed directory of all of the files and directories contained within
/// without deleting the directory.
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
    for entry in read_dir(path.as_ref())
        .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
    {
        let entry = entry.with_context(|| {
-            format!(
-                "Failed to read an entry in directory: {}",
-                path.as_ref().display()
-            )
+            format!("Failed to read an entry in directory: {}", path.as_ref().display())
        })?;
        let entry_path = entry.path();

        if entry_path.is_file() {
            remove_file(&entry_path)
                .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
        } else {
            remove_dir_all(&entry_path)
                .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
        }
    }
    Ok(())
}
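A short sketch of `clear_directory` in use, assuming the function is in scope and using the temp-dir crate that already appears in the dependency list above.

```rust
use anyhow::Result;
use temp_dir::TempDir;

fn main() -> Result<()> {
    let dir = TempDir::new()?;
    std::fs::write(dir.path().join("stale.json"), b"{}")?;
    // Empties the directory but keeps the directory itself, so it can be
    // reused as a fresh working directory between runs.
    clear_directory(dir.path())?;
    assert_eq!(std::fs::read_dir(dir.path())?.count(), 0);
    Ok(())
}
```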
@@ -1,5 +1,4 @@
-use std::ops::ControlFlow;
-use std::time::Duration;
+use std::{ops::ControlFlow, time::Duration};

use anyhow::{Context as _, Result, anyhow};

@@ -18,55 +17,51 @@ const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
    polling_duration: Duration,
    polling_wait_behavior: PollingWaitBehavior,
    mut future: impl FnMut() -> F,
) -> Result<O>
where
    F: Future<Output = Result<ControlFlow<O, ()>>>,
{
    let mut retries = 0;
    let mut total_wait_duration = Duration::ZERO;
    let max_allowed_wait_duration = polling_duration;

    loop {
        if total_wait_duration >= max_allowed_wait_duration {
            break Err(anyhow!(
                "Polling failed after {} retries and a total of {:?} of wait time",
                retries,
                total_wait_duration
            ));
        }

-        match future()
-            .await
-            .context("Polled future returned an error during polling loop")?
-        {
+        match future().await.context("Polled future returned an error during polling loop")? {
            ControlFlow::Continue(()) => {
                let next_wait_duration = match polling_wait_behavior {
                    PollingWaitBehavior::Constant(duration) => duration,
-                    PollingWaitBehavior::ExponentialBackoff => {
-                        Duration::from_secs(2u64.pow(retries))
-                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
-                    }
+                    PollingWaitBehavior::ExponentialBackoff =>
+                        Duration::from_secs(2u64.pow(retries))
+                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION),
                };
                let next_wait_duration =
                    next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
                total_wait_duration += next_wait_duration;
                retries += 1;

                tokio::time::sleep(next_wait_duration).await;
-            }
+            },
            ControlFlow::Break(output) => {
                break Ok(output);
-            }
+            },
        }
    }
}

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
    Constant(Duration),
    #[default]
    ExponentialBackoff,
}
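A hedged sketch of how `poll` is meant to be driven: the closure returns `Continue` to keep waiting and `Break` to deliver the result. The `check_ready` probe is a hypothetical stand-in for an RPC call, not an API from this repository.

```rust
use std::{ops::ControlFlow, time::Duration};
use anyhow::Result;

// Hypothetical readiness probe; stands in for a node RPC call in real usage.
async fn check_ready() -> Result<Option<u16>> {
    Ok(Some(8545))
}

async fn wait_until_ready() -> Result<u16> {
    poll(Duration::from_secs(60), PollingWaitBehavior::ExponentialBackoff, || async {
        match check_ready().await? {
            Some(port) => Ok(ControlFlow::Break(port)), // done, stop polling
            None => Ok(ControlFlow::Continue(())),      // keep waiting, back off
        }
    })
    .await
}
```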
@@ -1,21 +1,21 @@
/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
pub enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B, T> Iterator for EitherIter<A, B>
where
    A: Iterator<Item = T>,
    B: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}
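A small example of why `EitherIter` exists: both branches of a conditional can return differently typed iterators behind one concrete type, avoiding a `Box<dyn Iterator>` allocation.

```rust
use std::{iter::StepBy, ops::Range};

// One concrete return type covering two different iterator types.
fn evens_or_all(evens_only: bool) -> EitherIter<StepBy<Range<u32>>, Range<u32>> {
    if evens_only { EitherIter::A((0..10).step_by(2)) } else { EitherIter::B(0..10) }
}

fn main() {
    assert_eq!(evens_or_all(true).collect::<Vec<_>>(), vec![0, 2, 4, 6, 8]);
}
```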
@@ -1,91 +1,90 @@
use std::{
    borrow::Cow,
    collections::HashSet,
    path::{Path, PathBuf},
};

/// An iterator that finds files of a certain extension in the provided directory. You can think of
/// this a glob pattern similar to: `${path}/**/*.md`
pub struct FilesWithExtensionIterator {
    /// The set of allowed extensions that that match the requirement and that should be returned
    /// when found.
    allowed_extensions: HashSet<Cow<'static, str>>,

    /// The set of directories to visit next. This iterator does BFS and so these directories will
    /// only be visited if we can't find any files in our state.
    directories_to_search: Vec<PathBuf>,

    /// The set of files matching the allowed extensions that were found. If there are entries in
    /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
    /// then we visit one of the next directories to visit.
    files_matching_allowed_extensions: Vec<PathBuf>,

    /// This option controls if the the cached file system should be used or not. This could be
    /// better for certain cases where the entries in the directories do not change and therefore
    /// caching can be used.
    use_cached_fs: bool,
}

impl FilesWithExtensionIterator {
    pub fn new(root_directory: impl AsRef<Path>) -> Self {
        Self {
            allowed_extensions: Default::default(),
            directories_to_search: vec![root_directory.as_ref().to_path_buf()],
            files_matching_allowed_extensions: Default::default(),
            use_cached_fs: Default::default(),
        }
    }

    pub fn with_allowed_extension(
        mut self,
        allowed_extension: impl Into<Cow<'static, str>>,
    ) -> Self {
        self.allowed_extensions.insert(allowed_extension.into());
        self
    }

    pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
        self.use_cached_fs = use_cached_fs;
        self
    }
}

impl Iterator for FilesWithExtensionIterator {
    type Item = PathBuf;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
            return Some(file_path);
        };

        let directory_to_search = self.directories_to_search.pop()?;

        let iterator = if self.use_cached_fs {
            let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
                return self.next();
            };
            Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
        } else {
            let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
                return self.next();
            };
            Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
        };

        for entry_path in iterator.flatten() {
            if entry_path.is_dir() {
                self.directories_to_search.push(entry_path)
-            } else if entry_path.is_file()
-                && entry_path.extension().is_some_and(|ext| {
-                    self.allowed_extensions
-                        .iter()
-                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
-                })
-            {
+            } else if entry_path.is_file() &&
+                entry_path.extension().is_some_and(|ext| {
+                    self.allowed_extensions
+                        .iter()
+                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
+                }) {
                self.files_matching_allowed_extensions.push(entry_path)
            }
        }

        self.next()
    }
}
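A usage sketch matching the glob analogy in the doc comment; the `tests` directory name is illustrative.

```rust
use std::path::PathBuf;

// Roughly equivalent to the glob `tests/**/*.sol`, matching the extension
// case-insensitively; the cached file system is opted into for directory
// trees that do not change while the tool runs.
fn solidity_sources() -> Vec<PathBuf> {
    FilesWithExtensionIterator::new("tests")
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
        .collect()
}
```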
@@ -1,23 +1,23 @@
#[macro_export]
macro_rules! impl_for_wrapper {
    (Display, $ident: ident) => {
        #[automatically_derived]
        impl std::fmt::Display for $ident {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                std::fmt::Display::fmt(&self.0, f)
            }
        }
    };
    (FromStr, $ident: ident) => {
        #[automatically_derived]
        impl std::str::FromStr for $ident {
            type Err = anyhow::Error;

            fn from_str(s: &str) -> anyhow::Result<Self> {
                s.parse().map(Self).map_err(Into::into)
            }
        }
    };
}

/// Defines wrappers around types.
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
    };
}

-/// Technically not needed but this allows for the macro to be found in the `macros` module of the
-/// crate in addition to being found in the root of the crate.
+/// Technically not needed but this allows for the macro to be found in the `macros` module of
+/// the crate in addition to being found in the root of the crate.
pub use {define_wrapper_type, impl_for_wrapper};
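A sketch of the macro in use on a hypothetical wrapper type: `Display` forwards to the inner value, and `FromStr` parses through it.

```rust
// Hypothetical newtype, purely for illustration.
pub struct BlockNumber(u64);

impl_for_wrapper!(Display, BlockNumber);
impl_for_wrapper!(FromStr, BlockNumber);

fn main() {
    let n: BlockNumber = "42".parse().unwrap();
    assert_eq!(n.to_string(), "42");
}
```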
@@ -7,128 +7,128 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
/// could be thought of like the target triple from Rust and LLVM where it specifies the platform
/// completely starting with the node, the vm, and finally the compiler used for this combination.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum PlatformIdentifier {
    /// The Go-ethereum reference full node EVM implementation with the solc compiler.
    GethEvmSolc,
    /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
    LighthouseGethEvmSolc,
    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
    KitchensinkPolkavmResolc,
    /// The kitchensink node with the REVM backend with the solc compiler.
    KitchensinkRevmSolc,
    /// The revive dev node with the PolkaVM backend with the resolc compiler.
    ReviveDevNodePolkavmResolc,
    /// The revive dev node with the REVM backend with the solc compiler.
    ReviveDevNodeRevmSolc,
    /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
    ZombienetPolkavmResolc,
    /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
    ZombienetRevmSolc,
}

/// An enum of the platform identifiers of all of the platforms supported by this framework.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum CompilerIdentifier {
    /// The solc compiler.
    Solc,
    /// The resolc compiler.
    Resolc,
}

/// An enum representing the identifiers of the supported nodes.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
pub enum NodeIdentifier {
    /// The go-ethereum node implementation.
    Geth,
    /// The go-ethereum node implementation.
    LighthouseGeth,
    /// The Kitchensink node implementation.
    Kitchensink,
    /// The revive dev node implementation.
    ReviveDevNode,
    /// A zombienet spawned nodes
    Zombienet,
}

/// An enum representing the identifiers of the supported VMs.
#[derive(
    Clone,
    Copy,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    ValueEnum,
    EnumString,
    Display,
    AsRefStr,
    IntoStaticStr,
    JsonSchema,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum VmIdentifier {
    /// The ethereum virtual machine.
    Evm,
    /// The EraVM virtual machine.
    EraVM,
    /// Polkadot's PolaVM Risc-v based virtual machine.
    PolkaVM,
}
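Given the `EnumString`/`Display` derives with `serialize_all = "kebab-case"`, the platform identifiers round-trip through kebab-case strings; a small sketch:

```rust
use std::str::FromStr;

fn main() {
    let platform = PlatformIdentifier::from_str("geth-evm-solc").unwrap();
    assert_eq!(platform, PlatformIdentifier::GethEvmSolc);
    assert_eq!(platform.to_string(), "geth-evm-solc");
}
```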
+118 -122
@@ -1,9 +1,7 @@
use crate::types::VersionOrRequirement;
use semver::Version;
use serde::{Deserialize, Serialize};
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{fmt::Display, str::FromStr, sync::LazyLock};

/// This represents a mode that a given test should be run with, if possible.
///
@@ -13,161 +11,159 @@ use std::sync::LazyLock;
/// Use [`ParsedMode::to_test_modes()`] to do this.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Mode {
    pub pipeline: ModePipeline,
    pub optimize_setting: ModeOptimizerSetting,
    pub version: Option<semver::VersionReq>,
}

impl Display for Mode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.pipeline.fmt(f)?;
        f.write_str(" ")?;
        self.optimize_setting.fmt(f)?;

        if let Some(version) = &self.version {
            f.write_str(" ")?;
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl Mode {
    /// Return all of the available mode combinations.
    pub fn all() -> impl Iterator<Item = &'static Mode> {
        static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
            ModePipeline::test_cases()
                .flat_map(|pipeline| {
                    ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
                        pipeline,
                        optimize_setting,
                        version: None,
                    })
                })
                .collect::<Vec<_>>()
        });
        ALL_MODES.iter()
    }

    /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
    /// the requirement is present on the object. Otherwise, the passed default version is used.
    pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
        match self.version {
            Some(ref requirement) => requirement.clone().into(),
            None => default.into(),
        }
    }
}

/// What do we want the compiler to do?
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModePipeline {
    /// Compile Solidity code via Yul IR
    ViaYulIR,
    /// Compile Solidity direct to assembly
    ViaEVMAssembly,
}

impl FromStr for ModePipeline {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            // via Yul IR
            "Y" => Ok(ModePipeline::ViaYulIR),
            // Don't go via Yul IR
            "E" => Ok(ModePipeline::ViaEVMAssembly),
            // Anything else that we see isn't a mode at all
-            _ => Err(anyhow::anyhow!(
-                "Unsupported pipeline '{s}': expected 'Y' or 'E'"
-            )),
+            _ => Err(anyhow::anyhow!("Unsupported pipeline '{s}': expected 'Y' or 'E'")),
        }
    }
}

impl Display for ModePipeline {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ModePipeline::ViaYulIR => f.write_str("Y"),
            ModePipeline::ViaEVMAssembly => f.write_str("E"),
        }
    }
}

impl ModePipeline {
    /// Should we go via Yul IR?
    pub fn via_yul_ir(&self) -> bool {
        matches!(self, ModePipeline::ViaYulIR)
    }

    /// An iterator over the available pipelines that we'd like to test,
    /// when an explicit pipeline was not specified.
    pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
        [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
    }
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModeOptimizerSetting {
    /// 0 / -: Don't apply any optimizations
    M0,
    /// 1: Apply less than default optimizations
    M1,
    /// 2: Apply the default optimizations
    M2,
    /// 3 / +: Apply aggressive optimizations
    M3,
    /// s: Optimize for size
    Ms,
    /// z: Aggressively optimize for size
    Mz,
}

impl FromStr for ModeOptimizerSetting {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "M0" => Ok(ModeOptimizerSetting::M0),
            "M1" => Ok(ModeOptimizerSetting::M1),
            "M2" => Ok(ModeOptimizerSetting::M2),
            "M3" => Ok(ModeOptimizerSetting::M3),
            "Ms" => Ok(ModeOptimizerSetting::Ms),
            "Mz" => Ok(ModeOptimizerSetting::Mz),
            _ => Err(anyhow::anyhow!(
                "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
            )),
        }
    }
}

impl Display for ModeOptimizerSetting {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ModeOptimizerSetting::M0 => f.write_str("M0"),
            ModeOptimizerSetting::M1 => f.write_str("M1"),
            ModeOptimizerSetting::M2 => f.write_str("M2"),
            ModeOptimizerSetting::M3 => f.write_str("M3"),
            ModeOptimizerSetting::Ms => f.write_str("Ms"),
            ModeOptimizerSetting::Mz => f.write_str("Mz"),
        }
    }
}

impl ModeOptimizerSetting {
    /// An iterator over the available optimizer settings that we'd like to test,
    /// when an explicit optimizer setting was not specified.
    pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
        [
            // No optimizations:
            ModeOptimizerSetting::M0,
            // Aggressive optimizations:
            ModeOptimizerSetting::M3,
        ]
        .into_iter()
    }

    /// Are any optimizations enabled?
    pub fn optimizations_enabled(&self) -> bool {
        !matches!(self, ModeOptimizerSetting::M0)
    }
}
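From the `Display` impl above, a mode renders as `<pipeline> <optimizer> [<version-req>]`; a short sketch:

```rust
fn main() {
    let mode = Mode {
        pipeline: ModePipeline::ViaYulIR,
        optimize_setting: ModeOptimizerSetting::M3,
        version: Some(semver::VersionReq::parse(">=0.8.0").unwrap()),
    };
    // e.g. "Y M3 >=0.8.0"
    assert_eq!(mode.to_string(), "Y M3 >=0.8.0");
}
```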
@@ -1,36 +1,32 @@
-use alloy::primitives::U256;
-use alloy::signers::local::PrivateKeySigner;
+use alloy::{primitives::U256, signers::local::PrivateKeySigner};
use anyhow::{Context, Result, bail};

/// This is a sequential private key allocator. When instantiated, it allocated private keys in
/// sequentially and in order until the maximum private key specified is reached.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PrivateKeyAllocator {
    /// The next private key to be returned by the allocator when requested.
    next_private_key: U256,

    /// The highest private key (exclusive) that can be returned by this allocator.
    highest_private_key_inclusive: U256,
}

impl PrivateKeyAllocator {
    /// Creates a new instance of the private key allocator.
    pub fn new(highest_private_key_inclusive: U256) -> Self {
-        Self {
-            next_private_key: U256::ONE,
-            highest_private_key_inclusive,
-        }
+        Self { next_private_key: U256::ONE, highest_private_key_inclusive }
    }

    /// Allocates a new private key and errors out if the maximum private key has been reached.
    pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
        if self.next_private_key > self.highest_private_key_inclusive {
            bail!("Attempted to allocate a private key but failed since all have been allocated");
        };
        let private_key =
            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
                .context("Failed to convert the private key digits into a private key")?;
        self.next_private_key += U256::ONE;
        Ok(private_key)
    }
}
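A brief usage sketch of the allocator; the `U256::from(100u64)` upper bound is an arbitrary illustrative value.

```rust
use alloy::primitives::U256;
use anyhow::Result;

fn main() -> Result<()> {
    // Keys are handed out sequentially, starting from private key 0x...01.
    let mut allocator = PrivateKeyAllocator::new(U256::from(100u64));
    let signer = allocator.allocate()?;
    println!("first pre-funded account: {}", signer.address());
    Ok(())
}
```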
@@ -1,24 +1,21 @@
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct RoundRobinPool<T> {
    next_index: AtomicUsize,
    items: Vec<T>,
}

impl<T> RoundRobinPool<T> {
    pub fn new(items: Vec<T>) -> Self {
-        Self {
-            next_index: Default::default(),
-            items,
-        }
+        Self { next_index: Default::default(), items }
    }

    pub fn round_robin(&self) -> &T {
        let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
        self.items.get(current).unwrap()
    }

    pub fn iter(&self) -> impl Iterator<Item = &T> {
        self.items.iter()
    }
}
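A minimal sketch of the pool's cycling behaviour:

```rust
fn main() {
    let pool = RoundRobinPool::new(vec!["node-a", "node-b", "node-c"]);
    assert_eq!(*pool.round_robin(), "node-a");
    assert_eq!(*pool.round_robin(), "node-b");
    assert_eq!(*pool.round_robin(), "node-c");
    // The index wraps around modulo the pool size, so it cycles indefinitely.
    assert_eq!(*pool.round_robin(), "node-a");
}
```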
@@ -2,40 +2,40 @@ use semver::{Version, VersionReq};

#[derive(Clone, Debug)]
pub enum VersionOrRequirement {
    Version(Version),
    Requirement(VersionReq),
}

impl From<Version> for VersionOrRequirement {
    fn from(value: Version) -> Self {
        Self::Version(value)
    }
}

impl From<VersionReq> for VersionOrRequirement {
    fn from(value: VersionReq) -> Self {
        Self::Requirement(value)
    }
}

impl TryFrom<VersionOrRequirement> for Version {
    type Error = anyhow::Error;

    fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
        let VersionOrRequirement::Version(version) = value else {
            anyhow::bail!("Version or requirement was not a version");
        };
        Ok(version)
    }
}

impl TryFrom<VersionOrRequirement> for VersionReq {
    type Error = anyhow::Error;

    fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
        let VersionOrRequirement::Requirement(requirement) = value else {
            anyhow::bail!("Version or requirement was not a requirement");
        };
        Ok(requirement)
    }
}
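A sketch of the conversion pairs: the `From` impls are infallible, while the `TryFrom` impls recover the concrete variant and error on a mismatch.

```rust
use semver::{Version, VersionReq};

fn main() -> anyhow::Result<()> {
    // Infallible in this direction.
    let v: VersionOrRequirement = Version::new(0, 8, 29).into();
    // Fallible back out: succeeds for the matching variant...
    let version: Version = v.try_into()?;
    // ...and fails when the variant does not match.
    assert!(VersionReq::try_from(VersionOrRequirement::Version(version)).is_err());
    Ok(())
}
```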
+111 -112
@@ -4,14 +4,13 @@
//! - Polkadot revive Wasm compiler

use std::{
    collections::HashMap,
    hash::Hash,
    path::{Path, PathBuf},
    pin::Pin,
};

-use alloy::json_abi::JsonAbi;
-use alloy::primitives::Address;
+use alloy::{json_abi::JsonAbi, primitives::Address};
use anyhow::{Context as _, Result};
use semver::Version;
use serde::{Deserialize, Serialize};

@@ -28,149 +27,149 @@ pub mod solc;
/// A common interface for all supported Solidity compilers.
pub trait SolidityCompiler {
    /// Returns the version of the compiler.
    fn version(&self) -> &Version;

    /// Returns the path of the compiler executable.
    fn path(&self) -> &Path;

    /// The low-level compiler interface.
    fn build(
        &self,
        input: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;

    /// Does the compiler support the provided mode and version settings.
    fn supports_mode(
        &self,
        optimizer_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool;
}

/// The generic compilation input configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct CompilerInput {
    pub pipeline: Option<ModePipeline>,
    pub optimization: Option<ModeOptimizerSetting>,
    pub evm_version: Option<EVMVersion>,
    pub allow_paths: Vec<PathBuf>,
    pub base_path: Option<PathBuf>,
    pub sources: HashMap<PathBuf, String>,
    pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
    pub revert_string_handling: Option<RevertString>,
}

/// The generic compilation output configuration.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CompilerOutput {
    /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is
    /// required and the compiled source has placeholders.
    pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
}

/// A generic builder style interface for configuring the supported compiler options.
#[derive(Default)]
pub struct Compiler {
    input: CompilerInput,
}

impl Compiler {
    pub fn new() -> Self {
        Self {
            input: CompilerInput {
                pipeline: Default::default(),
                optimization: Default::default(),
                evm_version: Default::default(),
                allow_paths: Default::default(),
                base_path: Default::default(),
                sources: Default::default(),
                libraries: Default::default(),
                revert_string_handling: Default::default(),
            },
        }
    }

    pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
        self.input.optimization = value.into();
        self
    }

    pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
        self.input.pipeline = value.into();
        self
    }

    pub fn with_evm_version(mut self, version: impl Into<Option<EVMVersion>>) -> Self {
        self.input.evm_version = version.into();
        self
    }

    pub fn with_allow_path(mut self, path: impl AsRef<Path>) -> Self {
        self.input.allow_paths.push(path.as_ref().into());
        self
    }

    pub fn with_base_path(mut self, path: impl Into<Option<PathBuf>>) -> Self {
        self.input.base_path = path.into();
        self
    }

    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
        self.input.sources.insert(
            path.as_ref().to_path_buf(),
            read_to_string(path.as_ref()).context("Failed to read the contract source")?,
        );
        Ok(self)
    }

    pub fn with_library(
        mut self,
        path: impl AsRef<Path>,
        name: impl AsRef<str>,
        address: Address,
    ) -> Self {
        self.input
            .libraries
            .entry(path.as_ref().to_path_buf())
            .or_default()
            .insert(name.as_ref().into(), address);
        self
    }

    pub fn with_revert_string_handling(
        mut self,
        revert_string_handling: impl Into<Option<RevertString>>,
    ) -> Self {
        self.input.revert_string_handling = revert_string_handling.into();
        self
    }

    pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
        callback(self)
    }

    pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
        callback(self)
    }

    pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
        compiler.build(self.input).await
    }

    pub fn input(&self) -> &CompilerInput {
        &self.input
    }
}

/// Defines how the compiler should handle revert strings.
#[derive(
    Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
pub enum RevertString {
    #[default]
    Default,
    Debug,
    Strip,
    VerboseDebug,
}
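A hedged sketch of driving the builder end to end; the contract path is illustrative, and `compiler` can be any implementation of the trait above (for example the `Solc` or `Resolc` types from this crate).

```rust
async fn compile(compiler: &dyn SolidityCompiler) -> anyhow::Result<CompilerOutput> {
    Compiler::new()
        .with_pipeline(ModePipeline::ViaYulIR)
        .with_optimization(ModeOptimizerSetting::M3)
        // with_source is fallible (it reads the file), hence try_then.
        .try_then(|c| c.with_source("contracts/Flipper.sol"))?
        .try_build(compiler)
        .await
}
```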
@@ -2,24 +2,24 @@
|
||||
//! compiling contracts to PolkaVM (PVM) bytecode.
|
||||
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
pin::Pin,
|
||||
process::Stdio,
|
||||
sync::{Arc, LazyLock},
|
||||
path::PathBuf,
|
||||
pin::Pin,
|
||||
process::Stdio,
|
||||
sync::{Arc, LazyLock},
|
||||
};
|
||||
|
||||
use dashmap::DashMap;
|
||||
use revive_dt_common::types::VersionOrRequirement;
|
||||
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
|
||||
use revive_solc_json_interface::{
|
||||
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
|
||||
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
|
||||
SolcStandardJsonOutput,
|
||||
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
|
||||
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
|
||||
SolcStandardJsonOutput,
|
||||
};
|
||||
use tracing::{Span, field::display};
|
||||
|
||||
use crate::{
|
||||
CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
|
||||
CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
|
||||
};
|
||||
|
||||
use alloy::json_abi::JsonAbi;
|
||||
@@ -33,55 +33,52 @@ pub struct Resolc(Arc<ResolcInner>);
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
struct ResolcInner {
|
||||
/// The internal solc compiler that the resolc compiler uses as a compiler frontend.
|
||||
solc: Solc,
|
||||
/// Path to the `resolc` executable
|
||||
resolc_path: PathBuf,
|
||||
/// The internal solc compiler that the resolc compiler uses as a compiler frontend.
|
||||
solc: Solc,
|
||||
/// Path to the `resolc` executable
|
||||
resolc_path: PathBuf,
|
||||
}
|
||||
|
||||
impl Resolc {
|
||||
pub async fn new(
|
||||
context: impl AsRef<SolcConfiguration>
|
||||
+ AsRef<ResolcConfiguration>
|
||||
+ AsRef<WorkingDirectoryConfiguration>,
|
||||
version: impl Into<Option<VersionOrRequirement>>,
|
||||
) -> Result<Self> {
|
||||
/// This is a cache of all of the resolc compiler objects. Since we do not currently support
|
||||
/// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and
|
||||
/// its version to the resolc compiler.
|
||||
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
|
||||
pub async fn new(
|
||||
context: impl AsRef<SolcConfiguration>
|
||||
+ AsRef<ResolcConfiguration>
|
||||
+ AsRef<WorkingDirectoryConfiguration>,
|
||||
version: impl Into<Option<VersionOrRequirement>>,
|
||||
) -> Result<Self> {
|
||||
/// This is a cache of all of the resolc compiler objects. Since we do not currently support
|
||||
/// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and
|
||||
/// its version to the resolc compiler.
|
||||
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
|
||||
|
||||
let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
|
||||
let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
|
||||
|
||||
let solc = Solc::new(&context, version)
|
||||
.await
|
||||
.context("Failed to create the solc compiler frontend for resolc")?;
|
||||
let solc = Solc::new(&context, version)
|
||||
.await
|
||||
.context("Failed to create the solc compiler frontend for resolc")?;
|
||||
|
||||
Ok(COMPILERS_CACHE
|
||||
.entry(solc.clone())
|
||||
.or_insert_with(|| {
|
||||
Self(Arc::new(ResolcInner {
|
||||
solc,
|
||||
resolc_path: resolc_configuration.path.clone(),
|
||||
}))
|
||||
})
|
||||
.clone())
|
||||
}
|
||||
Ok(COMPILERS_CACHE
|
||||
.entry(solc.clone())
|
||||
.or_insert_with(|| {
|
||||
Self(Arc::new(ResolcInner { solc, resolc_path: resolc_configuration.path.clone() }))
|
||||
})
|
||||
.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl SolidityCompiler for Resolc {
|
||||
fn version(&self) -> &Version {
|
||||
// We currently return the solc compiler version since we do not support multiple resolc
|
||||
// compiler versions.
|
||||
SolidityCompiler::version(&self.0.solc)
|
||||
}
|
||||
fn version(&self) -> &Version {
|
||||
// We currently return the solc compiler version since we do not support multiple resolc
|
||||
// compiler versions.
|
||||
SolidityCompiler::version(&self.0.solc)
|
||||
}
|
||||
|
||||
fn path(&self) -> &std::path::Path {
|
||||
&self.0.resolc_path
|
||||
}
|
||||
fn path(&self) -> &std::path::Path {
|
||||
&self.0.resolc_path
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", ret)]
|
||||
#[tracing::instrument(
|
||||
#[tracing::instrument(level = "debug", ret)]
|
||||
#[tracing::instrument(
|
||||
level = "error",
|
||||
skip_all,
|
||||
fields(
|
||||
@@ -91,221 +88,216 @@ impl SolidityCompiler for Resolc {
|
||||
),
|
||||
err(Debug)
|
||||
)]
|
||||
	fn build(
		&self,
		CompilerInput {
			pipeline,
			optimization,
			evm_version,
			allow_paths,
			base_path,
			sources,
			libraries,
			// TODO: this is currently not being handled since there is no way to pass it into
			// resolc. So, we need to go back to this later once it's supported.
			revert_string_handling: _,
		}: CompilerInput,
	) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
		Box::pin(async move {
			if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
				anyhow::bail!(
					"Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
				);
			}
			let input = SolcStandardJsonInput {
				language: SolcStandardJsonInputLanguage::Solidity,
				sources: sources
					.into_iter()
					.map(|(path, source)| (path.display().to_string(), source.into()))
					.collect(),
				settings: SolcStandardJsonInputSettings {
					evm_version,
					libraries: Some(
						libraries
							.into_iter()
							.map(|(source_code, libraries_map)| {
								(
									source_code.display().to_string(),
									libraries_map
										.into_iter()
										.map(|(library_ident, library_address)| {
											(library_ident, library_address.to_string())
										})
										.collect(),
								)
							})
							.collect(),
					),
					remappings: None,
					output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
					via_ir: Some(true),
					optimizer: SolcStandardJsonInputSettingsOptimizer::new(
						optimization
							.unwrap_or(ModeOptimizerSetting::M0)
							.optimizations_enabled(),
						None,
						&Version::new(0, 0, 0),
						false,
					),
					metadata: None,
					polkavm: None,
				},
			};
			let input = SolcStandardJsonInput {
				language: SolcStandardJsonInputLanguage::Solidity,
				sources: sources
					.into_iter()
					.map(|(path, source)| (path.display().to_string(), source.into()))
					.collect(),
				settings: SolcStandardJsonInputSettings {
					evm_version,
					libraries: Some(
						libraries
							.into_iter()
							.map(|(source_code, libraries_map)| {
								(
									source_code.display().to_string(),
									libraries_map
										.into_iter()
										.map(|(library_ident, library_address)| {
											(library_ident, library_address.to_string())
										})
										.collect(),
								)
							})
							.collect(),
					),
					remappings: None,
					output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
					via_ir: Some(true),
					optimizer: SolcStandardJsonInputSettingsOptimizer::new(
						optimization.unwrap_or(ModeOptimizerSetting::M0).optimizations_enabled(),
						None,
						&Version::new(0, 0, 0),
						false,
					),
					metadata: None,
					polkavm: None,
				},
			};
			Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
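For orientation, the Standard JSON document assembled above has roughly the following shape. This is a hand-written illustration with placeholder paths and a placeholder library address; the exact field set is defined by the `SolcStandardJsonInput` types in the crate, not by this example:

```rust
fn main() {
    // Illustrative only: real requests come from serializing `SolcStandardJsonInput`.
    let example = serde_json::json!({
        "language": "Solidity",
        "sources": {
            "/abs/path/main.sol": { "content": "contract Main {}" }
        },
        "settings": {
            "viaIR": true,
            "optimizer": { "enabled": true },
            "libraries": {
                "/abs/path/lib.sol": { "Lib": "0x0000000000000000000000000000000000000001" }
            },
            "outputSelection": { "*": { "*": ["abi", "evm.bytecode.object"] } }
        }
    });
    println!("{}", serde_json::to_string_pretty(&example).unwrap());
}
```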
			let path = &self.0.resolc_path;
			let mut command = AsyncCommand::new(path);
			command
				.stdin(Stdio::piped())
				.stdout(Stdio::piped())
				.stderr(Stdio::piped())
				.arg("--solc")
				.arg(self.0.solc.path())
				.arg("--standard-json");

			if let Some(ref base_path) = base_path {
				command.arg("--base-path").arg(base_path);
			}
			if !allow_paths.is_empty() {
				command.arg("--allow-paths").arg(
					allow_paths
						.iter()
						.map(|path| path.display().to_string())
						.collect::<Vec<_>>()
						.join(","),
				);
			}
			let mut child = command
				.spawn()
				.with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
			let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
			let serialized_input = serde_json::to_vec(&input)
				.context("Failed to serialize Standard JSON input for resolc")?;
			stdin_pipe
				.write_all(&serialized_input)
				.await
				.context("Failed to write Standard JSON to resolc stdin")?;

			let output = child
				.wait_with_output()
				.await
				.context("Failed while waiting for resolc process to finish")?;
			let stdout = output.stdout;
			let stderr = output.stderr;
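The spawn/write/wait sequence above is the standard tokio pattern for feeding a JSON document to a child process and collecting its output. A stripped-down, runnable sketch of the same flow, using `cat` as a stand-in for the compiler binary (everything here is illustrative, not the crate's code):

```rust
use std::process::Stdio;

use tokio::{io::AsyncWriteExt, process::Command};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // `cat` echoes stdin back, standing in for `resolc --standard-json`.
    let mut child = Command::new("cat")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;

    // Write the request; the temporary stdin handle is dropped after the
    // statement, so the child sees EOF and can finish.
    child
        .stdin
        .take()
        .expect("stdin is piped")
        .write_all(br#"{"language":"Solidity"}"#)
        .await?;

    // `wait_with_output` reaps the child and gathers stdout/stderr.
    let output = child.wait_with_output().await?;
    assert!(output.status.success());
    println!("{}", String::from_utf8_lossy(&output.stdout));
    Ok(())
}
```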
			if !output.status.success() {
				let json_in = serde_json::to_string_pretty(&input)
					.context("Failed to pretty-print Standard JSON input for logging")?;
				let message = String::from_utf8_lossy(&stderr);
				tracing::error!(
					status = %output.status,
					message = %message,
					json_input = json_in,
					"Compilation using resolc failed"
				);
				anyhow::bail!("Compilation failed with an error: {message}");
			}
			let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
				.map_err(|e| {
					anyhow::anyhow!(
						"failed to parse resolc JSON output: {e}\nstderr: {}",
						String::from_utf8_lossy(&stderr)
					)
				})
				.context("Failed to parse resolc standard JSON output")?;

			tracing::debug!(
				output = %serde_json::to_string(&parsed).unwrap(),
				"Compiled successfully"
			);
			// Detect whether the compiler output contains errors and report them through logs and
			// returned errors instead of handing back a compiler output that may contain errors.
			for error in parsed.errors.iter().flatten() {
				if error.severity == "error" {
					tracing::error!(
						?error,
						?input,
						output = %serde_json::to_string(&parsed).unwrap(),
						"Encountered an error in the compilation"
					);
					anyhow::bail!("Encountered an error in the compilation: {error}")
				}
			}
			let Some(contracts) = parsed.contracts else {
				anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
			};
			let mut compiler_output = CompilerOutput::default();
			for (source_path, contracts) in contracts.into_iter() {
				let src_for_msg = source_path.clone();
				let source_path = PathBuf::from(source_path)
					.canonicalize()
					.with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;

				let map = compiler_output.contracts.entry(source_path).or_default();
				for (contract_name, contract_information) in contracts.into_iter() {
					let bytecode = contract_information
						.evm
						.and_then(|evm| evm.bytecode.clone())
						.context("Unexpected - Contract compiled with resolc has no bytecode")?;
					let abi = {
						let metadata = contract_information
							.metadata
							.as_ref()
							.context("No metadata found for the contract")?;
						let solc_metadata_str = match metadata {
							serde_json::Value::String(solc_metadata_str) => {
								solc_metadata_str.as_str()
							}
							serde_json::Value::Object(metadata_object) => {
								let solc_metadata_value = metadata_object
									.get("solc_metadata")
									.context("Contract doesn't have a 'solc_metadata' field")?;
								solc_metadata_value
									.as_str()
									.context("The 'solc_metadata' field is not a string")?
							}
							serde_json::Value::Null
							| serde_json::Value::Bool(_)
							| serde_json::Value::Number(_)
							| serde_json::Value::Array(_) => {
								anyhow::bail!("Unsupported type of metadata {metadata:?}")
							}
						};
						let solc_metadata = serde_json::from_str::<serde_json::Value>(
							solc_metadata_str,
						)
						.context(
							"Failed to deserialize the solc_metadata as a serde_json generic value",
						)?;
						let output_value = solc_metadata
							.get("output")
							.context("solc_metadata doesn't have an output field")?;
						let abi_value = output_value
							.get("abi")
							.context("solc_metadata output doesn't contain an abi field")?;
						serde_json::from_value::<JsonAbi>(abi_value.clone())
							.context("ABI found in solc_metadata output is not valid ABI")?
					};
					map.insert(contract_name, (bytecode.object, abi));
				}
			}
				let map = compiler_output.contracts.entry(source_path).or_default();
				for (contract_name, contract_information) in contracts.into_iter() {
					let bytecode = contract_information
						.evm
						.and_then(|evm| evm.bytecode.clone())
						.context("Unexpected - Contract compiled with resolc has no bytecode")?;
					let abi = {
						let metadata = contract_information
							.metadata
							.as_ref()
							.context("No metadata found for the contract")?;
						let solc_metadata_str = match metadata {
							serde_json::Value::String(solc_metadata_str) =>
								solc_metadata_str.as_str(),
							serde_json::Value::Object(metadata_object) => {
								let solc_metadata_value = metadata_object
									.get("solc_metadata")
									.context("Contract doesn't have a 'solc_metadata' field")?;
								solc_metadata_value
									.as_str()
									.context("The 'solc_metadata' field is not a string")?
							},
							serde_json::Value::Null |
							serde_json::Value::Bool(_) |
							serde_json::Value::Number(_) |
							serde_json::Value::Array(_) => {
								anyhow::bail!("Unsupported type of metadata {metadata:?}")
							},
						};
						let solc_metadata =
							serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
								"Failed to deserialize the solc_metadata as a serde_json generic value",
							)?;
						let output_value = solc_metadata
							.get("output")
							.context("solc_metadata doesn't have an output field")?;
						let abi_value = output_value
							.get("abi")
							.context("solc_metadata output doesn't contain an abi field")?;
						serde_json::from_value::<JsonAbi>(abi_value.clone())
							.context("ABI found in solc_metadata output is not valid ABI")?
					};
					map.insert(contract_name, (bytecode.object, abi));
				}
			}
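The ABI recovery above assumes resolc embeds solc's own metadata blob either directly as a JSON string or wrapped in an object under a `solc_metadata` key, with the ABI sitting at `output.abi` inside that blob. A compact sketch of that extraction against a hand-built value (the input shape is inferred from the code above, not from a spec, and the helper is hypothetical):

```rust
use serde_json::{Value, json};

fn extract_abi(metadata: &Value) -> anyhow::Result<Value> {
    // The metadata is either the solc metadata string itself, or an object
    // wrapping it under "solc_metadata".
    let blob = match metadata {
        Value::String(s) => s.as_str(),
        Value::Object(o) => o
            .get("solc_metadata")
            .and_then(Value::as_str)
            .ok_or_else(|| anyhow::anyhow!("missing 'solc_metadata' string"))?,
        other => anyhow::bail!("unsupported metadata shape: {other:?}"),
    };
    // The blob is itself JSON; the ABI lives at output.abi inside it.
    let solc_metadata: Value = serde_json::from_str(blob)?;
    solc_metadata
        .pointer("/output/abi")
        .cloned()
        .ok_or_else(|| anyhow::anyhow!("no 'output.abi' in solc metadata"))
}

fn main() -> anyhow::Result<()> {
    // Hypothetical wrapped form, mirroring what the loop above handles.
    let metadata = json!({ "solc_metadata": r#"{"output":{"abi":[]}}"# });
    assert_eq!(extract_abi(&metadata)?, json!([]));
    Ok(())
}
```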
			Ok(compiler_output)
		})
	}

	fn supports_mode(
		&self,
		optimize_setting: ModeOptimizerSetting,
		pipeline: ModePipeline,
	) -> bool {
		pipeline == ModePipeline::ViaYulIR
			&& SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
	}
	fn supports_mode(
		&self,
		optimize_setting: ModeOptimizerSetting,
		pipeline: ModePipeline,
	) -> bool {
		pipeline == ModePipeline::ViaYulIR &&
			SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
	}
}

+234
-242
@@ -2,10 +2,10 @@
//! compiling contracts to EVM bytecode.

use std::{
	path::PathBuf,
	pin::Pin,
	process::Stdio,
	sync::{Arc, LazyLock},
};

use dashmap::DashMap;
@@ -18,11 +18,10 @@ use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, S

use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{
	output_selection::{
		BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
	},
	solc::CompilerOutput as SolcOutput,
	solc::*,
	output_selection::{
		BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
	},
	solc::{CompilerOutput as SolcOutput, *},
};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -32,268 +31,261 @@ pub struct Solc(Arc<SolcInner>);
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct SolcInner {
	/// The path of the solidity compiler executable that this object uses.
	solc_path: PathBuf,
	/// The version of the solidity compiler executable that this object uses.
	solc_version: Version,
}

impl Solc {
	pub async fn new(
		context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
		version: impl Into<Option<VersionOrRequirement>>,
	) -> Result<Self> {
		// This is a cache for the compiler objects so that whenever the same compiler version is
		// requested the same object is returned. We do this as we do not want to keep cloning the
		// compiler around.
		static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
			LazyLock::new(Default::default);

		let working_directory_configuration =
			AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
		let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);

		// We attempt to download the solc binary. Note the following: this call does the version
		// resolution for us. Therefore, even if the download didn't proceed, this function will
		// resolve the version requirement into a canonical version of the compiler. It's then up
		// to us to either use the provided path or not.
		let version = version
			.into()
			.unwrap_or_else(|| solc_configuration.version.clone().into());
		let version = version.into().unwrap_or_else(|| solc_configuration.version.clone().into());
		let (version, path) =
			download_solc(working_directory_configuration.as_path(), version, false)
				.await
				.context("Failed to download/get path to solc binary")?;

		Ok(COMPILERS_CACHE
			.entry((path.clone(), version.clone()))
			.or_insert_with(|| {
				info!(
					solc_path = %path.display(),
					solc_version = %version,
					"Created a new solc compiler object"
				);
				Self(Arc::new(SolcInner {
					solc_path: path,
					solc_version: version,
				}))
			})
			.clone())
	}
		Ok(COMPILERS_CACHE
			.entry((path.clone(), version.clone()))
			.or_insert_with(|| {
				info!(
					solc_path = %path.display(),
					solc_version = %version,
					"Created a new solc compiler object"
				);
				Self(Arc::new(SolcInner { solc_path: path, solc_version: version }))
			})
			.clone())
	}
}
impl SolidityCompiler for Solc {
	fn version(&self) -> &Version {
		&self.0.solc_version
	}

	fn path(&self) -> &std::path::Path {
		&self.0.solc_path
	}

	#[tracing::instrument(level = "debug", ret)]
	#[tracing::instrument(
		level = "error",
		skip_all,
		fields(json_in = tracing::field::Empty),
		err(Debug)
	)]
	fn build(
		&self,
		CompilerInput {
			pipeline,
			optimization,
			evm_version,
			allow_paths,
			base_path,
			sources,
			libraries,
			revert_string_handling,
		}: CompilerInput,
	) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
		Box::pin(async move {
			// Be careful to entirely omit the viaIR field if the compiler does not support it,
			// as it will error if you provide fields it does not know about. Because
			// `supports_mode` is called prior to instantiating a compiler, we should never
			// ask for something which is invalid.
			let via_ir = match (pipeline, self.compiler_supports_yul()) {
				(pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
				(_pipeline, false) => None,
			};
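The reason `via_ir` is an `Option` here is that older solc releases reject settings they do not recognize, so the field must vanish from the serialized JSON entirely rather than appear as `null`. A minimal sketch of how serde can drop an optional field from the output (an illustrative struct, not the crate's actual `Settings` type, which is defined by `foundry_compilers_artifacts`):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct SettingsSketch {
    // Omitted from the JSON when `None`, so old compilers never see it.
    #[serde(rename = "viaIR", skip_serializing_if = "Option::is_none")]
    via_ir: Option<bool>,
}

fn main() {
    let old = serde_json::to_string(&SettingsSketch { via_ir: None }).unwrap();
    let new = serde_json::to_string(&SettingsSketch { via_ir: Some(true) }).unwrap();
    assert_eq!(old, "{}");
    assert_eq!(new, r#"{"viaIR":true}"#);
}
```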
			let input = SolcInput {
				language: SolcLanguage::Solidity,
				sources: Sources(
					sources
						.into_iter()
						.map(|(source_path, source_code)| (source_path, Source::new(source_code)))
						.collect(),
				),
				settings: Settings {
					optimizer: Optimizer {
						enabled: optimization.map(|o| o.optimizations_enabled()),
						details: Some(Default::default()),
						..Default::default()
					},
					output_selection: OutputSelection::common_output_selection(
						[
							ContractOutputSelection::Abi,
							ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
								BytecodeOutputSelection::Object,
							)),
						]
						.into_iter()
						.map(|item| item.to_string()),
					),
					evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
					via_ir,
					libraries: Libraries {
						libs: libraries
							.into_iter()
							.map(|(file_path, libraries)| {
								(
									file_path,
									libraries
										.into_iter()
										.map(|(library_name, library_address)| {
											(library_name, library_address.to_string())
										})
										.collect(),
								)
							})
							.collect(),
					},
					debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
						revert_strings: match revert_string_handling {
							crate::RevertString::Default => Some(RevertStrings::Default),
							crate::RevertString::Debug => Some(RevertStrings::Debug),
							crate::RevertString::Strip => Some(RevertStrings::Strip),
							crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
						},
						debug_info: Default::default(),
					}),
					..Default::default()
				},
			};

			Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
			let path = &self.0.solc_path;
			let mut command = AsyncCommand::new(path);
			command
				.stdin(Stdio::piped())
				.stdout(Stdio::piped())
				.stderr(Stdio::null())
				.arg("--standard-json");

			if let Some(ref base_path) = base_path {
				command.arg("--base-path").arg(base_path);
			}
			if !allow_paths.is_empty() {
				command.arg("--allow-paths").arg(
					allow_paths
						.iter()
						.map(|path| path.display().to_string())
						.collect::<Vec<_>>()
						.join(","),
				);
			}
			let mut child = command
				.spawn()
				.with_context(|| format!("Failed to spawn solc at {}", path.display()))?;
			let stdin = child.stdin.as_mut().expect("should be piped");
			let serialized_input = serde_json::to_vec(&input)
				.context("Failed to serialize Standard JSON input for solc")?;
			stdin
				.write_all(&serialized_input)
				.await
				.context("Failed to write Standard JSON to solc stdin")?;
			let output = child
				.wait_with_output()
				.await
				.context("Failed while waiting for solc process to finish")?;
			if !output.status.success() {
				let json_in = serde_json::to_string_pretty(&input)
					.context("Failed to pretty-print Standard JSON input for logging")?;
				tracing::error!(
					status = %output.status,
					json_input = json_in,
					"Compilation using solc failed"
				);
				anyhow::bail!("Compilation failed");
			}
			let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
				.map_err(|e| {
					anyhow::anyhow!(
						"failed to parse solc JSON output: {e}\nstdout: {}",
						String::from_utf8_lossy(&output.stdout)
					)
				})
				.context("Failed to parse solc standard JSON output")?;
			// Detect whether the compiler output contains errors and report them through logs and
			// returned errors instead of handing back a compiler output that may contain errors.
			for error in parsed.errors.iter() {
				if error.severity == Severity::Error {
					tracing::error!(?error, ?input, "Encountered an error in the compilation");
					anyhow::bail!("Encountered an error in the compilation: {error}")
				}
			}
			tracing::debug!(
				output = %String::from_utf8_lossy(&output.stdout).to_string(),
				"Compiled successfully"
			);
			let mut compiler_output = CompilerOutput::default();
			for (contract_path, contracts) in parsed.contracts {
				let map = compiler_output
					.contracts
					.entry(contract_path.canonicalize().with_context(|| {
						format!(
							"Failed to canonicalize contract path {}",
							contract_path.display()
						)
					})?)
					.or_default();
				for (contract_name, contract_info) in contracts.into_iter() {
					let source_code = contract_info
						.evm
						.and_then(|evm| evm.bytecode)
						.map(|bytecode| match bytecode.object {
							BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
							BytecodeObject::Unlinked(unlinked) => unlinked,
						})
						.context("Unexpected - contract compiled with solc has no source code")?;
					let abi = contract_info
						.abi
						.context("Unexpected - contract compiled with solc has no ABI")?;
					map.insert(contract_name, (source_code, abi));
				}
			}
			let mut compiler_output = CompilerOutput::default();
			for (contract_path, contracts) in parsed.contracts {
				let map = compiler_output
					.contracts
					.entry(contract_path.canonicalize().with_context(|| {
						format!("Failed to canonicalize contract path {}", contract_path.display())
					})?)
					.or_default();
				for (contract_name, contract_info) in contracts.into_iter() {
					let source_code = contract_info
						.evm
						.and_then(|evm| evm.bytecode)
						.map(|bytecode| match bytecode.object {
							BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
							BytecodeObject::Unlinked(unlinked) => unlinked,
						})
						.context("Unexpected - contract compiled with solc has no source code")?;
					let abi = contract_info
						.abi
						.context("Unexpected - contract compiled with solc has no ABI")?;
					map.insert(contract_name, (source_code, abi));
				}
			}
			Ok(compiler_output)
		})
	}

	fn supports_mode(
		&self,
		_optimize_setting: ModeOptimizerSetting,
		pipeline: ModePipeline,
	) -> bool {
		// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
		// (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
		pipeline == ModePipeline::ViaEVMAssembly
			|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
	}
	fn supports_mode(
		&self,
		_optimize_setting: ModeOptimizerSetting,
		pipeline: ModePipeline,
	) -> bool {
		// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
		// mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
		// is new enough.
		pipeline == ModePipeline::ViaEVMAssembly ||
			(pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
	}
}
impl Solc {
	fn compiler_supports_yul(&self) -> bool {
		const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
		SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
	}
}
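Feature-gating on a compiler version comes down to a single comparison, since `semver::Version` is totally ordered. A small illustration of the same gate `compiler_supports_yul` applies (the sample versions below are just examples):

```rust
use semver::Version;

// `--via-ir` landed in solc 0.8.13, so anything at or above that supports it.
fn supports_via_ir(solc_version: &Version) -> bool {
    const MIN: Version = Version::new(0, 8, 13);
    *solc_version >= MIN
}

fn main() {
    assert!(supports_via_ir(&Version::new(0, 8, 30)));
    assert!(!supports_via_ir(&Version::new(0, 8, 12)));
}
```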

@@ -7,82 +7,82 @@ use semver::Version;

#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
	// Arrange
	let args = TestExecutionContext::default();
	let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
		.await
		.unwrap();

	// Act
	let output = Compiler::new()
		.with_source("./tests/assets/array_one_element/callable.sol")
		.unwrap()
		.with_source("./tests/assets/array_one_element/main.sol")
		.unwrap()
		.try_build(&solc)
		.await;

	// Assert
	let output = output.expect("Failed to compile");
	assert_eq!(output.contracts.len(), 2);

	let main_file_contracts = output
		.contracts
		.get(
			&PathBuf::from("./tests/assets/array_one_element/main.sol")
				.canonicalize()
				.unwrap(),
		)
		.unwrap();
	let callable_file_contracts = output
		.contracts
		.get(
			&PathBuf::from("./tests/assets/array_one_element/callable.sol")
				.canonicalize()
				.unwrap(),
		)
		.unwrap();
	assert!(main_file_contracts.contains_key("Main"));
	assert!(callable_file_contracts.contains_key("Callable"));
}
#[tokio::test]
async fn contracts_can_be_compiled_with_resolc() {
	// Arrange
	let args = TestExecutionContext::default();
	let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
		.await
		.unwrap();

	// Act
	let output = Compiler::new()
		.with_source("./tests/assets/array_one_element/callable.sol")
		.unwrap()
		.with_source("./tests/assets/array_one_element/main.sol")
		.unwrap()
		.try_build(&resolc)
		.await;

	// Assert
	let output = output.expect("Failed to compile");
	assert_eq!(output.contracts.len(), 2);

	let main_file_contracts = output
		.contracts
		.get(
			&PathBuf::from("./tests/assets/array_one_element/main.sol")
				.canonicalize()
				.unwrap(),
		)
		.unwrap();
	let callable_file_contracts = output
		.contracts
		.get(
			&PathBuf::from("./tests/assets/array_one_element/callable.sol")
				.canonicalize()
				.unwrap(),
		)
		.unwrap();
	assert!(main_file_contracts.contains_key("Main"));
	assert!(callable_file_contracts.contains_key("Callable"));
}
+555
-568
File diff suppressed because it is too large
@@ -14,164 +14,157 @@ use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;

use crate::{
	differential_benchmarks::{Driver, Watcher, WatcherEvent},
	helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};

/// Handles the differential benchmarks, executing them according to the information defined in
/// the context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_benchmarks(
	mut context: BenchmarkingContext,
	reporter: Reporter,
) -> anyhow::Result<()> {
	// A bit of a hack, but we need to override the number of nodes specified through the CLI since
	// benchmarks can only be run on a single node. Perhaps in the future we'll have a cleaner way
	// to do this. But, for the time being, we need to override the CLI arguments.
	if context.concurrency_configuration.number_of_nodes != 1 {
		warn!(
			specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
			updated_number_of_nodes = 1,
			"Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
		);
		context.concurrency_configuration.number_of_nodes = 1;
	};
	let full_context = Context::Benchmark(Box::new(context.clone()));
	// Discover all of the metadata files that are defined in the context.
	let metadata_files = collect_metadata_files(&context)
		.context("Failed to collect metadata files for differential testing")?;
	info!(len = metadata_files.len(), "Discovered metadata files");

	// Discover the list of platforms that the tests should run on based on the context.
	let platforms = context
		.platforms
		.iter()
		.copied()
		.map(Into::<&dyn Platform>::into)
		.collect::<Vec<_>>();
	// Starting the nodes of the various platforms specified in the context. Note that we use the
	// node pool since it contains all of the code needed to spawn nodes from A to Z and therefore
	// it's the preferred way for us to start nodes even when we're starting just a single node. The
	// added overhead from it is quite small (performance wise) since it's involved only when we're
	// creating the test definitions, but it might have other maintenance overhead as it obscures
	// the fact that only a single node is spawned.
	let platforms_and_nodes = {
		let mut map = BTreeMap::new();

		for platform in platforms.iter() {
			let platform_identifier = platform.platform_identifier();

			let node_pool = NodePool::new(full_context.clone(), *platform)
				.await
				.inspect_err(|err| {
					error!(
						?err,
						%platform_identifier,
						"Failed to initialize the node pool for the platform."
					)
				})
				.context("Failed to initialize the node pool")?;

			map.insert(platform_identifier, (*platform, node_pool));
		}

		map
	};
	info!("Spawned the platform nodes");
	// Preparing test definitions for the execution.
	let test_definitions = create_test_definitions_stream(
		&full_context,
		metadata_files.iter(),
		&platforms_and_nodes,
		reporter.clone(),
	)
	.await
	.collect::<Vec<_>>()
	.await;
	info!(len = test_definitions.len(), "Created test definitions");
	// Creating the objects that will be shared between the various runs. The cached compiler is
	// currently the only one that is safe to share between runs.
	let cached_compiler = CachedCompiler::new(
		context
			.working_directory
			.as_path()
			.join("compilation_cache"),
		context
			.compilation_configuration
			.invalidate_compilation_cache,
	)
	.await
	.map(Arc::new)
	.context("Failed to initialize cached compiler")?;
	let cached_compiler = CachedCompiler::new(
		context.working_directory.as_path().join("compilation_cache"),
		context.compilation_configuration.invalidate_compilation_cache,
	)
	.await
	.map(Arc::new)
	.context("Failed to initialize cached compiler")?;
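`CachedCompiler` wraps compilation in an on-disk cache under the working directory, with a flag to invalidate stale entries; its implementation is not shown in this diff. A hypothetical sketch of the general shape of such a cache, keyed by a hash of the compiler input (the helper names, file layout, and the `sha2`/`hex` dependencies are all assumptions, not the crate's real API):

```rust
use std::path::Path;

use anyhow::Result;
use sha2::{Digest, Sha256};
use tokio::fs;

/// Hypothetical lookup: returns a previously cached artifact for this input, if any.
async fn cache_lookup(cache_dir: &Path, invalidate: bool, input_json: &str) -> Option<String> {
    let key = hex::encode(Sha256::digest(input_json.as_bytes()));
    let path = cache_dir.join(key).with_extension("json");
    if invalidate {
        let _ = fs::remove_file(&path).await; // drop stale entries on request
        return None;
    }
    fs::read_to_string(&path).await.ok()
}

/// Hypothetical store: persists a fresh compilation result for later runs.
async fn cache_store(cache_dir: &Path, input_json: &str, output_json: &str) -> Result<()> {
    let key = hex::encode(Sha256::digest(input_json.as_bytes()));
    fs::create_dir_all(cache_dir).await?;
    fs::write(cache_dir.join(key).with_extension("json"), output_json).await?;
    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    let dir = std::env::temp_dir().join("retester-cache-sketch");
    let input = r#"{"language":"Solidity"}"#;
    if cache_lookup(&dir, false, input).await.is_none() {
        // Pretend this is the expensive compiler invocation.
        cache_store(&dir, input, r#"{"contracts":{}}"#).await?;
    }
    assert!(cache_lookup(&dir, false, input).await.is_some());
    Ok(())
}
```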
	// Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd
	// like to run all of the workloads for one platform, and then the next sequentially as we'd
	// like for the effect of concurrency to be minimized when we're doing the benchmarking.
	for platform in platforms.iter() {
		let platform_identifier = platform.platform_identifier();

		let span = info_span!("Benchmarking for the platform", %platform_identifier);
		let _guard = span.enter();

		for test_definition in test_definitions.iter() {
			let platform_information = &test_definition.platforms[&platform_identifier];

			let span = info_span!(
				"Executing workload",
				metadata_file_path = %test_definition.metadata_file_path.display(),
				case_idx = %test_definition.case_idx,
				mode = %test_definition.mode,
			);
			let _guard = span.enter();
			// Initializing all of the components required to execute this particular workload.
			let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
				context.wallet_configuration.highest_private_key_exclusive(),
			)));
			let (watcher, watcher_tx) = Watcher::new(
				platform_identifier,
				platform_information
					.node
					.subscribe_to_full_blocks_information()
					.await
					.context("Failed to subscribe to full blocks information from the node")?,
			);
			let driver = Driver::new(
				platform_information,
				test_definition,
				private_key_allocator,
				cached_compiler.as_ref(),
				watcher_tx.clone(),
				test_definition
					.case
					.steps_iterator_for_benchmarks(context.default_repetition_count)
					.enumerate()
					.map(|(step_idx, step)| -> (StepPath, Step) {
						(StepPath::new(vec![StepIdx::new(step_idx)]), step)
					}),
			)
			.await
			.context("Failed to create the benchmarks driver")?;
			futures::future::try_join(
				watcher.run(),
				driver.execute_all().inspect(|_| {
					info!("All transactions submitted - driver completed execution");
					watcher_tx
						.send(WatcherEvent::AllTransactionsSubmitted)
						.unwrap()
				}),
			)
			.await
			.context("Failed to run the driver and executor")
			.inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
			.inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
		}
	}
			futures::future::try_join(
				watcher.run(),
				driver.execute_all().inspect(|_| {
					info!("All transactions submitted - driver completed execution");
					watcher_tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap()
				}),
			)
			.await
			.context("Failed to run the driver and executor")
			.inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
			.inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
		}
	}

	Ok(())
}
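`try_join` is what ties the watcher and the driver together: both futures run concurrently, the pair resolves once both succeed, and the first error aborts the whole workload. A self-contained sketch of that coordination, with toy futures in place of the real watcher and driver:

```rust
use anyhow::{Result, bail};

async fn watcher() -> Result<&'static str> {
    Ok("watcher done")
}

async fn driver(fail: bool) -> Result<&'static str> {
    if fail {
        bail!("driver failed");
    }
    Ok("driver done")
}

#[tokio::main]
async fn main() -> Result<()> {
    // Resolves to a tuple of both outputs once both futures complete...
    let (w, d) = futures::future::try_join(watcher(), driver(false)).await?;
    println!("{w}, {d}");
    // ...and short-circuits with the first error otherwise.
    assert!(futures::future::try_join(watcher(), driver(true)).await.is_err());
    Ok(())
}
```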
@@ -1,8 +1,8 @@
use std::{collections::HashMap, path::PathBuf};

use alloy::{
	json_abi::JsonAbi,
	primitives::{Address, U256},
};

use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,34 +10,31 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)]
/// The state associated with the test execution of one of the workloads.
pub struct ExecutionState {
	/// The compiled contracts; these contracts have been compiled and have had the libraries
	/// linked against them, and are therefore ready to be deployed on-demand.
	pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

	/// A map of all of the deployed contracts and information about them.
	pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

	/// This map stores the variables used for each one of the cases contained in the metadata
	/// file.
	pub variables: HashMap<String, U256>,
}

impl ExecutionState {
	pub fn new(
		compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
		deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
	) -> Self {
		Self {
			compiled_contracts,
			deployed_contracts,
			variables: Default::default(),
		}
	}
	pub fn new(
		compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
		deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
	) -> Self {
		Self { compiled_contracts, deployed_contracts, variables: Default::default() }
	}

	pub fn empty() -> Self {
		Self {
			compiled_contracts: Default::default(),
			deployed_contracts: Default::default(),
			variables: Default::default(),
		}
	}
}
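`ExecutionState` is a plain per-workload scratchpad: source files map to linked, ready-to-deploy artifacts, instances map to their deployments, and case variables map to values. A toy illustration of the intended flow with a simplified stand-in type (the real maps use `JsonAbi`, `Address`, and the format crate's identifier types rather than strings):

```rust
use std::collections::HashMap;

// Simplified stand-in for `ExecutionState`, keeping only the shape.
#[derive(Default)]
struct State {
    compiled: HashMap<String, String>,  // source path   -> linked bytecode
    deployed: HashMap<String, String>,  // instance      -> on-chain address
    variables: HashMap<String, u64>,    // case variable -> value
}

fn main() {
    let mut state = State::default();
    // Compile once, then deploy on demand and record where it landed.
    state.compiled.insert("main.sol".into(), "0x6080...".into());
    state.deployed.insert("Main".into(), "0x0000000000000000000000000000000000000001".into());
    // Steps can stash values for later steps of the same case.
    state.variables.insert("totalSupply".into(), 1_000);
    assert!(state.deployed.contains_key("Main"));
}
```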
@@ -6,8 +6,8 @@ use futures::{Stream, StreamExt};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_node_interaction::MinedBlockInformation;
use tokio::sync::{
	RwLock,
	mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::{info, instrument};

@@ -15,193 +15,175 @@ use tracing::{info, instrument};
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
/// workload and is not designed for reuse.
pub struct Watcher {
	/// The identifier of the platform that this watcher is for.
	platform_identifier: PlatformIdentifier,

	/// The receive side of the channel that all of the drivers and various other parts of the code
	/// send events to the watcher on.
	rx: UnboundedReceiver<WatcherEvent>,

	/// This is a stream of the blocks that were mined by the node. This is for a single platform
	/// and a single node from that platform.
	blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
}
impl Watcher {
	pub fn new(
		platform_identifier: PlatformIdentifier,
		blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
	) -> (Self, UnboundedSender<WatcherEvent>) {
		let (tx, rx) = unbounded_channel::<WatcherEvent>();
		(
			Self {
				platform_identifier,
				rx,
				blocks_stream,
			},
			tx,
		)
	}
	pub fn new(
		platform_identifier: PlatformIdentifier,
		blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
	) -> (Self, UnboundedSender<WatcherEvent>) {
		let (tx, rx) = unbounded_channel::<WatcherEvent>();
		(Self { platform_identifier, rx, blocks_stream }, tx)
	}
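The constructor hands back the watcher together with the sending half of an unbounded channel, so any number of producers can clone the sender and feed events to the single consumer. A reduced sketch of the same handshake, with a toy event enum in place of `WatcherEvent`:

```rust
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel};

#[derive(Debug)]
enum Event {
    Submitted(u64),
    Done,
}

struct Consumer {
    rx: UnboundedReceiver<Event>,
}

// Mirrors `Watcher::new`: build the consumer and hand the sender back.
fn new_consumer() -> (Consumer, UnboundedSender<Event>) {
    let (tx, rx) = unbounded_channel();
    (Consumer { rx }, tx)
}

#[tokio::main]
async fn main() {
    let (mut consumer, tx) = new_consumer();
    // Senders are cheap to clone; every driver/task can hold one.
    tx.send(Event::Submitted(1)).unwrap();
    tx.send(Event::Done).unwrap();
    drop(tx); // closing all senders ends the receive loop below
    while let Some(event) = consumer.rx.recv().await {
        println!("{event:?}");
    }
}
```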
	#[instrument(level = "info", skip_all)]
	pub async fn run(mut self) -> Result<()> {
		// The first event that the watcher receives must be a `RepetitionStartEvent` that informs
		// the watcher of the last block number that it should ignore and what the block number is
		// for the first important block that it should look for.
		let ignore_block_before = loop {
			let Some(WatcherEvent::RepetitionStartEvent {
				ignore_block_before,
			}) = self.rx.recv().await
			else {
				continue;
			};
			break ignore_block_before;
		};
		let ignore_block_before = loop {
			let Some(WatcherEvent::RepetitionStartEvent { ignore_block_before }) =
				self.rx.recv().await
			else {
				continue;
			};
			break ignore_block_before;
		};
// This is the set of the transaction hashes that the watcher should be looking for and
|
||||
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
||||
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
||||
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
|
||||
// This is the set of the transaction hashes that the watcher should be looking for and
|
||||
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
||||
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
||||
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
|
||||
|
||||
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
||||
// txs are expected to come through the receive side of the channel. We do not want to rely
|
||||
// on the channel closing alone for the watcher to know that all of the transactions were
|
||||
// submitted and for there to be an explicit event sent by the core orchestrator that
|
||||
// informs the watcher that no further transactions are to be expected and that it can
|
||||
// safely ignore the channel.
|
||||
let all_transactions_submitted = Arc::new(RwLock::new(false));
|
||||
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
||||
// txs are expected to come through the receive side of the channel. We do not want to rely
|
||||
// on the channel closing alone for the watcher to know that all of the transactions were
|
||||
// submitted and for there to be an explicit event sent by the core orchestrator that
|
||||
// informs the watcher that no further transactions are to be expected and that it can
|
||||
// safely ignore the channel.
|
||||
let all_transactions_submitted = Arc::new(RwLock::new(false));
|
||||
|
||||
let watcher_event_watching_task = {
|
||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||
async move {
|
||||
while let Some(watcher_event) = self.rx.recv().await {
|
||||
match watcher_event {
|
||||
// Subsequent repetition starts are ignored since certain workloads can
|
||||
// contain nested repetitions and therefore there's no use in doing any
|
||||
// action if the repetitions are nested.
|
||||
WatcherEvent::RepetitionStartEvent { .. } => {}
|
||||
WatcherEvent::SubmittedTransaction { transaction_hash } => {
|
||||
watch_for_transaction_hashes
|
||||
.write()
|
||||
.await
|
||||
.insert(transaction_hash);
|
||||
}
|
||||
WatcherEvent::AllTransactionsSubmitted => {
|
||||
*all_transactions_submitted.write().await = true;
|
||||
self.rx.close();
|
||||
info!("Watcher's Events Watching Task Finished");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
let block_information_watching_task = {
|
||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||
let mut blocks_information_stream = self.blocks_stream;
|
||||
async move {
|
||||
let mut mined_blocks_information = Vec::new();
|
||||
let watcher_event_watching_task = {
|
||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||
async move {
|
||||
while let Some(watcher_event) = self.rx.recv().await {
|
||||
match watcher_event {
|
||||
// Subsequent repetition starts are ignored since certain workloads can
|
||||
// contain nested repetitions and therefore there's no use in doing any
|
||||
// action if the repetitions are nested.
|
||||
WatcherEvent::RepetitionStartEvent { .. } => {},
|
||||
WatcherEvent::SubmittedTransaction { transaction_hash } => {
|
||||
watch_for_transaction_hashes.write().await.insert(transaction_hash);
|
||||
},
|
||||
WatcherEvent::AllTransactionsSubmitted => {
|
||||
*all_transactions_submitted.write().await = true;
|
||||
self.rx.close();
|
||||
info!("Watcher's Events Watching Task Finished");
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
let block_information_watching_task = {
|
||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||
let mut blocks_information_stream = self.blocks_stream;
|
||||
async move {
|
||||
let mut mined_blocks_information = Vec::new();
|
||||
|
||||
while let Some(block) = blocks_information_stream.next().await {
|
||||
// If the block number is equal to or less than the last block before the
|
||||
// repetition then we ignore it and continue on to the next block.
|
||||
if block.block_number <= ignore_block_before {
|
||||
continue;
|
||||
}
|
||||
while let Some(block) = blocks_information_stream.next().await {
|
||||
// If the block number is equal to or less than the last block before the
|
||||
// repetition then we ignore it and continue on to the next block.
|
||||
if block.block_number <= ignore_block_before {
|
||||
continue;
|
||||
}
|
||||
|
||||
if *all_transactions_submitted.read().await
|
||||
&& watch_for_transaction_hashes.read().await.is_empty()
|
||||
{
|
||||
break;
|
||||
}
|
||||
if *all_transactions_submitted.read().await &&
|
||||
watch_for_transaction_hashes.read().await.is_empty()
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
info!(
|
||||
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
||||
block_tx_count = block.transaction_hashes.len(),
|
||||
"Observed a block"
|
||||
);
|
||||
info!(
|
||||
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
||||
block_tx_count = block.transaction_hashes.len(),
|
||||
"Observed a block"
|
||||
);
|
||||
|
||||
// Remove all of the transaction hashes observed in this block from the txs we
|
||||
// are currently watching for.
|
||||
let mut watch_for_transaction_hashes =
|
||||
watch_for_transaction_hashes.write().await;
|
||||
for tx_hash in block.transaction_hashes.iter() {
|
||||
watch_for_transaction_hashes.remove(tx_hash);
|
||||
}
|
||||
// Remove all of the transaction hashes observed in this block from the txs we
|
||||
// are currently watching for.
|
||||
let mut watch_for_transaction_hashes =
|
||||
watch_for_transaction_hashes.write().await;
|
||||
for tx_hash in block.transaction_hashes.iter() {
|
||||
watch_for_transaction_hashes.remove(tx_hash);
|
||||
}
|
||||
|
||||
mined_blocks_information.push(block);
|
||||
}
|
||||
mined_blocks_information.push(block);
|
||||
}
|
||||
|
||||
info!("Watcher's Block Watching Task Finished");
|
||||
mined_blocks_information
|
||||
}
|
||||
};
|
||||
info!("Watcher's Block Watching Task Finished");
|
||||
mined_blocks_information
|
||||
}
|
||||
};
|
||||
|
||||
let (_, mined_blocks_information) =
|
||||
futures::future::join(watcher_event_watching_task, block_information_watching_task)
|
||||
.await;
|
||||
let (_, mined_blocks_information) =
|
||||
futures::future::join(watcher_event_watching_task, block_information_watching_task)
|
||||
.await;
|
||||
|
||||
// region:TEMPORARY
|
||||
{
|
||||
// TODO: The following core is TEMPORARY and will be removed once we have proper
|
||||
// reporting in place and then it can be removed. This serves as as way of doing some
|
||||
// very simple reporting for the time being.
|
||||
use std::io::Write;
|
||||
// region:TEMPORARY
|
||||
{
|
||||
// TODO: The following core is TEMPORARY and will be removed once we have proper
|
||||
// reporting in place and then it can be removed. This serves as as way of doing some
|
||||
// very simple reporting for the time being.
|
||||
use std::io::Write;
|
||||
|
||||
let mut stderr = std::io::stderr().lock();
|
||||
writeln!(
|
||||
stderr,
|
||||
"Watcher information for {}",
|
||||
self.platform_identifier
|
||||
)?;
|
||||
writeln!(
|
||||
stderr,
|
||||
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
|
||||
)?;
|
||||
for block in mined_blocks_information {
|
||||
writeln!(
|
||||
stderr,
|
||||
"{},{},{},{},{}",
|
||||
block.block_number,
|
||||
block.block_timestamp,
|
||||
block.mined_gas,
|
||||
block.block_gas_limit,
|
||||
block.transaction_hashes.len()
|
||||
)?
|
||||
}
|
||||
}
|
||||
// endregion:TEMPORARY
|
||||
let mut stderr = std::io::stderr().lock();
|
||||
writeln!(stderr, "Watcher information for {}", self.platform_identifier)?;
|
||||
writeln!(stderr, "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count")?;
|
||||
for block in mined_blocks_information {
|
||||
writeln!(
|
||||
stderr,
|
||||
"{},{},{},{},{}",
|
||||
block.block_number,
|
||||
block.block_timestamp,
|
||||
block.mined_gas,
|
||||
block.block_gas_limit,
|
||||
block.transaction_hashes.len()
|
||||
)?
|
||||
}
|
||||
}
|
||||
// endregion:TEMPORARY
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub enum WatcherEvent {
|
||||
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
||||
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
||||
/// reason behind this is that we do not want the initialization transactions (e.g., contract
|
||||
/// deployments) to be included in the overall TPS and GPS measurements since these blocks will
|
||||
/// most likely only contain a single transaction since they're just being used for
|
||||
/// initialization.
|
||||
RepetitionStartEvent {
|
||||
/// This is the block number of the last block seen before the repetition started. This is
|
||||
/// used to instruct the watcher to ignore all block prior to this block when it starts
|
||||
/// streaming the blocks.
|
||||
ignore_block_before: BlockNumber,
|
||||
},
|
||||
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
||||
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
||||
/// reason behind this is that we do not want the initialization transactions (e.g., contract
|
||||
/// deployments) to be included in the overall TPS and GPS measurements since these blocks will
|
||||
/// most likely only contain a single transaction since they're just being used for
|
||||
/// initialization.
|
||||
RepetitionStartEvent {
|
||||
/// This is the block number of the last block seen before the repetition started. This is
|
||||
/// used to instruct the watcher to ignore all block prior to this block when it starts
|
||||
/// streaming the blocks.
|
||||
ignore_block_before: BlockNumber,
|
||||
},
|
||||
|
||||
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
||||
/// transaction with this hash in the blocks that it watches.
|
||||
SubmittedTransaction {
|
||||
/// The hash of the submitted transaction.
|
||||
transaction_hash: TxHash,
|
||||
},
|
||||
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
||||
/// transaction with this hash in the blocks that it watches.
|
||||
SubmittedTransaction {
|
||||
/// The hash of the submitted transaction.
|
||||
transaction_hash: TxHash,
|
||||
},
|
||||
|
||||
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
||||
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
||||
/// any longer.
|
||||
AllTransactionsSubmitted,
|
||||
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
||||
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
||||
/// any longer.
|
||||
AllTransactionsSubmitted,
|
||||
}
|
||||
|
||||
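The event protocol above is easier to see end to end in isolation. Below is a minimal, self-contained sketch of the same flow, run sequentially rather than as two joined tasks, and using hypothetical stand-in types (u64 block numbers, String transaction hashes, a canned block list) in place of the crate's BlockNumber, TxHash, and MinedBlockInformation; it assumes only a tokio dependency with the macros, rt, and sync features.

use std::collections::HashSet;

use tokio::sync::mpsc::unbounded_channel;

// Simplified stand-ins for the crate's own event and block types.
enum Event {
    RepetitionStart { ignore_block_before: u64 },
    SubmittedTransaction { transaction_hash: String },
    AllTransactionsSubmitted,
}

struct Block {
    number: u64,
    transaction_hashes: Vec<String>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = unbounded_channel::<Event>();

    // Driver side: announce the repetition start, submit one transaction,
    // then signal that no further transactions will come.
    tx.send(Event::RepetitionStart { ignore_block_before: 1 }).unwrap();
    tx.send(Event::SubmittedTransaction { transaction_hash: "0xabc".into() }).unwrap();
    tx.send(Event::AllTransactionsSubmitted).unwrap();

    // Watcher side: the first interesting event must be the repetition start.
    let ignore_block_before = loop {
        match rx.recv().await {
            Some(Event::RepetitionStart { ignore_block_before }) => break ignore_block_before,
            _ => continue,
        }
    };

    // Collect the hashes to watch for until the submission-complete signal.
    let mut watching = HashSet::new();
    while let Some(event) = rx.recv().await {
        match event {
            Event::RepetitionStart { .. } => {}
            Event::SubmittedTransaction { transaction_hash } => {
                watching.insert(transaction_hash);
            }
            Event::AllTransactionsSubmitted => break,
        }
    }

    // A canned "stream" of mined blocks standing in for the node's own.
    let blocks = vec![
        Block { number: 1, transaction_hashes: vec![] },
        Block { number: 2, transaction_hashes: vec!["0xabc".into()] },
    ];
    for block in blocks {
        // Ignore everything mined before the repetition started.
        if block.number <= ignore_block_before {
            continue;
        }
        for hash in &block.transaction_hashes {
            watching.remove(hash);
        }
        if watching.is_empty() {
            break;
        }
    }
    assert!(watching.is_empty(), "every submitted transaction was observed");
}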
File diff suppressed because it is too large
@@ -1,16 +1,16 @@
//! The main entry point into differential testing.

use std::{
    collections::{BTreeMap, BTreeSet},
    io::{BufWriter, Write, stderr},
    sync::Arc,
    time::{Duration, Instant},
};

+use crate::Platform;
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
-use revive_dt_core::Platform;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};

@@ -18,260 +18,249 @@ use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};

use crate::{
    differential_tests::Driver,
    helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
};

/// Handles differential testing, executing it according to the information defined in the
/// context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_tests(
    context: TestExecutionContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    let reporter_clone = reporter.clone();

    // Discover all of the metadata files that are defined in the context.
    let metadata_files = collect_metadata_files(&context)
        .context("Failed to collect metadata files for differential testing")?;
    info!(len = metadata_files.len(), "Discovered metadata files");

    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    // Starting the nodes of the various platforms specified in the context.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();

        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();

            let context = Context::Test(Box::new(context.clone()));
            let node_pool = NodePool::new(context, *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;

            map.insert(platform_identifier, (*platform, node_pool));
        }

        map
    };
    info!("Spawned the platform nodes");

    // Preparing test definitions.
    let full_context = Context::Test(Box::new(context.clone()));
    let test_definitions = create_test_definitions_stream(
        &full_context,
        metadata_files.iter(),
        &platforms_and_nodes,
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");

    // Creating everything else required for the driver to run.
    let cached_compiler = CachedCompiler::new(
-        context
-            .working_directory
-            .as_path()
-            .join("compilation_cache"),
-        context
-            .compilation_configuration
-            .invalidate_compilation_cache,
+        context.working_directory.as_path().join("compilation_cache"),
+        context.compilation_configuration.invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;
    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        context.wallet_configuration.highest_private_key_exclusive(),
    )));

    // Creating the driver and executing all of the steps.
    let semaphore = context
        .concurrency_configuration
        .concurrency_limit()
        .map(Semaphore::new)
        .map(Arc::new);
    let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
    let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
        |(test_id, test_definition)| {
            let running_task_list = running_task_list.clone();
            let semaphore = semaphore.clone();

            let private_key_allocator = private_key_allocator.clone();
            let cached_compiler = cached_compiler.clone();
            let mode = test_definition.mode.clone();
            let span = info_span!(
                "Executing Test Case",
                test_id,
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %mode,
            );
            async move {
                let permit = match semaphore.as_ref() {
                    Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
                    None => None,
                };

                running_task_list.write().await.insert(test_id);
                let driver = match Driver::new_root(
                    test_definition,
                    private_key_allocator,
                    &cached_compiler,
                )
                .await
                {
                    Ok(driver) => driver,
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                        drop(permit);
                        running_task_list.write().await.remove(&test_id);
                        return;
-                    }
+                    },
                };
                info!("Created the driver for the test case");

                match driver.execute_all().await {
                    Ok(steps_executed) => test_definition
                        .reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
-                    }
+                    },
                };
                info!("Finished the execution of the test case");
                drop(permit);
                running_task_list.write().await.remove(&test_id);
            }
            .instrument(span)
        },
    ))
    .inspect(|_| {
        info!("Finished executing all test cases");
-        reporter_clone
-            .report_completion_event()
-            .expect("Can't fail")
+        reporter_clone.report_completion_event().expect("Can't fail")
    });
    let cli_reporting_task = start_cli_reporting_task(reporter);

    tokio::task::spawn(async move {
        loop {
            let remaining_tasks = running_task_list.read().await;
-            info!(
-                count = remaining_tasks.len(),
-                ?remaining_tasks,
-                "Remaining Tests"
-            );
+            info!(count = remaining_tasks.len(), ?remaining_tasks, "Remaining Tests");
            tokio::time::sleep(Duration::from_secs(10)).await
        }
    });

    futures::future::join(driver_task, cli_reporting_task).await;

    Ok(())
}

#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    drop(reporter);

    let start = Instant::now();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const GREY: &str = "\x1B[90m";
    const COLOR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    let mut number_of_successes = 0;
    let mut number_of_failures = 0;

    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };

        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
        for (case_idx, case_status) in case_status.into_iter() {
            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
            let _ = match case_status {
                TestCaseStatus::Succeeded { steps_executed } => {
                    number_of_successes += 1;
                    writeln!(
                        buf,
                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
                    )
-                }
+                },
                TestCaseStatus::Failed { reason } => {
                    number_of_failures += 1;
                    writeln!(
                        buf,
                        "{}{}Case Failed{} - Reason: {}{}",
                        RED,
                        BOLD,
                        BOLD_RESET,
                        reason.trim(),
                        COLOR_RESET,
                    )
-                }
+                },
                TestCaseStatus::Ignored { reason, .. } => writeln!(
                    buf,
                    "{}{}Case Ignored{} - Reason: {}{}",
                    GREY,
                    BOLD,
                    BOLD_RESET,
                    reason.trim(),
                    COLOR_RESET,
                ),
            };
        }
        let _ = writeln!(buf);
    }

    // Summary at the end.
    let _ = writeln!(
        buf,
        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
        number_of_successes + number_of_failures,
        GREEN,
        number_of_successes,
        COLOR_RESET,
        RED,
        number_of_failures,
        COLOR_RESET,
        start.elapsed().as_secs()
    );
}

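One detail worth pulling out of the handler above is the optional concurrency limit: a Semaphore is only constructed when the configuration specifies a limit, and each task acquires a permit only if one exists. A minimal sketch of that pattern, assuming tokio and futures as dependencies:

use std::sync::Arc;

use tokio::sync::Semaphore;

// `None` means "no limit"; `Some(n)` bounds how many tasks run at once.
async fn run_all(limit: Option<usize>, task_count: usize) {
    let semaphore = limit.map(|n| Arc::new(Semaphore::new(n)));

    let tasks = (0..task_count).map(|i| {
        let semaphore = semaphore.clone();
        async move {
            // Acquire a permit only when a limit was configured.
            let _permit = match semaphore.as_ref() {
                Some(semaphore) => Some(semaphore.acquire().await.expect("never closed")),
                None => None,
            };
            println!("task {i} running");
            // The permit, if any, is released when `_permit` drops here.
        }
    });
    futures::future::join_all(tasks).await;
}

#[tokio::main]
async fn main() {
    run_all(Some(2), 5).await;
}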
@@ -1,8 +1,8 @@
use std::{collections::HashMap, path::PathBuf};

use alloy::{
    json_abi::JsonAbi,
    primitives::{Address, U256},
};

use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,26 +10,23 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)]
/// The state associated with the test execution of one of the tests.
pub struct ExecutionState {
-    /// The compiled contracts, these contracts have been compiled and have had the libraries linked
-    /// against them and therefore they're ready to be deployed on-demand.
+    /// The compiled contracts, these contracts have been compiled and have had the libraries
+    /// linked against them and therefore they're ready to be deployed on-demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

-    /// This map stores the variables used for each one of the cases contained in the metadata file.
+    /// This map stores the variables used for each one of the cases contained in the metadata
+    /// file.
    pub variables: HashMap<String, U256>,
}

impl ExecutionState {
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
-        Self {
-            compiled_contracts,
-            deployed_contracts,
-            variables: Default::default(),
-        }
+        Self { compiled_contracts, deployed_contracts, variables: Default::default() }
    }
}

@@ -2,16 +2,16 @@
//! be reused between runs.

use std::{
    borrow::Cow,
    collections::HashMap,
    path::{Path, PathBuf},
    sync::{Arc, LazyLock},
};

+use crate::Platform;
use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};

use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};

@@ -23,33 +23,30 @@ use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, debug, debug_span, instrument};

pub struct CachedCompiler<'a> {
    /// The cache that stores the compiled contracts.
    artifacts_cache: ArtifactsCache,

    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
    /// come in for the same contract we only compile it once, and all other tasks that request
    /// this same compilation concurrently get the cached version.
    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
}

impl<'a> CachedCompiler<'a> {
    pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
        let mut cache = ArtifactsCache::new(path);
        if invalidate_cache {
            cache = cache
                .with_invalidated_cache()
                .await
                .context("Failed to invalidate compilation cache directory")?;
        }
-        Ok(Self {
-            artifacts_cache: cache,
-            cache_key_lock: Default::default(),
-        })
+        Ok(Self { artifacts_cache: cache, cache_key_lock: Default::default() })
    }

    /// Compiles or gets the compilation artifacts from the cache.
    #[allow(clippy::too_many_arguments)]
    #[instrument(
        level = "debug",
        skip_all,
        fields(
@@ -59,317 +56,309 @@ impl<'a> CachedCompiler<'a> {
        ),
        err
    )]
    pub async fn compile_contracts(
        &self,
        metadata: &'a Metadata,
        metadata_file_path: &'a Path,
        mode: Cow<'a, Mode>,
        deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        compiler: &dyn SolidityCompiler,
        platform: &dyn Platform,
        reporter: &ExecutionSpecificReporter,
    ) -> Result<CompilerOutput> {
        let cache_key = CacheKey {
            compiler_identifier: platform.compiler_identifier(),
            compiler_version: compiler.version().clone(),
            metadata_file_path,
            solc_mode: mode.clone(),
        };

        let compilation_callback = || {
            async move {
                compile_contracts(
                    metadata
                        .directory()
                        .context("Failed to get metadata directory while preparing compilation")?,
                    metadata
                        .files_to_compile()
                        .context("Failed to enumerate files to compile from metadata")?,
                    &mode,
                    deployed_libraries,
                    compiler,
                    reporter,
                )
                .map(|compilation_result| compilation_result.map(CacheValue::new))
                .await
            }
            .instrument(debug_span!(
                "Running compilation for the cache key",
                cache_key.compiler_identifier = %cache_key.compiler_identifier,
                cache_key.compiler_version = %cache_key.compiler_version,
                cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
                cache_key.solc_mode = %cache_key.solc_mode,
            ))
        };

        let compiled_contracts = match deployed_libraries {
            // If deployed libraries have been specified then we will re-compile the contract as it
            // means that linking is required in this case.
            Some(_) => {
                debug!("Deployed libraries defined, recompilation must take place");
                debug!("Cache miss");
                compilation_callback()
                    .await
                    .context("Compilation callback for deployed libraries failed")?
                    .compiler_output
-            }
+            },
            // If no deployed libraries are specified then we can follow the cached flow and attempt
            // to lookup the compilation artifacts in the cache.
            None => {
                debug!("Deployed libraries undefined, attempting to make use of cache");

                // Lock this specific cache key so that we do not get inconsistent state. When
                // multiple cases come in asking for the same compilation artifacts, they should
                // not all trigger a compilation on a cache miss. Hence, the lock here.
                let read_guard = self.cache_key_lock.read().await;
                let mutex = match read_guard.get(&cache_key).cloned() {
                    Some(value) => {
                        drop(read_guard);
                        value
-                    }
+                    },
                    None => {
                        drop(read_guard);
                        self.cache_key_lock
                            .write()
                            .await
                            .entry(cache_key.clone())
                            .or_default()
                            .clone()
-                    }
+                    },
                };
                let _guard = mutex.lock().await;

                match self.artifacts_cache.get(&cache_key).await {
                    Some(cache_value) => {
                        if deployed_libraries.is_some() {
                            reporter
                                .report_post_link_contracts_compilation_succeeded_event(
                                    compiler.version().clone(),
                                    compiler.path(),
                                    true,
                                    None,
                                    cache_value.compiler_output.clone(),
                                )
                                .expect("Can't happen");
                        } else {
                            reporter
                                .report_pre_link_contracts_compilation_succeeded_event(
                                    compiler.version().clone(),
                                    compiler.path(),
                                    true,
                                    None,
                                    cache_value.compiler_output.clone(),
                                )
                                .expect("Can't happen");
                        }
                        cache_value.compiler_output
-                    }
+                    },
                    None => {
                        let compiler_output = compilation_callback()
                            .await
                            .context("Compilation callback failed (cache miss path)")?
                            .compiler_output;
                        self.artifacts_cache
                            .insert(
                                &cache_key,
-                                &CacheValue {
-                                    compiler_output: compiler_output.clone(),
-                                },
+                                &CacheValue { compiler_output: compiler_output.clone() },
                            )
                            .await
                            .context(
                                "Failed to write the cached value of the compilation artifacts",
                            )?;
                        compiler_output
-                    }
+                    },
                }
-            }
+            },
        };

        Ok(compiled_contracts)
    }
}

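The cache_key_lock field above is a per-key lock: concurrent requests for the same cache key serialize on a single async Mutex, so a cache miss triggers one compilation rather than one per requester, while requests for different keys proceed in parallel. A stripped-down sketch of the same double-checked pattern, with a plain String key standing in for CacheKey and assuming tokio:

use std::{collections::HashMap, sync::Arc};

use tokio::sync::{Mutex, RwLock};

#[derive(Default)]
struct KeyedLocks {
    locks: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl KeyedLocks {
    async fn lock_for(&self, key: &str) -> Arc<Mutex<()>> {
        // Fast path: the lock for this key already exists.
        if let Some(lock) = self.locks.read().await.get(key).cloned() {
            return lock;
        }
        // Slow path: create it under the write lock; `entry` keeps this
        // race-free if two tasks both miss the read at the same time.
        self.locks.write().await.entry(key.to_owned()).or_default().clone()
    }
}

#[tokio::main]
async fn main() {
    let locks = KeyedLocks::default();
    let lock = locks.lock_for("contract.sol@0.8.24").await;
    let _guard = lock.lock().await;
    // While `_guard` is held: check the cache, compile on a miss, insert.
}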
async fn compile_contracts(
    metadata_directory: impl AsRef<Path>,
    mut files_to_compile: impl Iterator<Item = PathBuf>,
    mode: &Mode,
    deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
    compiler: &dyn SolidityCompiler,
    reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
    // Puts a limit on how many compilations we can perform at any given instant, which helps with
    // some of the errors we have been seeing under high concurrency on macOS (we have not tried it
    // on Linux, so we do not know whether these issues persist there).
    static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
    let _permit = SPAWN_GATE.acquire().await?;

    let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
        .collect::<Vec<_>>();

    let compilation = Compiler::new()
        .with_allow_path(metadata_directory)
        // Handling the modes
        .with_optimization(mode.optimize_setting)
        .with_pipeline(mode.pipeline)
        // Adding the contract sources to the compiler.
        .try_then(|compiler| {
            files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
        })?
        // Adding the deployed libraries to the compiler.
        .then(|compiler| {
            deployed_libraries
                .iter()
                .flat_map(|value| value.iter())
                .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
                .flat_map(|(_, ident, address, _)| {
-                    all_sources_in_dir
-                        .iter()
-                        .map(move |path| (ident, address, path))
+                    all_sources_in_dir.iter().map(move |path| (ident, address, path))
                })
                .fold(compiler, |compiler, (ident, address, path)| {
                    compiler.with_library(path, ident.as_str(), *address)
                })
        });

    let input = compilation.input().clone();
    let output = compilation.try_build(compiler).await;

    match (output.as_ref(), deployed_libraries.is_some()) {
        (Ok(output), true) => {
            reporter
                .report_post_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
-        }
+        },
        (Ok(output), false) => {
            reporter
                .report_pre_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
-        }
+        },
        (Err(err), true) => {
            reporter
                .report_post_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
-        }
+        },
        (Err(err), false) => {
            reporter
                .report_pre_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
-        }
+        },
    }

    output
}

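The SPAWN_GATE static above is a process-wide throttle: no matter how many test cases run concurrently, at most five compiler invocations are in flight at any instant. The same pattern in miniature, with a hypothetical compile_one function and assuming tokio and futures:

use std::sync::LazyLock;

use tokio::sync::Semaphore;

// Process-wide gate bounding concurrent "compiler" invocations.
static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));

async fn compile_one(input: &str) -> String {
    // At most five tasks get past this point at once; the permit is
    // released when `_permit` drops at the end of the function.
    let _permit = SPAWN_GATE.acquire().await.expect("gate is never closed");
    format!("compiled: {input}")
}

#[tokio::main]
async fn main() {
    let outputs = futures::future::join_all((0..20).map(|i| async move {
        let name = format!("c{i}.sol");
        compile_one(&name).await
    }))
    .await;
    assert_eq!(outputs.len(), 20);
}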
struct ArtifactsCache {
    path: PathBuf,
}

impl ArtifactsCache {
    pub fn new(path: impl AsRef<Path>) -> Self {
-        Self {
-            path: path.as_ref().to_path_buf(),
-        }
+        Self { path: path.as_ref().to_path_buf() }
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn with_invalidated_cache(self) -> Result<Self> {
        cacache::clear(self.path.as_path())
            .await
            .map_err(Into::<Error>::into)
            .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
        Ok(self)
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
        let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
        let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
        cacache::write(self.path.as_path(), key.encode_hex(), value)
            .await
            .with_context(|| {
                format!("Failed to write cache entry under {}", self.path.display())
            })?;
        Ok(())
    }

    pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
        let key = bson::to_vec(key).ok()?;
-        let value = cacache::read(self.path.as_path(), key.encode_hex())
-            .await
-            .ok()?;
+        let value = cacache::read(self.path.as_path(), key.encode_hex()).await.ok()?;
        let value = bson::from_slice::<CacheValue>(&value).ok()?;
        Some(value)
    }

    #[instrument(level = "debug", skip_all, err)]
    pub async fn get_or_insert_with(
        &self,
        key: &CacheKey<'_>,
        callback: impl AsyncFnOnce() -> Result<CacheValue>,
    ) -> Result<CacheValue> {
        match self.get(key).await {
            Some(value) => {
                debug!("Cache hit");
                Ok(value)
-            }
+            },
            None => {
                debug!("Cache miss");
                let value = callback().await?;
                self.insert(key, &value).await?;
                Ok(value)
-            }
+            },
        }
    }
}

#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
    /// The identifier of the used compiler.
    compiler_identifier: CompilerIdentifier,

    /// The version of the compiler that was used to compile the artifacts.
    compiler_version: Version,

    /// The path of the metadata file that the compilation artifacts are for.
    metadata_file_path: &'a Path,

    /// The mode that the compilation artifacts were compiled with.
    solc_mode: Cow<'a, Mode>,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct CacheValue {
    /// The compiler output from the compilation run.
    compiler_output: CompilerOutput,
}

impl CacheValue {
    pub fn new(compiler_output: CompilerOutput) -> Self {
        Self { compiler_output }
    }
}

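ArtifactsCache above keys each entry by the hex encoding of a BSON-serialized key struct and stores a BSON-serialized value through cacache. A rough, self-contained sketch of that round trip, with hypothetical Key and Value types, assuming the cacache, bson, hex, serde, anyhow, and tokio crates:

use serde::{Deserialize, Serialize};

// Hypothetical stand-ins for `CacheKey` and `CacheValue`.
#[derive(Serialize)]
struct Key<'a> {
    compiler_version: &'a str,
    source_path: &'a str,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Value {
    output: String,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cache_dir = "./demo-cache";

    // The on-disk key is the hex of the BSON-serialized key struct.
    let key = hex::encode(bson::to_vec(&Key {
        compiler_version: "0.8.24",
        source_path: "contract.sol",
    })?);

    // Write an entry, then read it back and decode it.
    let value = Value { output: "bytecode...".into() };
    cacache::write(cache_dir, &key, bson::to_vec(&value)?).await?;
    let raw = cacache::read(cache_dir, &key).await?;
    assert_eq!(bson::from_slice::<Value>(&raw)?, value);
    Ok(())
}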
@@ -6,28 +6,28 @@ use tracing::{info, info_span, instrument};
/// corpus files and produces a map containing all of the [`MetadataFile`]s discovered.
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
pub fn collect_metadata_files(
    context: impl AsRef<CorpusConfiguration>,
    context: impl AsRef<CorpusConfiguration>,
) -> anyhow::Result<Vec<MetadataFile>> {
    let mut metadata_files = Vec::new();
    let mut metadata_files = Vec::new();

    let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
    for path in &corpus_configuration.paths {
        let span = info_span!("Processing corpus file", path = %path.display());
        let _guard = span.enter();
    let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
    for path in &corpus_configuration.paths {
        let span = info_span!("Processing corpus file", path = %path.display());
        let _guard = span.enter();

        let corpus = Corpus::try_from_path(path)?;
        info!(
            name = corpus.name(),
            number_of_contained_paths = corpus.path_count(),
            "Deserialized corpus file"
        );
        metadata_files.extend(corpus.enumerate_tests());
    }
        let corpus = Corpus::try_from_path(path)?;
        info!(
            name = corpus.name(),
            number_of_contained_paths = corpus.path_count(),
            "Deserialized corpus file"
        );
        metadata_files.extend(corpus.enumerate_tests());
    }

    // There's a possibility that there are certain paths that all lead to the same metadata files
    // and therefore it's important that we sort them and then deduplicate them.
    metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
    metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
    // There's a possibility that there are certain paths that all lead to the same metadata files
    // and therefore it's important that we sort them and then deduplicate them.
    metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
    metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);

    Ok(metadata_files)
    Ok(metadata_files)
}

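The sort-then-dedup pair above is load-bearing: `Vec::dedup_by` only removes adjacent duplicates, so sorting by the metadata file path first is what guarantees every duplicate path collapses to a single entry. A standalone sketch of the same pattern with hypothetical data:

fn dedup_by_path(mut files: Vec<(String, u32)>) -> Vec<(String, u32)> {
    // Sorting first is required: Vec::dedup_by only removes *adjacent* duplicates.
    files.sort_by(|a, b| a.0.cmp(&b.0));
    files.dedup_by(|a, b| a.0 == b.0);
    files
}

fn main() {
    let files = vec![("b.json".into(), 1), ("a.json".into(), 2), ("b.json".into(), 3)];
    // "b.json" appears twice in the input; only its first occurrence survives.
    assert_eq!(dedup_by_path(files).len(), 2);
}
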
@@ -2,58 +2,53 @@

use std::sync::atomic::{AtomicUsize, Ordering};

use crate::Platform;
use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode;

/// The node pool starts one or more [Node] which then can be accessed
/// in a round-robin fashion.
pub struct NodePool {
    next: AtomicUsize,
    nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
    next: AtomicUsize,
    nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
}

impl NodePool {
    /// Create a new Pool. This will start as many nodes as there are workers in `config`.
    pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let nodes = concurrency_configuration.number_of_nodes;
    /// Create a new Pool. This will start as many nodes as there are workers in `config`.
    pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let nodes = concurrency_configuration.number_of_nodes;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            handles.push(platform.new_node(context)?);
        }
        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            handles.push(platform.new_node(context)?);
        }

        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .context("Node failed to spawn")?,
            );
        }
        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .context("Node failed to spawn")?,
            );
        }

        let pre_transactions_tasks = nodes
            .iter_mut()
            .map(|node| node.pre_transactions())
            .collect::<Vec<_>>();
        futures::future::try_join_all(pre_transactions_tasks)
            .await
            .context("Failed to run the pre-transactions task")?;
        let pre_transactions_tasks =
            nodes.iter_mut().map(|node| node.pre_transactions()).collect::<Vec<_>>();
        futures::future::try_join_all(pre_transactions_tasks)
            .await
            .context("Failed to run the pre-transactions task")?;

        Ok(Self {
            nodes,
            next: Default::default(),
        })
    }
        Ok(Self { nodes, next: Default::default() })
    }

    /// Get a handle to the next node.
    pub fn round_robbin(&self) -> &dyn EthereumNode {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap().as_ref()
    }
    /// Get a handle to the next node.
    pub fn round_robbin(&self) -> &dyn EthereumNode {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap().as_ref()
    }
}

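The `round_robbin` accessor above (the crate's actual method name) needs no locking: a single `fetch_add` hands every caller a distinct ticket, and the modulo maps it onto a node index. A standalone sketch of that selection logic, with a hypothetical `Vec<String>` standing in for the boxed nodes:

use std::sync::atomic::{AtomicUsize, Ordering};

struct Pool {
    next: AtomicUsize,
    nodes: Vec<String>,
}

impl Pool {
    fn next_node(&self) -> &str {
        // fetch_add returns the previous value, so concurrent callers each get
        // a unique ticket; the modulo wraps it onto the node list.
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        &self.nodes[current]
    }
}

fn main() {
    let pool = Pool { next: AtomicUsize::new(0), nodes: vec!["a".into(), "b".into()] };
    assert_eq!(pool.next_node(), "a");
    assert_eq!(pool.next_node(), "b");
    assert_eq!(pool.next_node(), "a");
}
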
+252
-271
@@ -1,178 +1,163 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{borrow::Cow, path::Path};
use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};

use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json};

use revive_dt_compiler::Mode;
use revive_dt_compiler::SolidityCompiler;
use revive_dt_compiler::{Mode, SolidityCompiler};
use revive_dt_format::{
    case::{Case, CaseIdx},
    metadata::MetadataFile,
    case::{Case, CaseIdx},
    metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};

use crate::Platform;
use crate::helpers::NodePool;
use crate::{Platform, helpers::NodePool};

pub async fn create_test_definitions_stream<'a>(
    // This is only required for creating the compiler objects and is not used anywhere else in the
    // function.
    context: &Context,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
    platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
    reporter: Reporter,
    // This is only required for creating the compiler objects and is not used anywhere else in the
    // function.
    context: &Context,
    metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
    platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
    reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
    stream::iter(
        metadata_files
            .into_iter()
            // Flatten over the cases.
            .flat_map(|metadata_file| {
                metadata_file
                    .cases
                    .iter()
                    .enumerate()
                    .map(move |(case_idx, case)| (metadata_file, case_idx, case))
            })
            // Flatten over the modes, prefer the case modes over the metadata file modes.
            .flat_map(move |(metadata_file, case_idx, case)| {
                let reporter = reporter.clone();
    stream::iter(
        metadata_files
            .into_iter()
            // Flatten over the cases.
            .flat_map(|metadata_file| {
                metadata_file
                    .cases
                    .iter()
                    .enumerate()
                    .map(move |(case_idx, case)| (metadata_file, case_idx, case))
            })
            // Flatten over the modes, prefer the case modes over the metadata file modes.
            .flat_map(move |(metadata_file, case_idx, case)| {
                let reporter = reporter.clone();

                let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
                let modes = match modes {
                    Some(modes) => EitherIter::A(
                        ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                    ),
                    None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
                };
                let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
                let modes = match modes {
                    Some(modes) => EitherIter::A(
                        ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
                    ),
                    None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
                };

                modes.into_iter().map(move |mode| {
                    (
                        metadata_file,
                        case_idx,
                        case,
                        mode.clone(),
                        reporter.test_specific_reporter(Arc::new(TestSpecifier {
                            solc_mode: mode.as_ref().clone(),
                            metadata_file_path: metadata_file.metadata_file_path.clone(),
                            case_idx: CaseIdx::new(case_idx),
                        })),
                    )
                })
            })
            // Inform the reporter of each one of the test cases that were discovered which we expect to
            // run.
            .inspect(|(_, _, _, _, reporter)| {
                reporter
                    .report_test_case_discovery_event()
                    .expect("Can't fail");
            }),
    )
    // Creating the Test Definition objects from all of the various objects we have and creating
    // their required dependencies (e.g., compiler).
    .filter_map(
        move |(metadata_file, case_idx, case, mode, reporter)| async move {
            let mut platforms = BTreeMap::new();
            for (platform, node_pool) in platforms_and_nodes.values() {
                let node = node_pool.round_robbin();
                let compiler = platform
                    .new_compiler(context.clone(), mode.version.clone().map(Into::into))
                    .await
                    .inspect_err(|err| {
                        error!(
                            ?err,
                            platform_identifier = %platform.platform_identifier(),
                            "Failed to instantiate the compiler"
                        )
                    })
                    .ok()?;
                modes.into_iter().map(move |mode| {
                    (
                        metadata_file,
                        case_idx,
                        case,
                        mode.clone(),
                        reporter.test_specific_reporter(Arc::new(TestSpecifier {
                            solc_mode: mode.as_ref().clone(),
                            metadata_file_path: metadata_file.metadata_file_path.clone(),
                            case_idx: CaseIdx::new(case_idx),
                        })),
                    )
                })
            })
            // Inform the reporter of each one of the test cases that were discovered which we
            // expect to run.
            .inspect(|(_, _, _, _, reporter)| {
                reporter.report_test_case_discovery_event().expect("Can't fail");
            }),
    )
    // Creating the Test Definition objects from all of the various objects we have and creating
    // their required dependencies (e.g., compiler).
    .filter_map(move |(metadata_file, case_idx, case, mode, reporter)| async move {
        let mut platforms = BTreeMap::new();
        for (platform, node_pool) in platforms_and_nodes.values() {
            let node = node_pool.round_robbin();
            let compiler = platform
                .new_compiler(context.clone(), mode.version.clone().map(Into::into))
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        platform_identifier = %platform.platform_identifier(),
                        "Failed to instantiate the compiler"
                    )
                })
                .ok()?;

            reporter
                .report_node_assigned_event(
                    node.id(),
                    platform.platform_identifier(),
                    node.connection_string(),
                )
                .expect("Can't fail");
            reporter
                .report_node_assigned_event(
                    node.id(),
                    platform.platform_identifier(),
                    node.connection_string(),
                )
                .expect("Can't fail");

            let reporter =
                reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
            let reporter =
                reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

            platforms.insert(
                platform.platform_identifier(),
                TestPlatformInformation {
                    platform: *platform,
                    node,
                    compiler,
                    reporter,
                },
            );
        }
            platforms.insert(
                platform.platform_identifier(),
                TestPlatformInformation { platform: *platform, node, compiler, reporter },
            );
        }

        Some(TestDefinition {
            /* Metadata file information */
            metadata: metadata_file,
            metadata_file_path: metadata_file.metadata_file_path.as_path(),
        Some(TestDefinition {
            /* Metadata file information */
            metadata: metadata_file,
            metadata_file_path: metadata_file.metadata_file_path.as_path(),

            /* Mode Information */
            mode: mode.clone(),
            /* Mode Information */
            mode: mode.clone(),

            /* Case Information */
            case_idx: CaseIdx::new(case_idx),
            case,
            /* Case Information */
            case_idx: CaseIdx::new(case_idx),
            case,

            /* Platform and Node Assignment Information */
            platforms,
            /* Platform and Node Assignment Information */
            platforms,

            /* Reporter */
            reporter,
        })
    },
    )
    // Filter out the test cases which are incompatible or that can't run in the current setup.
    .filter_map(move |test| async move {
        match test.check_compatibility() {
            Ok(()) => Some(test),
            Err((reason, additional_information)) => {
                debug!(
                    metadata_file_path = %test.metadata.metadata_file_path.display(),
                    case_idx = %test.case_idx,
                    mode = %test.mode,
                    reason,
                    additional_information =
                        serde_json::to_string(&additional_information).unwrap(),
                    "Ignoring Test Case"
                );
                test.reporter
                    .report_test_ignored_event(
                        reason.to_string(),
                        additional_information
                            .into_iter()
                            .map(|(k, v)| (k.into(), v))
                            .collect::<IndexMap<_, _>>(),
                    )
                    .expect("Can't fail");
                None
            }
        }
    })
    .inspect(|test| {
        info!(
            metadata_file_path = %test.metadata_file_path.display(),
            case_idx = %test.case_idx,
            mode = %test.mode,
            "Created a test case definition"
        );
    })
        /* Reporter */
        reporter,
        })
    })
    // Filter out the test cases which are incompatible or that can't run in the current setup.
    .filter_map(move |test| async move {
        match test.check_compatibility() {
            Ok(()) => Some(test),
            Err((reason, additional_information)) => {
                debug!(
                    metadata_file_path = %test.metadata.metadata_file_path.display(),
                    case_idx = %test.case_idx,
                    mode = %test.mode,
                    reason,
                    additional_information =
                        serde_json::to_string(&additional_information).unwrap(),
                    "Ignoring Test Case"
                );
                test.reporter
                    .report_test_ignored_event(
                        reason.to_string(),
                        additional_information
                            .into_iter()
                            .map(|(k, v)| (k.into(), v))
                            .collect::<IndexMap<_, _>>(),
                    )
                    .expect("Can't fail");
                None
            },
        }
    })
    .inspect(|test| {
        info!(
            metadata_file_path = %test.metadata_file_path.display(),
            case_idx = %test.case_idx,
            mode = %test.mode,
            "Created a test case definition"
        );
    })
}

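The pipeline above is a fan-out/filter shape: the nested `flat_map`s multiply each metadata file into one work item per (case, mode) pair, `filter_map` drops the combinations that cannot run, and `inspect` logs what survives. A minimal sketch of that same shape using the `futures` StreamExt combinators (hypothetical data; assumes the tokio and futures crates as dependencies):

use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
    let definitions: Vec<_> = stream::iter(1..=3)
        // Fan out: one item per (case, mode) pair, like the nested flat_maps above.
        .flat_map(|case| stream::iter(["O0", "O3"].map(move |mode| (case, mode))))
        // Drop incompatible combinations, like the compatibility filter_map above.
        .filter_map(|(case, mode)| async move { (case != 2).then_some((case, mode)) })
        // Observe what survived, like the final inspect above.
        .inspect(|def| println!("running {def:?}"))
        .collect()
        .await;
    // Cases 1 and 3 each yield two modes; case 2 was filtered out.
    assert_eq!(definitions.len(), 4);
}
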
/// This is a full description of a differential test to run alongside the full metadata file, the
@@ -180,146 +165,142 @@ pub async fn create_test_definitions_stream<'a>(
/// these platforms that they should run on, the compilers to use, and everything else needed to make
/// it a complete description.
pub struct TestDefinition<'a> {
    /* Metadata file information */
    pub metadata: &'a MetadataFile,
    pub metadata_file_path: &'a Path,
    /* Metadata file information */
    pub metadata: &'a MetadataFile,
    pub metadata_file_path: &'a Path,

    /* Mode Information */
    pub mode: Cow<'a, Mode>,
    /* Mode Information */
    pub mode: Cow<'a, Mode>,

    /* Case Information */
    pub case_idx: CaseIdx,
    pub case: &'a Case,
    /* Case Information */
    pub case_idx: CaseIdx,
    pub case: &'a Case,

    /* Platform and Node Assignment Information */
    pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,
    /* Platform and Node Assignment Information */
    pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,

    /* Reporter */
    pub reporter: TestSpecificReporter,
    /* Reporter */
    pub reporter: TestSpecificReporter,
}

impl<'a> TestDefinition<'a> {
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        Ok(())
    }
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }
    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }
    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(required_vm_identifiers) => {
                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
                }
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(required_vm_identifiers) =>
                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier()),
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms does not support the targets allowed by the test.",
                error_map,
            ))
        }
    }
        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms does not support the targets allowed by the test.",
                error_map,
            ))
        }
    }

    // Checks for the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };
    // Checks for the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };

        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform =
                evm_version_requirement.matches(&platform_information.node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform =
                evm_version_requirement.matches(&platform_information.node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible for the platforms specified",
                error_map,
            ))
        }
    }
        if is_allowed {
            Ok(())
        } else {
            Err(("EVM version is incompatible for the platforms specified", error_map))
        }
    }

    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = platform_information
                .compiler
                .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }
    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = platform_information
                .compiler
                .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "Compilers do not support this mode for the provided platforms.",
                error_map,
            ))
        }
    }
        if is_allowed {
            Ok(())
        } else {
            Err((
                "Compilers do not support this mode for the provided platforms.",
                error_map,
            ))
        }
    }
}

pub struct TestPlatformInformation<'a> {
    pub platform: &'a dyn Platform,
    pub node: &'a dyn EthereumNode,
    pub compiler: Box<dyn SolidityCompiler>,
    pub reporter: ExecutionSpecificReporter,
    pub platform: &'a dyn Platform,
    pub node: &'a dyn EthereumNode,
    pub compiler: Box<dyn SolidityCompiler>,
    pub reporter: ExecutionSpecificReporter,
}

type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;

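The `TestCheckFunctionResult` alias above gives every check the same shape: `Ok(())` to proceed, or a static reason string plus a structured detail map explaining the skip, which lets `check_compatibility` chain them with `?`. A minimal, self-contained sketch of that pattern with a hypothetical check (assumes the indexmap and serde_json crates):

use indexmap::IndexMap;
use serde_json::{Value, json};

type CheckResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;

fn check_version(required: u32, actual: u32) -> CheckResult {
    if actual >= required {
        Ok(())
    } else {
        let mut details = IndexMap::new();
        details.insert("required", json!(required));
        details.insert("actual", json!(actual));
        Err(("version too old", details))
    }
}

fn check_all() -> CheckResult {
    // `?` short-circuits on the first failing check, as in check_compatibility.
    check_version(2, 3)?;
    check_version(5, 3)?;
    Ok(())
}

fn main() {
    let (reason, details) = check_all().unwrap_err();
    println!("skipped: {reason}, details: {details:?}");
}
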
+377
-403
@@ -3,9 +3,12 @@
|
||||
//! This crate defines the testing configuration and
|
||||
//! provides a helper utility to execute tests.
|
||||
|
||||
pub mod differential_tests;
|
||||
pub mod helpers;
|
||||
|
||||
use std::{
|
||||
pin::Pin,
|
||||
thread::{self, JoinHandle},
|
||||
pin::Pin,
|
||||
thread::{self, JoinHandle},
|
||||
};
|
||||
|
||||
use alloy::genesis::Genesis;
|
||||
@@ -14,516 +17,487 @@ use revive_dt_common::types::*;
|
||||
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
|
||||
use revive_dt_config::*;
|
||||
use revive_dt_node::{
|
||||
Node, node_implementations::geth::GethNode,
|
||||
node_implementations::lighthouse_geth::LighthouseGethNode,
|
||||
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombieNode,
|
||||
Node,
|
||||
node_implementations::{
|
||||
geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
|
||||
zombienet::ZombieNode,
|
||||
},
|
||||
};
|
||||
use revive_dt_node_interaction::EthereumNode;
|
||||
use tracing::info;
|
||||
|
||||
pub use helpers::CachedCompiler;
|
||||
|
||||
/// A trait that describes the interface for the platforms that are supported by the tool.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub trait Platform {
|
||||
/// Returns the identifier of this platform. This is a combination of the node and the compiler
|
||||
/// used.
|
||||
fn platform_identifier(&self) -> PlatformIdentifier;
|
||||
/// Returns the identifier of this platform. This is a combination of the node and the compiler
|
||||
/// used.
|
||||
fn platform_identifier(&self) -> PlatformIdentifier;
|
||||
|
||||
/// Returns a full identifier for the platform.
|
||||
fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
|
||||
(
|
||||
self.node_identifier(),
|
||||
self.vm_identifier(),
|
||||
self.compiler_identifier(),
|
||||
)
|
||||
}
|
||||
/// Returns a full identifier for the platform.
|
||||
fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
|
||||
(self.node_identifier(), self.vm_identifier(), self.compiler_identifier())
|
||||
}
|
||||
|
||||
/// Returns the identifier of the node used.
|
||||
fn node_identifier(&self) -> NodeIdentifier;
|
||||
/// Returns the identifier of the node used.
|
||||
fn node_identifier(&self) -> NodeIdentifier;
|
||||
|
||||
/// Returns the identifier of the vm used.
|
||||
fn vm_identifier(&self) -> VmIdentifier;
|
||||
/// Returns the identifier of the vm used.
|
||||
fn vm_identifier(&self) -> VmIdentifier;
|
||||
|
||||
/// Returns the identifier of the compiler used.
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier;
|
||||
/// Returns the identifier of the compiler used.
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier;
|
||||
|
||||
/// Creates a new node for the platform by spawning a new thread, creating the node object,
|
||||
/// initializing it, spawning it, and waiting for it to start up.
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;
|
||||
/// Creates a new node for the platform by spawning a new thread, creating the node object,
|
||||
/// initializing it, spawning it, and waiting for it to start up.
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;
|
||||
|
||||
/// Creates a new compiler for the provided platform
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
|
||||
/// Creates a new compiler for the provided platform
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct GethEvmSolcPlatform;
|
||||
|
||||
impl Platform for GethEvmSolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::GethEvmSolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::GethEvmSolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Geth
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Geth
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = GethNode::new(context);
|
||||
let node = spawn_node::<GethNode>(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = GethNode::new(context);
|
||||
let node = spawn_node::<GethNode>(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct LighthouseGethEvmSolcPlatform;
|
||||
|
||||
impl Platform for LighthouseGethEvmSolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::LighthouseGethEvmSolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::LighthouseGethEvmSolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::LighthouseGeth
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::LighthouseGeth
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = LighthouseGethNode::new(context);
|
||||
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = LighthouseGethNode::new(context);
|
||||
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct KitchensinkPolkavmResolcPlatform;
|
||||
|
||||
impl Platform for KitchensinkPolkavmResolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::KitchensinkPolkavmResolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::KitchensinkPolkavmResolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Kitchensink
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Kitchensink
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
kitchensink_path,
|
||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
kitchensink_path,
|
||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Resolc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Resolc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct KitchensinkRevmSolcPlatform;
|
||||
|
||||
impl Platform for KitchensinkRevmSolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::KitchensinkRevmSolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::KitchensinkRevmSolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Kitchensink
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Kitchensink
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
kitchensink_path,
|
||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
kitchensink_path,
|
||||
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct ReviveDevNodePolkavmResolcPlatform;
|
||||
|
||||
impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ReviveDevNodePolkavmResolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ReviveDevNodePolkavmResolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::ReviveDevNode
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::ReviveDevNode
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
revive_dev_node_path,
|
||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let revive_dev_node_path =
|
||||
AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
revive_dev_node_path,
|
||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Resolc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Resolc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct ReviveDevNodeRevmSolcPlatform;
|
||||
|
||||
impl Platform for ReviveDevNodeRevmSolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ReviveDevNodeRevmSolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ReviveDevNodeRevmSolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::ReviveDevNode
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::ReviveDevNode
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
revive_dev_node_path,
|
||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let revive_dev_node_path =
|
||||
AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = SubstrateNode::new(
|
||||
revive_dev_node_path,
|
||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||
context,
|
||||
);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct ZombienetPolkavmResolcPlatform;
|
||||
|
||||
impl Platform for ZombienetPolkavmResolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ZombienetPolkavmResolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ZombienetPolkavmResolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Zombienet
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Zombienet
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::PolkaVM
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Resolc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = ZombieNode::new(polkadot_parachain_path, context);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let polkadot_parachain_path =
|
||||
AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = ZombieNode::new(polkadot_parachain_path, context);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||
pub struct ZombienetRevmSolcPlatform;
|
||||
|
||||
impl Platform for ZombienetRevmSolcPlatform {
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ZombienetRevmSolc
|
||||
}
|
||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||
PlatformIdentifier::ZombienetRevmSolc
|
||||
}
|
||||
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Zombienet
|
||||
}
|
||||
fn node_identifier(&self) -> NodeIdentifier {
|
||||
NodeIdentifier::Zombienet
|
||||
}
|
||||
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
fn vm_identifier(&self) -> VmIdentifier {
|
||||
VmIdentifier::Evm
|
||||
}
|
||||
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||
CompilerIdentifier::Solc
|
||||
}
|
||||
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||
.path
|
||||
.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = ZombieNode::new(polkadot_parachain_path, context);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
fn new_node(
|
||||
&self,
|
||||
context: Context,
|
||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||
let polkadot_parachain_path =
|
||||
AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
|
||||
let genesis = genesis_configuration.genesis()?.clone();
|
||||
Ok(thread::spawn(move || {
|
||||
let node = ZombieNode::new(polkadot_parachain_path, context);
|
||||
let node = spawn_node(node, genesis)?;
|
||||
Ok(Box::new(node) as Box<_>)
|
||||
}))
|
||||
}
|
||||
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
fn new_compiler(
|
||||
&self,
|
||||
context: Context,
|
||||
version: Option<VersionOrRequirement>,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||
Box::pin(async move {
|
||||
let compiler = Solc::new(context, version).await;
|
||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PlatformIdentifier> for Box<dyn Platform> {
|
||||
fn from(value: PlatformIdentifier) -> Self {
|
||||
match value {
|
||||
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
|
||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
||||
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::KitchensinkRevmSolc => {
|
||||
Box::new(KitchensinkRevmSolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
||||
}
|
||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
|
||||
}
|
||||
            PlatformIdentifier::ZombienetPolkavmResolc => {
                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
            }
            PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
        }
    }
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
            PlatformIdentifier::LighthouseGethEvmSolc =>
                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>,
            PlatformIdentifier::KitchensinkPolkavmResolc =>
                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>,
            PlatformIdentifier::KitchensinkRevmSolc =>
                Box::new(KitchensinkRevmSolcPlatform) as Box<_>,
            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>,
            PlatformIdentifier::ReviveDevNodeRevmSolc =>
                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>,
            PlatformIdentifier::ZombienetPolkavmResolc =>
                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>,
            PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
        }
    }
}

impl From<PlatformIdentifier> for &dyn Platform {
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
            PlatformIdentifier::LighthouseGethEvmSolc => {
                &LighthouseGethEvmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::KitchensinkPolkavmResolc => {
                &KitchensinkPolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::KitchensinkRevmSolc => {
                &KitchensinkRevmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ReviveDevNodeRevmSolc => {
                &ReviveDevNodeRevmSolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ZombienetPolkavmResolc => {
                &ZombienetPolkavmResolcPlatform as &dyn Platform
            }
            PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
        }
    }
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
            PlatformIdentifier::LighthouseGethEvmSolc =>
                &LighthouseGethEvmSolcPlatform as &dyn Platform,
            PlatformIdentifier::KitchensinkPolkavmResolc =>
                &KitchensinkPolkavmResolcPlatform as &dyn Platform,
            PlatformIdentifier::KitchensinkRevmSolc =>
                &KitchensinkRevmSolcPlatform as &dyn Platform,
            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform,
            PlatformIdentifier::ReviveDevNodeRevmSolc =>
                &ReviveDevNodeRevmSolcPlatform as &dyn Platform,
            PlatformIdentifier::ZombienetPolkavmResolc =>
                &ZombienetPolkavmResolcPlatform as &dyn Platform,
            PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
        }
    }
}

fn spawn_node<T: Node + EthereumNode + Send + Sync>(
    mut node: T,
    genesis: Genesis,
    mut node: T,
    genesis: Genesis,
) -> anyhow::Result<T> {
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawning node"
    );
    node.spawn(genesis)
        .context("Failed to spawn node process")?;
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
        "Spawned node"
    );
    Ok(node)
    info!(id = node.id(), connection_string = node.connection_string(), "Spawning node");
    node.spawn(genesis).context("Failed to spawn node process")?;
    info!(id = node.id(), connection_string = node.connection_string(), "Spawned node");
    Ok(node)
}

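A minimal usage sketch of the conversion impls in the hunk above; the call site below is hypothetical and not part of this commit, and it assumes `Platform` and `PlatformIdentifier` are in scope as in the hunk (and that the first impl above targets `Box<dyn Platform>`, as its match arms suggest):

```rust
// Hypothetical call site: both lines rely only on the `From` impls shown above.
fn select(id: PlatformIdentifier) {
    let by_ref: &dyn Platform = id.into(); // borrows a static unit struct
    let boxed: Box<dyn Platform> = PlatformIdentifier::GethEvmSolc.into(); // owned trait object
    let _ = (by_ref, boxed);
}
```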
+55
-55
@@ -13,69 +13,69 @@ use revive_dt_core::Platform;
use revive_dt_format::metadata::Metadata;

use crate::{
    differential_benchmarks::handle_differential_benchmarks,
    differential_tests::handle_differential_tests,
    differential_benchmarks::handle_differential_benchmarks,
    differential_tests::handle_differential_tests,
};

fn main() -> anyhow::Result<()> {
    let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
        .lossy(false)
        // Assuming that each line contains 255 characters and that each character is one byte, then
        // this means that our buffer is about 4GBs large.
        .buffered_lines_limit(0x1000000)
        .thread_name("buffered writer")
        .finish(std::io::stdout());
    let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
        .lossy(false)
        // Assuming that each line contains 255 characters and that each character is one byte, then
        // this means that our buffer is about 4GBs large.
        .buffered_lines_limit(0x1000000)
        .thread_name("buffered writer")
        .finish(std::io::stdout());

    let subscriber = FmtSubscriber::builder()
        .with_writer(writer)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_env_filter(EnvFilter::from_default_env())
        .with_ansi(false)
        .pretty()
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    info!("Differential testing tool is starting");
    let subscriber = FmtSubscriber::builder()
        .with_writer(writer)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_env_filter(EnvFilter::from_default_env())
        .with_ansi(false)
        .pretty()
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;
    info!("Differential testing tool is starting");

    let context = Context::try_parse()?;
    let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
    let context = Context::try_parse()?;
    let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();

    match context {
        Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_tests_handling_task =
                    handle_differential_tests(*context, reporter);
    match context {
        Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_tests_handling_task =
                    handle_differential_tests(*context, reporter);

                futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
                    .await?;
                futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
                    .await?;

                Ok(())
            }),
        Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_benchmarks_handling_task =
                    handle_differential_benchmarks(*context, reporter);
                Ok(())
            }),
        Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
            .worker_threads(context.concurrency_configuration.number_of_threads)
            .enable_all()
            .build()
            .expect("Failed building the Runtime")
            .block_on(async move {
                let differential_benchmarks_handling_task =
                    handle_differential_benchmarks(*context, reporter);

                futures::future::try_join(
                    differential_benchmarks_handling_task,
                    report_aggregator_task,
                )
                .await?;
                futures::future::try_join(
                    differential_benchmarks_handling_task,
                    report_aggregator_task,
                )
                .await?;

                Ok(())
            }),
        Context::ExportJsonSchema => {
            let schema = schema_for!(Metadata);
            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
            Ok(())
        }
    }
            Ok(())
        }),
        Context::ExportJsonSchema => {
            let schema = schema_for!(Metadata);
            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
            Ok(())
        },
    }
}

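A quick sanity check of the buffer-size comment in the hunk above: `0x1000000` buffered lines is 16,777,216 lines, and at the comment's assumed 255 bytes per line that is 4,278,190,080 bytes, roughly 4.3 GB, which matches the "about 4GBs" estimate:

```rust
// Worked arithmetic for the comment above; 255 bytes/line is the comment's own assumption.
const BUFFERED_LINES: u64 = 0x1000000; // 16_777_216
const BYTES_PER_LINE: u64 = 255;
const WORST_CASE_BYTES: u64 = BUFFERED_LINES * BYTES_PER_LINE; // 4_278_190_080 ≈ 4.3 GB
```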
+81
-87
@@ -7,108 +7,102 @@ use crate::{mode::ParsedMode, steps::*};

#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
    /// An optional name of the test case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// An optional name of the test case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// An optional comment on the case which has no impact on the execution in any way.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// An optional comment on the case which has no impact on the execution in any way.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    /// This represents a mode that has been parsed from test metadata.
    ///
    /// Mode strings can take the following form (in pseudo-regex):
    ///
    /// ```text
    /// [YEILV][+-]? (M[0123sz])? <semver>?
    /// ```
    ///
    /// If this is provided then it takes higher priority than the modes specified in the metadata
    /// file.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub modes: Option<Vec<ParsedMode>>,
    /// This represents a mode that has been parsed from test metadata.
    ///
    /// Mode strings can take the following form (in pseudo-regex):
    ///
    /// ```text
    /// [YEILV][+-]? (M[0123sz])? <semver>?
    /// ```
    ///
    /// If this is provided then it takes higher priority than the modes specified in the metadata
    /// file.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub modes: Option<Vec<ParsedMode>>,

    /// The set of steps to run as part of this test case.
    #[serde(rename = "inputs")]
    pub steps: Vec<Step>,
    /// The set of steps to run as part of this test case.
    #[serde(rename = "inputs")]
    pub steps: Vec<Step>,

    /// An optional name of the group of tests that this test belongs to.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group: Option<String>,
    /// An optional name of the group of tests that this test belongs to.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group: Option<String>,

    /// An optional set of expectations and assertions to make about the transaction after it ran.
    ///
    /// If this is not specified then the only assertion that will be ran is that the transaction
    /// was successful.
    ///
    /// This expectation that's on the case itself will be attached to the final step of the case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expected: Option<Expected>,
    /// An optional set of expectations and assertions to make about the transaction after it ran.
    ///
    /// If this is not specified then the only assertion that will be ran is that the transaction
    /// was successful.
    ///
    /// This expectation that's on the case itself will be attached to the final step of the case.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expected: Option<Expected>,

    /// An optional boolean which defines if the case as a whole should be ignored. If null then the
    /// case will not be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ignore: Option<bool>,
    /// An optional boolean which defines if the case as a whole should be ignored. If null then
    /// the case will not be ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ignore: Option<bool>,
}

impl Case {
    pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
        let steps_len = self.steps.len();
        self.steps
            .clone()
            .into_iter()
            .enumerate()
            .map(move |(idx, mut step)| {
                let Step::FunctionCall(ref mut input) = step else {
                    return step;
                };
    pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
        let steps_len = self.steps.len();
        self.steps.clone().into_iter().enumerate().map(move |(idx, mut step)| {
            let Step::FunctionCall(ref mut input) = step else {
                return step;
            };

            if idx + 1 == steps_len {
                if input.expected.is_none() {
                    input.expected = self.expected.clone();
                }
            if idx + 1 == steps_len {
                if input.expected.is_none() {
                    input.expected = self.expected.clone();
                }

                // TODO: What does it mean for us to have an `expected` field on the case itself
                // but the final input also has an expected field that doesn't match the one on
                // the case? What are we supposed to do with that final expected field on the
                // case?
                // TODO: What does it mean for us to have an `expected` field on the case itself
                // but the final input also has an expected field that doesn't match the one on
                // the case? What are we supposed to do with that final expected field on the
                // case?

                step
            } else {
                step
            }
        })
    }
                step
            } else {
                step
            }
        })
    }

    pub fn steps_iterator_for_benchmarks(
        &self,
        default_repeat_count: usize,
    ) -> Box<dyn Iterator<Item = Step> + '_> {
        let contains_repeat = self
            .steps_iterator()
            .any(|step| matches!(&step, Step::Repeat(..)));
        if contains_repeat {
            Box::new(self.steps_iterator()) as Box<_>
        } else {
            Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                comment: None,
                repeat: default_repeat_count,
                steps: self.steps_iterator().collect(),
            })))) as Box<_>
        }
    }
    pub fn steps_iterator_for_benchmarks(
        &self,
        default_repeat_count: usize,
    ) -> Box<dyn Iterator<Item = Step> + '_> {
        let contains_repeat = self.steps_iterator().any(|step| matches!(&step, Step::Repeat(..)));
        if contains_repeat {
            Box::new(self.steps_iterator()) as Box<_>
        } else {
            Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                comment: None,
                repeat: default_repeat_count,
                steps: self.steps_iterator().collect(),
            })))) as Box<_>
        }
    }

    pub fn solc_modes(&self) -> Vec<Mode> {
        match &self.modes {
            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
            None => Mode::all().cloned().collect(),
        }
    }
    pub fn solc_modes(&self) -> Vec<Mode> {
        match &self.modes {
            Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
            None => Mode::all().cloned().collect(),
        }
    }
}

define_wrapper_type!(
    /// A wrapper type for the index of test cases found in metadata file.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CaseIdx(usize) impl Display, FromStr;
    /// A wrapper type for the index of test cases found in metadata file.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CaseIdx(usize) impl Display, FromStr;
);

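To make the `Case` shape above concrete, here is a hypothetical metadata snippet (all values illustrative, and the step objects elided). Note that the steps serialize under the `inputs` key because of the `#[serde(rename = "inputs")]` attribute, and that a case-level `expected` is attached to the final function-call step when that step does not set its own; `name`, `comment`, `group`, and `expected` are all optional:

```json
{
  "name": "example-case",
  "modes": ["Y+ M3 >=0.8.0"],
  "inputs": [],
  "ignore": false
}
```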
+98
-106
@@ -1,6 +1,6 @@
use std::{
    fs::File,
    path::{Path, PathBuf},
    fs::File,
    path::{Path, PathBuf},
};

use revive_dt_common::iterators::FilesWithExtensionIterator;
@@ -13,119 +13,111 @@ use anyhow::Context as _;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Corpus {
    SinglePath { name: String, path: PathBuf },
    MultiplePaths { name: String, paths: Vec<PathBuf> },
    SinglePath { name: String, path: PathBuf },
    MultiplePaths { name: String, paths: Vec<PathBuf> },
}

impl Corpus {
    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
        let mut corpus = File::open(file_path.as_ref())
            .map_err(anyhow::Error::from)
            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
            .with_context(|| {
                format!(
                    "Failed to open and deserialize corpus file at {}",
                    file_path.as_ref().display()
                )
            })?;
    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
        let mut corpus = File::open(file_path.as_ref())
            .map_err(anyhow::Error::from)
            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
            .with_context(|| {
                format!(
                    "Failed to open and deserialize corpus file at {}",
                    file_path.as_ref().display()
                )
            })?;

        let corpus_directory = file_path
            .as_ref()
            .canonicalize()
            .context("Failed to canonicalize the path to the corpus file")?
            .parent()
            .context("Corpus file has no parent")?
            .to_path_buf();
        let corpus_directory = file_path
            .as_ref()
            .canonicalize()
            .context("Failed to canonicalize the path to the corpus file")?
            .parent()
            .context("Corpus file has no parent")?
            .to_path_buf();

        for path in corpus.paths_iter_mut() {
            *path = corpus_directory.join(path.as_path())
        }
        for path in corpus.paths_iter_mut() {
            *path = corpus_directory.join(path.as_path())
        }

        Ok(corpus)
    }
        Ok(corpus)
    }

    pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
        let mut tests = self
            .paths_iter()
            .flat_map(|root_path| {
                if !root_path.is_dir() {
                    Box::new(std::iter::once(root_path.to_path_buf()))
                        as Box<dyn Iterator<Item = _>>
                } else {
                    Box::new(
                        FilesWithExtensionIterator::new(root_path)
                            .with_use_cached_fs(true)
                            .with_allowed_extension("sol")
                            .with_allowed_extension("json"),
                    )
                }
                .map(move |metadata_file_path| (root_path, metadata_file_path))
            })
            .filter_map(|(root_path, metadata_file_path)| {
                Metadata::try_from_file(&metadata_file_path)
                    .or_else(|| {
                        debug!(
                            discovered_from = %root_path.display(),
                            metadata_file_path = %metadata_file_path.display(),
                            "Skipping file since it doesn't contain valid metadata"
                        );
                        None
                    })
                    .map(|metadata| MetadataFile {
                        metadata_file_path,
                        corpus_file_path: root_path.to_path_buf(),
                        content: metadata,
                    })
                    .inspect(|metadata_file| {
                        debug!(
                            metadata_file_path = %metadata_file.relative_path().display(),
                            "Loaded metadata file"
                        )
                    })
            })
            .collect::<Vec<_>>();
        tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
        tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
        info!(
            len = tests.len(),
            corpus_name = self.name(),
            "Found tests in Corpus"
        );
        tests
    }
    pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
        let mut tests = self
            .paths_iter()
            .flat_map(|root_path| {
                if !root_path.is_dir() {
                    Box::new(std::iter::once(root_path.to_path_buf()))
                        as Box<dyn Iterator<Item = _>>
                } else {
                    Box::new(
                        FilesWithExtensionIterator::new(root_path)
                            .with_use_cached_fs(true)
                            .with_allowed_extension("sol")
                            .with_allowed_extension("json"),
                    )
                }
                .map(move |metadata_file_path| (root_path, metadata_file_path))
            })
            .filter_map(|(root_path, metadata_file_path)| {
                Metadata::try_from_file(&metadata_file_path)
                    .or_else(|| {
                        debug!(
                            discovered_from = %root_path.display(),
                            metadata_file_path = %metadata_file_path.display(),
                            "Skipping file since it doesn't contain valid metadata"
                        );
                        None
                    })
                    .map(|metadata| MetadataFile {
                        metadata_file_path,
                        corpus_file_path: root_path.to_path_buf(),
                        content: metadata,
                    })
                    .inspect(|metadata_file| {
                        debug!(
                            metadata_file_path = %metadata_file.relative_path().display(),
                            "Loaded metadata file"
                        )
                    })
            })
            .collect::<Vec<_>>();
        tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
        tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
        info!(len = tests.len(), corpus_name = self.name(), "Found tests in Corpus");
        tests
    }

    pub fn name(&self) -> &str {
        match self {
            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
        }
    }
    pub fn name(&self) -> &str {
        match self {
            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
        }
    }

    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
        match self {
            Corpus::SinglePath { path, .. } => {
                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
            }
            Corpus::MultiplePaths { paths, .. } => {
                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
            }
        }
    }
    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
        match self {
            Corpus::SinglePath { path, .. } =>
                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>,
            Corpus::MultiplePaths { paths, .. } =>
                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>,
        }
    }

    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
        match self {
            Corpus::SinglePath { path, .. } => {
                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
            }
            Corpus::MultiplePaths { paths, .. } => {
                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
            }
        }
    }
    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
        match self {
            Corpus::SinglePath { path, .. } =>
                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>,
            Corpus::MultiplePaths { paths, .. } =>
                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>,
        }
    }

    pub fn path_count(&self) -> usize {
        match self {
            Corpus::SinglePath { .. } => 1,
            Corpus::MultiplePaths { paths, .. } => paths.len(),
        }
    }
    pub fn path_count(&self) -> usize {
        match self {
            Corpus::SinglePath { .. } => 1,
            Corpus::MultiplePaths { paths, .. } => paths.len(),
        }
    }
}

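Because `Corpus` is `#[serde(untagged)]`, a corpus file is matched purely by shape: an object with a `path` string deserializes as `SinglePath`, one with a `paths` array as `MultiplePaths`. A hypothetical corpus file of the second kind, whose relative paths `try_from_path` resolves against the corpus file's own directory:

```json
{
  "name": "example-corpus",
  "paths": ["tests/erc20.sol", "tests/more-cases"]
}
```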
+414
-458
File diff suppressed because it is too large
Load Diff
+177
-192
@@ -1,13 +1,12 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use revive_dt_common::{
    iterators::EitherIter,
    types::{Mode, ModeOptimizerSetting, ModePipeline},
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};

/// This represents a mode that has been parsed from test metadata.
///
@@ -21,17 +20,17 @@ use std::sync::LazyLock;
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
            ^
            (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
            \s*
@@ -40,218 +39,204 @@ impl FromStr for ParsedMode {
            (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
            $
            ").unwrap()
        });
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };
        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };
        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };

        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");

        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };
        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };
        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
        Ok(ParsedMode { pipeline, optimize_flag, optimize_setting, version })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }
        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }
        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }
        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });
        let optimize_flag_setting = self
            .optimize_flag
            .map(|flag| if flag { ModeOptimizerSetting::M3 } else { ModeOptimizerSetting::M0 });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };
        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );
        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }
        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter.clone().map(move |optimize_setting| Mode {
                pipeline,
                optimize_setting,
                version: self.version.clone(),
            })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];
    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }
        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];
    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            ("<=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"]),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}

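A short sketch of driving the parser and expansion above, mirroring the unit tests (the surrounding function is hypothetical):

```rust
// Sketch: parse one mode string and expand it into concrete modes.
fn demo() -> anyhow::Result<()> {
    let parsed = ParsedMode::from_str("Y+ M1 0.8.0")?;
    // semver re-prints `0.8.0` as the equivalent `^0.8.0`.
    assert_eq!(parsed.to_string(), "Y+ M1 ^0.8.0");
    // `M1` takes priority over the `+` flag, so expansion yields a single mode.
    for mode in parsed.to_modes() {
        println!("{mode}"); // expected "Y M1 ^0.8.0" per the expansion rules above
    }
    Ok(())
}
```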
+989
-1058
File diff suppressed because it is too large
Load Diff
+135
-136
@@ -1,10 +1,10 @@
use std::collections::HashMap;
use std::pin::Pin;
use std::{collections::HashMap, pin::Pin};

use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::TxHash;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use alloy::{
    eips::BlockNumberOrTag,
    json_abi::JsonAbi,
    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
};
use anyhow::Result;

use crate::metadata::{ContractIdent, ContractInstance};
@@ -12,165 +12,164 @@ use crate::metadata::{ContractIdent, ContractInstance};
/// A trait of the interface are required to implement to be used by the resolution logic that this
/// crate implements to go from string calldata and into the bytes calldata.
pub trait ResolverApi {
    /// Returns the ID of the chain that the node is on.
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;
    /// Returns the ID of the chain that the node is on.
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;

    /// Returns the gas price for the specified transaction.
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
    /// Returns the gas price for the specified transaction.
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit
    // when we implement the changes to the gas we need to adjust this to be a u64.
    /// Returns the gas limit of the specified block.
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
    // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit
    // when we implement the changes to the gas we need to adjust this to be a u64.
    /// Returns the gas limit of the specified block.
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;

    /// Returns the coinbase of the specified block.
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;
    /// Returns the coinbase of the specified block.
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;

    /// Returns the difficulty of the specified block.
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
    /// Returns the difficulty of the specified block.
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the base fee of the specified block.
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;
    /// Returns the base fee of the specified block.
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;

    /// Returns the hash of the specified block.
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;
    /// Returns the hash of the specified block.
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;

    /// Returns the timestamp of the specified block,
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;
    /// Returns the timestamp of the specified block,
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;

    /// Returns the number of the last block.
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
    /// Returns the number of the last block.
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
}

#[derive(Clone, Copy, Debug, Default)]
/// Contextual information required by the code that's performing the resolution.
pub struct ResolutionContext<'a> {
    /// When provided the contracts provided here will be used for resolutions.
    deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
    /// When provided the contracts provided here will be used for resolutions.
    deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,

    /// When provided the variables in here will be used for performing resolutions.
    variables: Option<&'a HashMap<String, U256>>,
    /// When provided the variables in here will be used for performing resolutions.
    variables: Option<&'a HashMap<String, U256>>,

    /// When provided this block number will be treated as the tip of the chain.
    block_number: Option<&'a BlockNumber>,
    /// When provided this block number will be treated as the tip of the chain.
    block_number: Option<&'a BlockNumber>,

    /// When provided the resolver will use this transaction hash for all of its resolutions.
    transaction_hash: Option<&'a TxHash>,
    /// When provided the resolver will use this transaction hash for all of its resolutions.
    transaction_hash: Option<&'a TxHash>,
}

impl<'a> ResolutionContext<'a> {
    pub fn new() -> Self {
        Default::default()
    }
    pub fn new() -> Self {
        Default::default()
    }

    pub fn new_from_parts(
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
        block_number: impl Into<Option<&'a BlockNumber>>,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        Self {
            deployed_contracts: deployed_contracts.into(),
            variables: variables.into(),
            block_number: block_number.into(),
            transaction_hash: transaction_hash.into(),
        }
    }
    pub fn new_from_parts(
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
        block_number: impl Into<Option<&'a BlockNumber>>,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        Self {
            deployed_contracts: deployed_contracts.into(),
            variables: variables.into(),
            block_number: block_number.into(),
            transaction_hash: transaction_hash.into(),
        }
    }

    pub fn with_deployed_contracts(
        mut self,
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
    ) -> Self {
        self.deployed_contracts = deployed_contracts.into();
        self
    }
    pub fn with_deployed_contracts(
        mut self,
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
    ) -> Self {
        self.deployed_contracts = deployed_contracts.into();
        self
    }

    pub fn with_variables(
        mut self,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
    ) -> Self {
        self.variables = variables.into();
        self
    }
    pub fn with_variables(
        mut self,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
    ) -> Self {
        self.variables = variables.into();
        self
    }

    pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
        self.block_number = block_number.into();
        self
    }
    pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
        self.block_number = block_number.into();
        self
    }

    pub fn with_transaction_hash(
        mut self,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        self.transaction_hash = transaction_hash.into();
        self
    }
    pub fn with_transaction_hash(
        mut self,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        self.transaction_hash = transaction_hash.into();
        self
    }

    pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
        match self.block_number {
            Some(block_number) => match number {
                BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
                n @ (BlockNumberOrTag::Finalized
                | BlockNumberOrTag::Safe
                | BlockNumberOrTag::Earliest
                | BlockNumberOrTag::Pending
                | BlockNumberOrTag::Number(_)) => n,
            },
            None => number,
        }
    }
    pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
        match self.block_number {
            Some(block_number) => match number {
                BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
                n @ (BlockNumberOrTag::Finalized |
                BlockNumberOrTag::Safe |
                BlockNumberOrTag::Earliest |
                BlockNumberOrTag::Pending |
                BlockNumberOrTag::Number(_)) => n,
            },
            None => number,
        }
    }

    pub fn deployed_contract(
        &self,
        instance: &ContractInstance,
    ) -> Option<&(ContractIdent, Address, JsonAbi)> {
        self.deployed_contracts
            .and_then(|deployed_contracts| deployed_contracts.get(instance))
    }
    pub fn deployed_contract(
        &self,
        instance: &ContractInstance,
    ) -> Option<&(ContractIdent, Address, JsonAbi)> {
        self.deployed_contracts
            .and_then(|deployed_contracts| deployed_contracts.get(instance))
    }

    pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
        self.deployed_contract(instance).map(|(_, a, _)| a)
    }
    pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
        self.deployed_contract(instance).map(|(_, a, _)| a)
    }

    pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
        self.deployed_contract(instance).map(|(_, _, a)| a)
    }
    pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
        self.deployed_contract(instance).map(|(_, _, a)| a)
    }

    pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
        self.variables
            .and_then(|variables| variables.get(name.as_ref()))
    }
    pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
        self.variables.and_then(|variables| variables.get(name.as_ref()))
    }

    pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
        self.block_number
    }
    pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
        self.block_number
    }

    pub fn transaction_hash(&self) -> Option<&'a TxHash> {
        self.transaction_hash
    }
    pub fn transaction_hash(&self) -> Option<&'a TxHash> {
        self.transaction_hash
    }
}

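A usage sketch for the builder methods above; the caller below is hypothetical and assumes `HashMap`, `U256`, `BlockNumber`, and `BlockNumberOrTag` are in scope as in the hunk:

```rust
// Hypothetical caller: pin `Latest` to a known tip while resolving.
fn demo() {
    let tip: BlockNumber = 42;
    let vars: HashMap<String, U256> = HashMap::new();
    let ctx = ResolutionContext::new().with_block_number(&tip).with_variables(&vars);
    // `Latest` resolves to the pinned tip; all other tags pass through unchanged.
    assert_eq!(
        ctx.resolve_block_number(BlockNumberOrTag::Latest),
        BlockNumberOrTag::Number(42)
    );
}
```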
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "ml-test-runner"
path = "src/main.rs"

[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,74 @@
# ML Test Runner

A test runner for executing Revive differential tests file-by-file with cargo-test-style output.

This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)

## Features

- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms

## Usage

```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth

# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink

# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth

# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail

# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth --start-platform

# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```

## Arguments

- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (`geth`, `kitchensink`, or `zombienet`, default: `geth`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)

## Output Format

The runner produces cargo-test-style output:

```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached

failures:

---- path/to/test2.sol ----
Error: ...

test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```

## Building

```bash
cargo build --release -p ml-test-runner
```

The binary will be available at `target/release/ml-test-runner`.
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use revive_dt_common::{
|
||||
iterators::FilesWithExtensionIterator,
|
||||
types::{PlatformIdentifier, PrivateKeyAllocator},
|
||||
};
|
||||
use revive_dt_config::TestExecutionContext;
|
||||
use revive_dt_core::{
|
||||
CachedCompiler, Platform,
|
||||
helpers::{TestDefinition, TestPlatformInformation},
|
||||
};
|
||||
use revive_dt_format::{
|
||||
case::CaseIdx,
|
||||
corpus::Corpus,
|
||||
metadata::{Metadata, MetadataFile},
|
||||
};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::{BTreeMap, HashSet},
|
||||
fs::File,
|
||||
io::{BufRead, BufReader, BufWriter, Write},
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use temp_dir::TempDir;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::info;
|
||||
use tracing_subscriber::{EnvFilter, FmtSubscriber};
|
||||
|
||||
/// ML-based test runner for executing differential tests file by file
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(name = "ml-test-runner")]
|
||||
struct MlTestRunnerArgs {
|
||||
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files
|
||||
#[arg(value_name = "PATH")]
|
||||
path: PathBuf,
|
||||
|
||||
/// File to cache tests that have already passed
|
||||
#[arg(long = "cached-passed")]
|
||||
cached_passed: Option<PathBuf>,
|
||||
|
||||
/// File to store tests that have failed (defaults to .<platform>-failed)
|
||||
#[arg(long = "cached-failed")]
|
||||
cached_failed: Option<PathBuf>,
|
||||
|
||||
/// Stop after the first file failure
|
||||
#[arg(long = "bail")]
|
||||
bail: bool,
|
||||
|
||||
/// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
|
||||
#[arg(long = "platform", default_value = "geth-evm-solc")]
|
||||
platform: PlatformIdentifier,
|
||||
|
||||
/// Start the platform and wait for RPC readiness
|
||||
#[arg(long = "start-platform", default_value = "false")]
|
||||
start_platform: bool,
|
||||
|
||||
/// Private key to use for wallet initialization (hex string with or without 0x prefix)
|
||||
#[arg(
|
||||
long = "private-key",
|
||||
default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
|
||||
)]
|
||||
private_key: String,
|
||||
|
||||
/// RPC port to connect to when using existing node
|
||||
#[arg(long = "rpc-port", default_value = "8545")]
|
||||
rpc_port: u16,
|
||||
|
||||
/// Show verbose output including cached tests and detailed error messages
|
||||
#[arg(long = "verbose", short = 'v')]
|
||||
verbose: bool,
|
||||
}
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let args = MlTestRunnerArgs::parse();
|
||||
|
||||
// Only set up tracing if RUST_LOG is explicitly set or --verbose is passed
|
||||
if std::env::var("RUST_LOG").is_ok() || args.verbose {
|
||||
let subscriber = FmtSubscriber::builder()
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.with_writer(std::io::stderr)
|
||||
.finish();
|
||||
tracing::subscriber::set_global_default(subscriber)
|
||||
.expect("Failed to set tracing subscriber");
|
||||
}
|
||||
|
||||
info!("ML test runner starting");
|
||||
info!("Platform: {:?}", args.platform);
|
||||
info!("Start platform: {}", args.start_platform);
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("Failed building the Runtime")
|
||||
.block_on(run(args))
|
||||
}
|
||||
|
||||
/// Wait for HTTP server to be ready by attempting to connect to the specified port
|
||||
async fn wait_for_http_server(port: u16) -> anyhow::Result<()> {
|
||||
const MAX_RETRIES: u32 = 60;
|
||||
const RETRY_DELAY: Duration = Duration::from_secs(1);
|
||||
|
||||
for attempt in 1..=MAX_RETRIES {
|
||||
match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully connected to HTTP server on port {} (attempt {})", port, attempt);
|
||||
return Ok(());
|
||||
},
|
||||
Err(e) => {
|
||||
if attempt == MAX_RETRIES {
|
||||
anyhow::bail!(
|
||||
"Failed to connect to HTTP server on port {} after {} attempts: {}",
|
||||
port,
|
||||
MAX_RETRIES,
|
||||
e
|
||||
);
|
||||
}
|
||||
if attempt % 10 == 0 {
|
||||
info!(
|
||||
"Still waiting for HTTP server on port {} (attempt {}/{})",
|
||||
port, attempt, MAX_RETRIES
|
||||
);
|
||||
}
|
||||
tokio::time::sleep(RETRY_DELAY).await;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
    let start_time = Instant::now();

    info!("Discovering test files from: {}", args.path.display());
    let test_files = discover_test_files(&args.path)?;
    info!("Found {} test file(s)", test_files.len());

    let cached_passed = if let Some(cache_file) = &args.cached_passed {
        let cached = load_cached_passed(cache_file)?;
        info!("Loaded {} cached passed test(s)", cached.len());
        cached
    } else {
        HashSet::new()
    };

    let cached_passed = Arc::new(Mutex::new(cached_passed));

    // Set up the cached-failed file (defaults to .<platform>-failed)
    let cached_failed_path = args
        .cached_failed
        .clone()
        .unwrap_or_else(|| PathBuf::from(format!(".{:?}-failed", args.platform)));

    let cached_failed = Arc::new(Mutex::new(HashSet::<String>::new()));

    // Get the platform based on CLI args
    let platform: &dyn Platform = match args.platform {
        PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
        PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
        PlatformIdentifier::KitchensinkPolkavmResolc =>
            &revive_dt_core::KitchensinkPolkavmResolcPlatform,
        PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
        PlatformIdentifier::ReviveDevNodePolkavmResolc =>
            &revive_dt_core::ReviveDevNodePolkavmResolcPlatform,
        PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
        PlatformIdentifier::ZombienetPolkavmResolc =>
            &revive_dt_core::ZombienetPolkavmResolcPlatform,
        PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
    };

    let test_context = TestExecutionContext::default();
    let context = revive_dt_config::Context::Test(Box::new(test_context));

    let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
        info!("Starting blockchain node...");
        let node_handle =
            platform.new_node(context.clone()).context("Failed to spawn node thread")?;

        info!("Waiting for node to start...");
        let node = node_handle
            .join()
            .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
            .context("Failed to start node")?;

        info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
        let node = Box::leak(node);

        info!("Running pre-transactions...");
        node.pre_transactions().await.context("Failed to run pre-transactions")?;
        info!("Pre-transactions completed");

        node
    } else {
        info!("Using existing node at port {}", args.rpc_port);

        // Wait for the HTTP server to be ready
        info!("Waiting for HTTP server to be ready on port {}...", args.rpc_port);
        wait_for_http_server(args.rpc_port).await?;
        info!("HTTP server is ready");

        let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
            PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc =>
                Box::new(
                    revive_dt_node::node_implementations::geth::GethNode::new_existing(
                        &args.private_key,
                        args.rpc_port,
                    )
                    .await?,
                ),
            PlatformIdentifier::KitchensinkPolkavmResolc |
            PlatformIdentifier::KitchensinkRevmSolc |
            PlatformIdentifier::ReviveDevNodePolkavmResolc |
            PlatformIdentifier::ReviveDevNodeRevmSolc |
            PlatformIdentifier::ZombienetPolkavmResolc |
            PlatformIdentifier::ZombienetRevmSolc => Box::new(
                revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
                    &args.private_key,
                    args.rpc_port,
                )
                .await?,
            ),
        };
        Box::leak(existing_node)
    };

    let mut passed_files = 0;
    let mut failed_files = 0;
    let mut skipped_files = 0;
    let mut failures = Vec::new();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const YELLOW: &str = "\x1B[33m";
    const COLOUR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    for test_file in test_files {
        let file_display = test_file.display().to_string();

        info!("\n\n == Executing test file: {file_display} == \n\n");
        // Check if the file already passed in a previous run
        {
            let cache = cached_passed.lock().await;
            if cache.contains(&file_display) {
                if args.verbose {
                    println!("test {file_display} ... {YELLOW}cached{COLOUR_RESET}");
                }
                skipped_files += 1;
                continue;
            }
        }

        info!("Loading metadata from: {}", test_file.display());
        let metadata_file = match load_metadata_file(&test_file) {
            Ok(mf) => {
                info!("Loaded metadata with {} case(s)", mf.cases.len());
                mf
            },
            Err(e) => {
                // Skip files without metadata instead of treating them as failures
                info!("Skipping {} (no metadata): {}", file_display, e);
                skipped_files += 1;
                continue;
            },
        };

        // Execute the test with a 20 second timeout
        let test_result = tokio::time::timeout(
            Duration::from_secs(20),
            execute_test_file(&metadata_file, platform, node, &context),
        )
        .await;

        let result = match test_result {
            Ok(Ok(_)) => Ok(()),
            Ok(Err(e)) => Err(e),
            Err(_) => Err(anyhow::anyhow!("Test timed out after 20 seconds")),
        };

        match result {
            Ok(_) => {
                println!("test {file_display} ... {GREEN}ok{COLOUR_RESET}");
                passed_files += 1;

                // Update the cached-passed file
                if let Some(cache_file) = &args.cached_passed {
                    let mut cache = cached_passed.lock().await;
                    cache.insert(file_display);
                    if let Err(e) = save_cached_passed(cache_file, &cache) {
                        info!("Failed to save cache: {}", e);
                    }
                }
            },
            Err(e) => {
                println!("test {file_display} ... {RED}FAILED{COLOUR_RESET}");
                failed_files += 1;
                let error_detail = if args.verbose { format!("{:?}", e) } else { format!("{}", e) };
                failures.push((file_display.clone(), error_detail));

                // Update the cached-failed file
                {
                    let mut cache = cached_failed.lock().await;
                    cache.insert(file_display);
                    if let Err(e) = save_cached_failed(&cached_failed_path, &cache) {
                        info!("Failed to save cached-failed: {}", e);
                    }
                }

                if args.bail {
                    info!("Bailing after first failure");
                    break;
                }
            },
        }
    }

    // Print summary
    println!();
    if !failures.is_empty() && args.verbose {
        println!("{BOLD}failures:{BOLD_RESET}");
        println!();
        for (file, error) in &failures {
            println!("---- {} ----", file);
            println!("{}", error);
            println!();
        }
    }

    let elapsed = start_time.elapsed();
    println!(
        "test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
        if failed_files == 0 {
            format!("{GREEN}ok{COLOUR_RESET}")
        } else {
            format!("{RED}FAILED{COLOUR_RESET}")
        },
        passed_files,
        failed_files,
        skipped_files,
        elapsed.as_secs_f64()
    );

    if failed_files > 0 {
        std::process::exit(1);
    }

    Ok(())
}

/// Discover test files from the given path
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
    if !path.exists() {
        anyhow::bail!("Path does not exist: {}", path.display());
    }

    let mut files = Vec::new();

    if path.is_file() {
        let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");

        match extension {
            "sol" => {
                // Single .sol file
                files.push(path.to_path_buf());
            },
            "json" => {
                // Corpus file - enumerate its tests
                let corpus = Corpus::try_from_path(path)?;
                let metadata_files = corpus.enumerate_tests();
                for metadata in metadata_files {
                    files.push(metadata.metadata_file_path);
                }
            },
            _ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension),
        }
    } else if path.is_dir() {
        // First, find all test.json files
        let mut test_json_dirs = HashSet::new();
        for json_file in FilesWithExtensionIterator::new(path)
            .with_allowed_extension("json")
            .with_use_cached_fs(true)
        {
            if json_file.file_name().and_then(|s| s.to_str()) == Some("test.json") {
                if let Some(parent) = json_file.parent() {
                    test_json_dirs.insert(parent.to_path_buf());
                }

                // Try to parse as a corpus file first, then as a metadata file
                if let Ok(corpus) = Corpus::try_from_path(&json_file) {
                    // It's a corpus file - enumerate its tests
                    let metadata_files = corpus.enumerate_tests();
                    for metadata in metadata_files {
                        files.push(metadata.metadata_file_path);
                    }
                } else {
                    // It's a metadata file - use it directly
                    files.push(json_file);
                }
            }
        }

        // Then, find .sol files that are NOT in directories with test.json
        for sol_file in FilesWithExtensionIterator::new(path)
            .with_allowed_extension("sol")
            .with_use_cached_fs(true)
        {
            if let Some(parent) = sol_file.parent() {
                if !test_json_dirs.contains(parent) {
                    files.push(sol_file);
                }
            } else {
                files.push(sol_file);
            }
        }
    } else {
        anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
    }

    Ok(files)
}

/// Load metadata from a test file
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
    let metadata = Metadata::try_from_file(path)
        .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;

    Ok(MetadataFile {
        metadata_file_path: path.to_path_buf(),
        corpus_file_path: path.to_path_buf(),
        content: metadata,
    })
}

/// Execute all test cases in a metadata file
async fn execute_test_file(
    metadata_file: &MetadataFile,
    platform: &dyn Platform,
    node: &'static dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
) -> anyhow::Result<()> {
    if metadata_file.cases.is_empty() {
        anyhow::bail!("No test cases found in file");
    }

    info!("Processing {} test case(s)", metadata_file.cases.len());

    let temp_dir = TempDir::new()?;
    info!("Created temporary directory: {}", temp_dir.path().display());

    info!("Initializing cached compiler");
    let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
        .await
        .map(Arc::new)
        .context("Failed to create cached compiler")?;

    let private_key_allocator =
        Arc::new(Mutex::new(PrivateKeyAllocator::new(alloy::primitives::U256::from(100))));

    let (reporter, report_task) =
        revive_dt_report::ReportAggregator::new(context.clone()).into_task();

    tokio::spawn(report_task);

    info!("Building test definitions for {} case(s)", metadata_file.cases.len());
    let mut test_definitions = Vec::new();
    for (case_idx, case) in metadata_file.cases.iter().enumerate() {
        info!("Building test definition for case {}", case_idx);
        let test_def = build_test_definition(
            metadata_file,
            case,
            case_idx,
            platform,
            node,
            &context,
            &reporter,
        )
        .await?;

        if let Some(test_def) = test_def {
            info!("Test definition for case {} created successfully", case_idx);
            test_definitions.push(test_def);
        }
    }

    info!("Executing {} test definition(s)", test_definitions.len());
    for (idx, test_definition) in test_definitions.iter().enumerate() {
        info!("─────────────────────────────────────────────────────────────────");
        info!(
            "Executing case {}/{}: case_idx={}, mode={}, steps={}",
            idx + 1,
            test_definitions.len(),
            test_definition.case_idx,
            test_definition.mode,
            test_definition.case.steps.len()
        );

        info!("Creating driver for case {}", test_definition.case_idx);
        let driver = revive_dt_core::differential_tests::Driver::new_root(
            test_definition,
            private_key_allocator.clone(),
            &cached_compiler,
        )
        .await
        .context("Failed to create driver")?;

        info!(
            "Running {} step(s) for case {}",
            test_definition.case.steps.len(),
            test_definition.case_idx
        );
        let steps_executed = driver
            .execute_all()
            .await
            .context(format!("Failed to execute case {}", test_definition.case_idx))?;
        info!(
            "✓ Case {} completed successfully, executed {} step(s)",
            test_definition.case_idx, steps_executed
        );
    }
    info!("─────────────────────────────────────────────────────────────────");
    info!("All {} test case(s) executed successfully", test_definitions.len());

    Ok(())
}

/// Build a test definition for a single test case
async fn build_test_definition<'a>(
    metadata_file: &'a MetadataFile,
    case: &'a revive_dt_format::case::Case,
    case_idx: usize,
    platform: &'a dyn Platform,
    node: &'a dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
    reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
    let mode = case
        .modes
        .as_ref()
        .or(metadata_file.modes.as_ref())
        .and_then(|modes| modes.first())
        .and_then(|parsed_mode| parsed_mode.to_modes().next())
        .map(Cow::Owned)
        .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
        .unwrap();

    let compiler = platform
        .new_compiler(context.clone(), mode.version.clone().map(Into::into))
        .await
        .context("Failed to create compiler")?;

    let test_reporter =
        reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
            solc_mode: mode.as_ref().clone(),
            metadata_file_path: metadata_file.metadata_file_path.clone(),
            case_idx: CaseIdx::new(case_idx),
        }));

    let execution_reporter =
        test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

    let mut platforms = BTreeMap::new();
    platforms.insert(
        platform.platform_identifier(),
        TestPlatformInformation { platform, node, compiler, reporter: execution_reporter },
    );

    let test_definition = TestDefinition {
        metadata: metadata_file,
        metadata_file_path: &metadata_file.metadata_file_path,
        mode,
        case_idx: CaseIdx::new(case_idx),
        case,
        platforms,
        reporter: test_reporter,
    };

    if let Err((reason, _)) = test_definition.check_compatibility() {
        info!("Skipping case {}: {}", case_idx, reason);
        return Ok(None);
    }

    Ok(Some(test_definition))
}

/// Load cached passed tests from file
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
    if !path.exists() {
        return Ok(HashSet::new());
    }

    let file = File::open(path).context("Failed to open cached-passed file")?;
    let reader = BufReader::new(file);

    let mut cache = HashSet::new();
    for line in reader.lines() {
        let line = line?;
        let trimmed = line.trim();
        if !trimmed.is_empty() {
            cache.insert(trimmed.to_string());
        }
    }

    Ok(cache)
}

/// Save cached passed tests to file
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let file = File::create(path).context("Failed to create cached-passed file")?;
    let mut writer = BufWriter::new(file);

    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();

    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }

    writer.flush()?;
    Ok(())
}

/// Save cached failed tests to file
fn save_cached_failed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let file = File::create(path).context("Failed to create cached-failed file")?;
    let mut writer = BufWriter::new(file);

    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();

    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }

    writer.flush()?;
    Ok(())
}

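The three cache helpers above treat the cache files as plain newline-separated lists of test paths, written in sorted order so that repeated runs produce stable diffs. A minimal round-trip sketch using only the functions defined above; the cache file name is hypothetical:

use std::{collections::HashSet, path::Path};

// Hypothetical round-trip: whatever set we save comes back identically.
let mut cache = HashSet::new();
cache.insert("tests/simple/storage.sol".to_string());
cache.insert("tests/complex/interface_casting.sol".to_string());
save_cached_passed(Path::new(".geth-evm-solc-passed"), &cache)?;
let reloaded = load_cached_passed(Path::new(".geth-evm-solc-passed"))?;
assert_eq!(cache, reloaded);
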
@@ -1,11 +1,14 @@
//! This crate implements all node interactions.

use std::{pin::Pin, sync::Arc};

use alloy::{
    primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
    rpc::types::{
        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
        trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
    },
};
use anyhow::Result;

use futures::Stream;
@@ -15,81 +18,85 @@ use revive_dt_format::traits::ResolverApi;
/// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)]
pub trait EthereumNode {
    /// A function to run post spawning the nodes and before any transactions are run on the node.
    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;

    fn id(&self) -> usize;

    /// Returns the node's connection string.
    fn connection_string(&self) -> &str;

    fn submit_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;

    fn get_receipt(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;

    /// Execute the [TransactionRequest] and return a [TransactionReceipt].
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;

    /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;

    /// Returns the state diff of the transaction hash in the [TransactionReceipt].
    fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;

    /// Returns the balance of the provided [`Address`].
    fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;

    /// Returns the latest storage proof of the provided [`Address`].
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;

    /// Returns the resolver to be used with this Ethereum node.
    fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;

    /// Returns the EVM version of the node.
    fn evm_version(&self) -> EVMVersion;

    /// Returns a stream of the blocks that were mined by the node.
    fn subscribe_to_full_blocks_information(
        &self,
    ) -> Pin<
        Box<
            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                + '_,
        >,
    >;

    /// Checks if the provided address is in the wallet. If it is, returns the address.
    /// Otherwise, returns the default signer's address.
    fn resolve_signer_or_default(&self, address: Address) -> Address;
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MinedBlockInformation {
    /// The block number.
    pub block_number: BlockNumber,

    /// The block timestamp.
    pub block_timestamp: BlockTimestamp,

    /// The amount of gas mined in the block.
    pub mined_gas: u128,

    /// The gas limit of the block.
    pub block_gas_limit: u128,

    /// The hashes of the transactions that were mined as part of the block.
    pub transaction_hashes: Vec<TxHash>,
}

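A minimal consumer sketch of the trait above, assuming some `node: &dyn EthereumNode` and a prepared `request: TransactionRequest`; the field accesses come from alloy's receipt and `DiffMode` types:

// Execute a transaction, then inspect which accounts its state diff touched.
// The trait methods return boxed futures, awaited like any other future.
let receipt = node.execute_transaction(request).await?;
let diff = node.state_diff(receipt.transaction_hash).await?;
println!("state diff touches {} post-state account(s)", diff.post.len());
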
+132
-151
@@ -1,9 +1,9 @@
use std::{
    fs::{File, OpenOptions},
    io::{BufRead, BufReader, Write},
    path::Path,
    process::{Child, Command},
    time::{Duration, Instant},
};

use anyhow::{Context, Result, bail};
@@ -12,180 +12,161 @@ use anyhow::{Context, Result, bail};
/// when the process is dropped.
#[derive(Debug)]
pub struct Process {
    /// The handle of the child process.
    child: Child,

    /// The file that stdout is being logged to.
    stdout_logs_file: File,

    /// The file that stderr is being logged to.
    stderr_logs_file: File,
}

impl Process {
    pub fn new(
        log_file_prefix: impl Into<Option<&'static str>>,
        logs_directory: impl AsRef<Path>,
        binary_path: impl AsRef<Path>,
        command_building_callback: impl FnOnce(&mut Command, File, File),
        process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
    ) -> Result<Self> {
        let log_file_prefix = log_file_prefix.into();

        let (stdout_file_name, stderr_file_name) = match log_file_prefix {
            Some(prefix) => (format!("{prefix}_stdout.log"), format!("{prefix}_stderr.log")),
            None => ("stdout.log".to_string(), "stderr.log".to_string()),
        };

        let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
        let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);

        let stdout_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stdout_logs_file_path.as_path())
            .context("Failed to open the stdout logs file")?;
        let stderr_logs_file = OpenOptions::new()
            .write(true)
            .truncate(true)
            .create(true)
            .open(stderr_logs_file_path.as_path())
            .context("Failed to open the stderr logs file")?;

        let mut command = {
            let stdout_logs_file =
                stdout_logs_file.try_clone().context("Failed to clone the stdout logs file")?;
            let stderr_logs_file =
                stderr_logs_file.try_clone().context("Failed to clone the stderr logs file")?;

            let mut command = Command::new(binary_path.as_ref());
            command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
            command
        };
        let mut child = command.spawn().context("Failed to spawn the built command")?;

        match process_readiness_wait_behavior {
            ProcessReadinessWaitBehavior::NoStartupWait => {},
            ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration,
                mut check_function,
            } => {
                let spawn_time = Instant::now();

                let stdout_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stdout_logs_file_path)
                    .context("Failed to open the stdout logs file")?;
                let stderr_logs_file = OpenOptions::new()
                    .read(true)
                    .open(stderr_logs_file_path)
                    .context("Failed to open the stderr logs file")?;

                let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
                let mut stderr_lines = BufReader::new(stderr_logs_file).lines();

                let mut stdout = String::new();
                let mut stderr = String::new();

                loop {
                    let stdout_line = stdout_lines.next().and_then(Result::ok);
                    let stderr_line = stderr_lines.next().and_then(Result::ok);

                    if let Some(stdout_line) = stdout_line.as_ref() {
                        stdout.push_str(stdout_line);
                        stdout.push('\n');
                    }
                    if let Some(stderr_line) = stderr_line.as_ref() {
                        stderr.push_str(stderr_line);
                        stderr.push('\n');
                    }

                    let check_result =
                        check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
                            format!(
                                "Failed to wait for the process to be ready - {stdout} - {stderr}"
                            ),
                        )?;

                    if check_result {
                        break;
                    }

                    if Instant::now().duration_since(spawn_time) > max_wait_duration {
                        bail!(
                            "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
                        )
                    }
                }
            },
            ProcessReadinessWaitBehavior::WaitForCommandToExit => {
                if !child.wait().context("Failed waiting for process to finish")?.success() {
                    anyhow::bail!("Failed to spawn command");
                }
            },
        }

        Ok(Self { child, stdout_logs_file, stderr_logs_file })
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        self.child.kill().expect("Failed to kill the process");
        self.stdout_logs_file.flush().expect("Failed to flush the stdout logs file");
        self.stderr_logs_file.flush().expect("Failed to flush the stderr logs file");
    }
}

pub enum ProcessReadinessWaitBehavior {
    /// The process does not require any kind of wait after it's been spawned and can be used
    /// straight away.
    NoStartupWait,

    /// Waits for the command to exit.
    WaitForCommandToExit,

    /// The process does require some amount of wait duration after it's been started.
    WaitDuration(Duration),

    /// The process requires a time bounded wait function which is a function of the lines that
    /// appear in the log files.
    TimeBoundedWaitFunction {
        /// The maximum amount of time to wait for the check function to return true.
        max_wait_duration: Duration,

        /// The function to use to check if the process spawned is ready to use or not. This
        /// function should return the following in the following cases:
        ///
        /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
        ///   and the wait is completed.
        /// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
        /// - `Err`: The process is not ready yet and will not be ready in the future as it appears
        ///   that it has encountered an error when it was being spawned.
        ///
        /// The first argument is a line from stdout and the second argument is a line from stderr.
        #[allow(clippy::type_complexity)]
        check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
    },
}

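The `check_function` contract documented above is easiest to see in use. A minimal sketch, assuming the `Process` API from this diff; the binary path, logs directory, and readiness marker string are hypothetical:

use std::time::Duration;

// Spawn a node binary and wait until its stdout announces readiness,
// giving up after 30 seconds.
let process = Process::new(
    "node",                     // log files become node_stdout.log / node_stderr.log
    "/tmp/logs",                // hypothetical logs directory
    "/usr/local/bin/some-node", // hypothetical binary path
    |command, stdout, stderr| {
        // Wire the child's output into the log files that the waiter tails.
        command.arg("--dev").stdout(stdout).stderr(stderr);
    },
    ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
        max_wait_duration: Duration::from_secs(30),
        check_function: Box::new(|stdout_line, _stderr_line| {
            // Ok(true) ends the wait; Ok(false) keeps polling the log lines.
            Ok(stdout_line.is_some_and(|line| line.contains("RPC server started")))
        }),
    },
)?;
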
+10
-10
@@ -10,16 +10,16 @@ pub mod provider_utils;

/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
    /// Spawns a node configured according to the genesis json.
    ///
    /// Blocking until it's ready to accept transactions.
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;

    /// Prune the node instance and related data.
    ///
    /// Blocking until it's completely stopped.
    fn shutdown(&mut self) -> anyhow::Result<()>;

    /// Returns the node version.
    fn version(&self) -> anyhow::Result<String>;
}

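A minimal lifecycle sketch against this trait, assuming some `node: &mut dyn Node` and a `genesis: Genesis` value from the calling code:

// spawn() blocks until the node accepts transactions; shutdown() blocks
// until it has completely stopped, per the doc comments above.
node.spawn(genesis)?;
println!("running node version {}", node.version()?);
node.shutdown()?;
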
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -6,64 +6,56 @@ use tower::{Layer, Service};

#[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer {
    semaphore: Arc<Semaphore>,
}

impl ConcurrencyLimiterLayer {
    pub fn new(permit_count: usize) -> Self {
        Self { semaphore: Arc::new(Semaphore::new(permit_count)) }
    }
}

impl<S> Layer<S> for ConcurrencyLimiterLayer {
    type Service = ConcurrencyLimiterService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        ConcurrencyLimiterService { service: inner, semaphore: self.semaphore.clone() }
    }
}

#[derive(Clone)]
pub struct ConcurrencyLimiterService<S> {
    service: S,
    semaphore: Arc<Semaphore>,
}

impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where
    S: Service<Request> + Send,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    fn call(&mut self, req: Request) -> Self::Future {
        let semaphore = self.semaphore.clone();
        let future = self.service.call(req);

        Box::pin(async move {
            let _permit = semaphore.acquire().await.expect("Semaphore has been closed");
            tracing::debug!(
                available_permits = semaphore.available_permits(),
                "Acquired Semaphore Permit"
            );
            future.await
        })
    }
}

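A minimal sketch of composing this layer with an arbitrary tower service; the inner `service_fn` is a hypothetical stand-in:

use tower::{Layer, ServiceExt};

// Hypothetical inner service: responds with the request's length.
let service = tower::service_fn(|req: String| async move { Ok::<_, anyhow::Error>(req.len()) });

// At most 10 calls run concurrently; an 11th caller waits on the semaphore.
let limited = ConcurrencyLimiterLayer::new(10).layer(service);
let response = limited.oneshot("ping".to_string()).await?;
assert_eq!(response, 4);
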
@@ -1,84 +1,76 @@
use alloy::{
    network::{Network, TransactionBuilder},
    providers::{
        Provider, SendableTx,
        fillers::{GasFiller, TxFiller},
    },
    transports::TransportResult,
};

#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
    inner: GasFiller,
    default_gas_limit: u64,
    default_max_fee_per_gas: u128,
    default_priority_fee: u128,
}

impl FallbackGasFiller {
    pub fn new(
        default_gas_limit: u64,
        default_max_fee_per_gas: u128,
        default_priority_fee: u128,
    ) -> Self {
        Self { inner: GasFiller, default_gas_limit, default_max_fee_per_gas, default_priority_fee }
    }
}

impl Default for FallbackGasFiller {
    fn default() -> Self {
        FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000)
    }
}

impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
{
    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;

    fn status(
        &self,
        tx: &<N as Network>::TransactionRequest,
    ) -> alloy::providers::fillers::FillerControlFlow {
        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
    }

    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}

    async fn prepare<P: Provider<N>>(
        &self,
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, …).
        // Propagate errors so the caller can handle them appropriately.
        self.inner.prepare(provider, tx).await.map(Some)
    }

    async fn fill(
        &self,
        fillable: Self::Fillable,
        mut tx: alloy::providers::SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        if let Some(fill) = fillable {
            // our inner GasFiller succeeded — use it
            self.inner.fill(fill, tx).await
        } else {
            if let Some(builder) = tx.as_mut_builder() {
                builder.set_gas_limit(self.default_gas_limit);
                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
            }
            Ok(tx)
        }
    }
}

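Construction is the only knob: the three arguments are the fallback gas limit, max fee, and priority fee applied when no estimation results were available to fill with. A hypothetical instantiation:

// Fall back to a 5M gas limit, 2 gwei max fee, and 1 gwei priority fee
// whenever the inner GasFiller produced nothing to fill with.
let fallback = FallbackGasFiller::new(5_000_000, 2_000_000_000, 1_000_000_000);
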
@@ -1,12 +1,12 @@
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};

use alloy::{
    network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
    providers::{
        Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
        fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
    },
    rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
@@ -15,114 +15,110 @@ use tracing::debug;
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};

pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
        JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
        WalletFiller<W>,
    >,
    RootProvider<N>,
    N,
>;

pub async fn construct_concurrency_limited_provider<N, W>(
    rpc_url: &str,
    fallback_gas_filler: FallbackGasFiller,
    chain_id_filler: ChainIdFiller,
    nonce_filler: NonceFiller,
    wallet: W,
) -> Result<ConcreteProvider<N, W>>
where
    N: Network<TransactionRequest: TransactionBuilder4844>,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // This is a global limit on the RPC concurrency that applies to all of the providers created
    // by the framework. With this limit in place we can have a maximum of N concurrent requests
    // at any point in time and no more than that. This is done in an effort to stabilize the
    // framework against some of the intermittent issues that we've been seeing related to RPC
    // calls.
    static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
        LazyLock::new(|| ConcurrencyLimiterLayer::new(10));

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;

    let provider = ProviderBuilder::new()
        .disable_recommended_fillers()
        .network::<N>()
        .filler(fallback_gas_filler)
        .filler(chain_id_filler)
        .filler(nonce_filler)
        .wallet(wallet)
        .connect_client(client);

    Ok(provider)
}

pub async fn execute_transaction<N, W>(
    provider: ConcreteProvider<N, W>,
    transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
    N: Network<
        TransactionRequest: TransactionBuilder4844,
        TxEnvelope = <Ethereum as Network>::TxEnvelope,
    >,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    let sendable_transaction =
        provider.fill(transaction).await.context("Failed to fill transaction")?;

    let transaction_envelope = sendable_transaction
        .try_into_envelope()
        .context("Failed to convert transaction into an envelope")?;
    let tx_hash = *transaction_envelope.tx_hash();

    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
        Ok(pending_transaction) => pending_transaction,
        Err(error) => {
            let error_string = error.to_string();

            if error_string.contains("Transaction Already Imported") {
                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
            } else {
                return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
            }
        },
    };
    debug!(%tx_hash, "Submitted Transaction");

    pending_transaction.set_timeout(Some(Duration::from_secs(120)));
    let tx_hash = pending_transaction
        .watch()
        .await
        .context(format!("Transaction inclusion watching timeout for {tx_hash}"))?;

    poll(Duration::from_secs(30), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
        let provider = provider.clone();
        async move {
            match provider.get_transaction_receipt(tx_hash).await {
                Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                _ => Ok(ControlFlow::Continue(())),
            }
        }
    })
    .await
    .context(format!("Polling for receipt timed out for {tx_hash}"))
}

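The `poll` helper used above drives a closure-produced future until it returns `ControlFlow::Break`, re-invoking it on `Continue` according to the wait behavior. A stand-alone sketch with the signature inferred from the call above; `check_ready` is a hypothetical async check:

use std::{ops::ControlFlow, time::Duration};
use revive_dt_common::futures::{PollingWaitBehavior, poll};

// Retry a flaky readiness check every 2 seconds, for up to 10 seconds total.
let value = poll(Duration::from_secs(10), PollingWaitBehavior::Constant(Duration::from_secs(2)), || async {
    if check_ready().await {
        Ok(ControlFlow::Break(42))
    } else {
        Ok(ControlFlow::Continue(()))
    }
})
.await?;
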
+399
-448
@@ -2,10 +2,10 @@
|
||||
//! reporters and combines them into a single unified report.
|
||||
|
||||
use std::{
|
||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
fs::OpenOptions,
|
||||
path::PathBuf,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||
fs::OpenOptions,
|
||||
path::PathBuf,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use alloy::primitives::Address;
|
||||
@@ -19,434 +19,385 @@ use semver::Version;
|
||||
use serde::Serialize;
|
||||
use serde_with::{DisplayFromStr, serde_as};
|
||||
use tokio::sync::{
|
||||
broadcast::{Sender, channel},
|
||||
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||
broadcast::{Sender, channel},
|
||||
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||
};
|
||||
use tracing::debug;
|
||||
|
||||
use crate::*;
|
||||
|
||||
pub struct ReportAggregator {
|
||||
/* Internal Report State */
|
||||
report: Report,
|
||||
remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
|
||||
/* Channels */
|
||||
runner_tx: Option<UnboundedSender<RunnerEvent>>,
|
||||
runner_rx: UnboundedReceiver<RunnerEvent>,
|
||||
listener_tx: Sender<ReporterEvent>,
|
||||
/* Internal Report State */
|
||||
report: Report,
|
||||
remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
|
||||
/* Channels */
|
||||
runner_tx: Option<UnboundedSender<RunnerEvent>>,
|
||||
runner_rx: UnboundedReceiver<RunnerEvent>,
|
||||
listener_tx: Sender<ReporterEvent>,
|
||||
}
|
||||
|
||||
impl ReportAggregator {
|
||||
pub fn new(context: Context) -> Self {
|
||||
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
||||
let (listener_tx, _) = channel::<ReporterEvent>(1024);
|
||||
Self {
|
||||
report: Report::new(context),
|
||||
remaining_cases: Default::default(),
|
||||
runner_tx: Some(runner_tx),
|
||||
runner_rx,
|
||||
listener_tx,
|
||||
}
|
||||
}
|
||||
pub fn new(context: Context) -> Self {
|
||||
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
||||
let (listener_tx, _) = channel::<ReporterEvent>(1024);
|
||||
Self {
|
||||
report: Report::new(context),
|
||||
remaining_cases: Default::default(),
|
||||
runner_tx: Some(runner_tx),
|
||||
runner_rx,
|
||||
listener_tx,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
|
||||
let reporter = self
|
||||
.runner_tx
|
||||
.take()
|
||||
.map(Into::into)
|
||||
.expect("Can't fail since this can only be called once");
|
||||
(reporter, async move { self.aggregate().await })
|
||||
}
|
||||
pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
|
||||
let reporter = self
|
||||
.runner_tx
|
||||
.take()
|
||||
.map(Into::into)
|
||||
.expect("Can't fail since this can only be called once");
|
||||
(reporter, async move { self.aggregate().await })
|
||||
}
|
||||
|
||||
	async fn aggregate(mut self) -> Result<()> {
		debug!("Starting to aggregate report");

		while let Some(event) = self.runner_rx.recv().await {
			debug!(?event, "Received Event");
			match event {
				RunnerEvent::SubscribeToEvents(event) => {
					self.handle_subscribe_to_events_event(*event);
				},
				RunnerEvent::CorpusFileDiscovery(event) =>
					self.handle_corpus_file_discovered_event(*event),
				RunnerEvent::MetadataFileDiscovery(event) => {
					self.handle_metadata_file_discovery_event(*event);
				},
				RunnerEvent::TestCaseDiscovery(event) => {
					self.handle_test_case_discovery(*event);
				},
				RunnerEvent::TestSucceeded(event) => {
					self.handle_test_succeeded_event(*event);
				},
				RunnerEvent::TestFailed(event) => {
					self.handle_test_failed_event(*event);
				},
				RunnerEvent::TestIgnored(event) => {
					self.handle_test_ignored_event(*event);
				},
				RunnerEvent::NodeAssigned(event) => {
					self.handle_node_assigned_event(*event);
				},
				RunnerEvent::PreLinkContractsCompilationSucceeded(event) =>
					self.handle_pre_link_contracts_compilation_succeeded_event(*event),
				RunnerEvent::PostLinkContractsCompilationSucceeded(event) =>
					self.handle_post_link_contracts_compilation_succeeded_event(*event),
				RunnerEvent::PreLinkContractsCompilationFailed(event) =>
					self.handle_pre_link_contracts_compilation_failed_event(*event),
				RunnerEvent::PostLinkContractsCompilationFailed(event) =>
					self.handle_post_link_contracts_compilation_failed_event(*event),
				RunnerEvent::LibrariesDeployed(event) => {
					self.handle_libraries_deployed_event(*event);
				},
				RunnerEvent::ContractDeployed(event) => {
					self.handle_contract_deployed_event(*event);
				},
				RunnerEvent::Completion(event) => {
					self.handle_completion(*event);
					break;
				},
			}
		}
		debug!("Report aggregation completed");

		let file_name = {
			let current_timestamp = SystemTime::now()
				.duration_since(UNIX_EPOCH)
				.context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
				.as_secs();
			let mut file_name = current_timestamp.to_string();
			file_name.push_str(".json");
			file_name
		};
		let file_path =
			self.report.context.working_directory_configuration().as_path().join(file_name);
		let file = OpenOptions::new()
			.create(true)
			.write(true)
			.truncate(true)
			.read(false)
			.open(&file_path)
			.with_context(|| {
				format!("Failed to open report file for writing: {}", file_path.display())
			})?;
		serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
			format!("Failed to serialize report JSON to {}", file_path.display())
		})?;

		Ok(())
	}

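	// The finished report thus lands at `<working directory>/<unix timestamp>.json`; the
	// timestamped name keeps successive runs from overwriting one another (to one-second
	// granularity).
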
	fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
		let _ = event.tx.send(self.listener_tx.subscribe());
	}

	fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
		self.report.corpora.push(event.corpus);
	}

	fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
		self.report.metadata_files.insert(event.path.clone());
	}

	fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
		self.remaining_cases
			.entry(event.test_specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(event.test_specifier.solc_mode.clone())
			.or_default()
			.insert(event.test_specifier.case_idx);
	}

	fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
		// Remove this from the set of cases we're tracking since it has completed.
		self.remaining_cases
			.entry(event.test_specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(event.test_specifier.solc_mode.clone())
			.or_default()
			.remove(&event.test_specifier.case_idx);

		// Add information on the fact that the case succeeded to the report.
		let test_case_report = self.test_case_report(&event.test_specifier);
		test_case_report.status =
			Some(TestCaseStatus::Succeeded { steps_executed: event.steps_executed });
		self.handle_post_test_case_status_update(&event.test_specifier);
	}

	fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
		// Remove this from the set of cases we're tracking since it has completed.
		self.remaining_cases
			.entry(event.test_specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(event.test_specifier.solc_mode.clone())
			.or_default()
			.remove(&event.test_specifier.case_idx);

		// Add information on the fact that the case failed to the report.
		let test_case_report = self.test_case_report(&event.test_specifier);
		test_case_report.status = Some(TestCaseStatus::Failed { reason: event.reason });
		self.handle_post_test_case_status_update(&event.test_specifier);
	}

	fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
		// Remove this from the set of cases we're tracking since it has completed.
		self.remaining_cases
			.entry(event.test_specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(event.test_specifier.solc_mode.clone())
			.or_default()
			.remove(&event.test_specifier.case_idx);

		// Add information on the fact that the case was ignored to the report.
		let test_case_report = self.test_case_report(&event.test_specifier);
		test_case_report.status = Some(TestCaseStatus::Ignored {
			reason: event.reason,
			additional_fields: event.additional_fields,
		});
		self.handle_post_test_case_status_update(&event.test_specifier);
	}

	fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
		let remaining_cases = self
			.remaining_cases
			.entry(specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(specifier.solc_mode.clone())
			.or_default();
		if !remaining_cases.is_empty() {
			return;
		}

		let case_status = self
			.report
			.test_case_information
			.entry(specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(specifier.solc_mode.clone())
			.or_default()
			.iter()
			.map(|(case_idx, case_report)| {
				(*case_idx, case_report.status.clone().expect("Can't be uninitialized"))
			})
			.collect::<BTreeMap<_, _>>();
		let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
			metadata_file_path: specifier.metadata_file_path.clone().into(),
			mode: specifier.solc_mode.clone(),
			case_status,
		};

		// According to the documentation on send, the sending fails if there are no more receiver
		// handles. Therefore, this isn't an error that we want to bubble up; if the send fails we
		// simply ignore the error.
		let _ = self.listener_tx.send(event);
	}

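	// Listener-side sketch (illustrative): a subscriber obtained through `Reporter::subscribe`
	// consumes these broadcasts; with a channel capacity of 1024, a slow listener may observe
	// `RecvError::Lagged` and should simply keep receiving:
	//
	//     let mut rx = reporter.subscribe().await?;
	//     loop {
	//         match rx.recv().await {
	//             Ok(event) => { /* react to the event */ },
	//             Err(broadcast::error::RecvError::Lagged(_)) => continue,
	//             Err(broadcast::error::RecvError::Closed) => break,
	//         }
	//     }
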
	fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
		let execution_information = self.execution_information(&ExecutionSpecifier {
			test_specifier: event.test_specifier,
			node_id: event.id,
			platform_identifier: event.platform_identifier,
		});
		execution_information.node = Some(TestCaseNodeInformation {
			id: event.id,
			platform_identifier: event.platform_identifier,
			connection_string: event.connection_string,
		});
	}

	fn handle_pre_link_contracts_compilation_succeeded_event(
		&mut self,
		event: PreLinkContractsCompilationSucceededEvent,
	) {
		let include_input = self.report.context.report_configuration().include_compiler_input;
		let include_output = self.report.context.report_configuration().include_compiler_output;

		let execution_information = self.execution_information(&event.execution_specifier);

		let compiler_input = if include_input { event.compiler_input } else { None };
		let compiler_output = if include_output { Some(event.compiler_output) } else { None };

		execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
			is_cached: event.is_cached,
			compiler_version: event.compiler_version,
			compiler_path: event.compiler_path,
			compiler_input,
			compiler_output,
		});
	}

	fn handle_post_link_contracts_compilation_succeeded_event(
		&mut self,
		event: PostLinkContractsCompilationSucceededEvent,
	) {
		let include_input = self.report.context.report_configuration().include_compiler_input;
		let include_output = self.report.context.report_configuration().include_compiler_output;

		let execution_information = self.execution_information(&event.execution_specifier);

		let compiler_input = if include_input { event.compiler_input } else { None };
		let compiler_output = if include_output { Some(event.compiler_output) } else { None };

		execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
			is_cached: event.is_cached,
			compiler_version: event.compiler_version,
			compiler_path: event.compiler_path,
			compiler_input,
			compiler_output,
		});
	}

	fn handle_pre_link_contracts_compilation_failed_event(
		&mut self,
		event: PreLinkContractsCompilationFailedEvent,
	) {
		let execution_information = self.execution_information(&event.execution_specifier);

		execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
			reason: event.reason,
			compiler_version: event.compiler_version,
			compiler_path: event.compiler_path,
			compiler_input: event.compiler_input,
		});
	}

	fn handle_post_link_contracts_compilation_failed_event(
		&mut self,
		event: PostLinkContractsCompilationFailedEvent,
	) {
		let execution_information = self.execution_information(&event.execution_specifier);

		execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
			reason: event.reason,
			compiler_version: event.compiler_version,
			compiler_path: event.compiler_path,
			compiler_input: event.compiler_input,
		});
	}

	fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
		self.execution_information(&event.execution_specifier).deployed_libraries =
			Some(event.libraries);
	}

	fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
		self.execution_information(&event.execution_specifier)
			.deployed_contracts
			.get_or_insert_default()
			.insert(event.contract_instance, event.address);
	}

	fn handle_completion(&mut self, _: CompletionEvent) {
		self.runner_rx.close();
	}

	fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
		self.report
			.test_case_information
			.entry(specifier.metadata_file_path.clone().into())
			.or_default()
			.entry(specifier.solc_mode.clone())
			.or_default()
			.entry(specifier.case_idx)
			.or_default()
	}

	fn execution_information(
		&mut self,
		specifier: &ExecutionSpecifier,
	) -> &mut ExecutionInformation {
		let test_case_report = self.test_case_report(&specifier.test_specifier);
		test_case_report
			.platform_execution
			.entry(specifier.platform_identifier)
			.or_default()
			.get_or_insert_default()
	}
}

#[serde_as]
#[derive(Clone, Debug, Serialize)]
pub struct Report {
	/// The context that the tool was started up with.
	pub context: Context,
	/// The list of corpus files that the tool found.
	pub corpora: Vec<Corpus>,
	/// The list of metadata files that were found by the tool.
	pub metadata_files: BTreeSet<MetadataFilePath>,
	/// Information relating to each test case.
	#[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
	pub test_case_information:
		BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
}

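// Note on the `serde_as` attribute above: JSON object keys must be strings, so the `Mode` and
// `CaseIdx` keys of the nested maps are serialized through their `Display` implementations.
// A report therefore nests roughly as (illustrative shape):
//
//     { "<metadata file>": { "<mode>": { "<case idx>": { ...TestCaseReport... } } } }
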
impl Report {
	pub fn new(context: Context) -> Self {
		Self {
			context,
			corpora: Default::default(),
			metadata_files: Default::default(),
			test_case_information: Default::default(),
		}
	}
}

#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
	/// Information on the status of the test case and whether it succeeded, failed, or was
	/// ignored.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub status: Option<TestCaseStatus>,
	/// Information related to the execution on one of the platforms.
	pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
@@ -454,93 +405,93 @@ pub struct TestCaseReport {
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
	/// The test case succeeded.
	Succeeded {
		/// The number of steps of the case that were executed.
		steps_executed: usize,
	},
	/// The test case failed.
	Failed {
		/// The reason for the failure of the test case.
		reason: String,
	},
	/// The test case was ignored. This variant carries information related to why it was ignored.
	Ignored {
		/// The reason behind the test case being ignored.
		reason: String,
		/// Additional fields that describe more information on why the test case is ignored.
		#[serde(flatten)]
		additional_fields: IndexMap<String, serde_json::Value>,
	},
}

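// With `#[serde(tag = "status")]` the variant name is emitted inline as a discriminant, so the
// serialized forms look roughly like (illustrative values):
//
//     { "status": "Succeeded", "steps_executed": 12 }
//     { "status": "Failed", "reason": "step 3 reverted" }
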
/// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
pub struct TestCaseNodeInformation {
	/// The ID of the node that this case is being executed on.
	pub id: usize,
	/// The platform of the node.
	pub platform_identifier: PlatformIdentifier,
	/// The connection string of the node.
	pub connection_string: String,
}

/// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize)]
pub struct ExecutionInformation {
	/// Information related to the node assigned to this test case.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub node: Option<TestCaseNodeInformation>,
	/// Information on the pre-link compiled contracts.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub pre_link_compilation_status: Option<CompilationStatus>,
	/// Information on the post-link compiled contracts.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub post_link_compilation_status: Option<CompilationStatus>,
	/// Information on the deployed libraries.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
	/// Information on the deployed contracts.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
}

/// Information related to compilation.
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
	/// The compilation was successful.
	Success {
		/// A flag with information on whether the compilation artifacts were cached or not.
		is_cached: bool,
		/// The version of the compiler used to compile the contracts.
		compiler_version: Version,
		/// The path of the compiler used to compile the contracts.
		compiler_path: PathBuf,
		/// The input provided to the compiler to compile the contracts. This is only included if
		/// the appropriate flag is set in the CLI context and if the contracts were not cached and
		/// the compiler was invoked.
		#[serde(skip_serializing_if = "Option::is_none")]
		compiler_input: Option<CompilerInput>,
		/// The output of the compiler. This is only included if the appropriate flag is set in the
		/// CLI context.
		#[serde(skip_serializing_if = "Option::is_none")]
		compiler_output: Option<CompilerOutput>,
	},
	/// The compilation failed.
	Failure {
		/// The failure reason.
		reason: String,
		/// The version of the compiler used to compile the contracts.
		#[serde(skip_serializing_if = "Option::is_none")]
		compiler_version: Option<Version>,
		/// The path of the compiler used to compile the contracts.
		#[serde(skip_serializing_if = "Option::is_none")]
		compiler_path: Option<PathBuf>,
		/// The input provided to the compiler to compile the contracts. This is only included if
		/// the appropriate flag is set in the CLI context and if the contracts were not cached and
		/// the compiler was invoked.
		#[serde(skip_serializing_if = "Option::is_none")]
		compiler_input: Option<CompilerInput>,
	},
}

+11
-11
@@ -8,30 +8,30 @@ use revive_dt_format::{case::CaseIdx, steps::StepPath};
use serde::{Deserialize, Serialize};

define_wrapper_type!(
	#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
	#[serde(transparent)]
	pub struct MetadataFilePath(PathBuf);
);

/// An absolute specifier for a test.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestSpecifier {
	pub solc_mode: Mode,
	pub metadata_file_path: PathBuf,
	pub case_idx: CaseIdx,
}

/// An absolute specifier for a test that also includes information about the node that it's
/// assigned to and what platform it belongs to.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExecutionSpecifier {
	pub test_specifier: Arc<TestSpecifier>,
	pub node_id: usize,
	pub platform_identifier: PlatformIdentifier,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier {
	pub execution_specifier: Arc<ExecutionSpecifier>,
	pub step_idx: StepPath,
}

@@ -9,14 +9,14 @@ use crate::{MetadataFilePath, TestCaseStatus};

#[derive(Clone, Debug)]
pub enum ReporterEvent {
	/// An event sent by the reporter once an entire metadata file and solc mode combination has
	/// finished execution.
	MetadataFileSolcModeCombinationExecutionCompleted {
		/// The path of the metadata file.
		metadata_file_path: MetadataFilePath,
		/// The Solc mode that this metadata file was executed in.
		mode: Mode,
		/// The status of each one of the cases.
		case_status: BTreeMap<CaseIdx, TestCaseStatus>,
	},
}

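// Consumer-side sketch (illustrative): a listener can summarize each completed metadata-file and
// mode combination as it arrives:
//
//     match event {
//         ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
//             metadata_file_path, mode, case_status,
//         } => {
//             let failed = case_status
//                 .values()
//                 .filter(|status| matches!(status, TestCaseStatus::Failed { .. }))
//                 .count();
//             /* render progress for (metadata_file_path, mode) */
//         },
//     }
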
+154
-152
@@ -8,8 +8,10 @@ use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::{
	corpus::Corpus,
	metadata::{ContractInstance, Metadata},
};
use semver::Version;
use tokio::sync::{broadcast, oneshot};

@@ -472,160 +474,160 @@ macro_rules! define_event {
}

define_event! {
	/// An event type that's sent by the test runners/drivers to the report aggregator.
	pub(crate) enum RunnerEvent {
		/// An event emitted by the reporter when it wishes to listen to events emitted by the
		/// aggregator.
		SubscribeToEvents {
			/// The channel on which the aggregator is to send back the receive side of the
			/// broadcast channel.
			tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
		},
		/// An event emitted by runners when they've discovered a corpus file.
		CorpusFileDiscovery {
			/// The contents of the corpus file.
			corpus: Corpus
		},
		/// An event emitted by runners when they've discovered a metadata file.
		MetadataFileDiscovery {
			/// The path of the metadata file discovered.
			path: MetadataFilePath,
			/// The content of the metadata file.
			metadata: Metadata
		},
		/// An event emitted by the runners when they discover a test case.
		TestCaseDiscovery {
			/// A specifier for the test that was discovered.
			test_specifier: Arc<TestSpecifier>,
		},
		/// An event emitted by the runners when a test case is ignored.
		TestIgnored {
			/// A specifier for the test that's been ignored.
			test_specifier: Arc<TestSpecifier>,
			/// A reason for the test to be ignored.
			reason: String,
			/// Additional fields that describe more information on why the test was ignored.
			additional_fields: IndexMap<String, serde_json::Value>
		},
		/// An event emitted by the runners when a test case has succeeded.
		TestSucceeded {
			/// A specifier for the test that succeeded.
			test_specifier: Arc<TestSpecifier>,
			/// The number of steps of the case that were executed by the driver.
			steps_executed: usize,
		},
		/// An event emitted by the runners when a test case has failed.
		TestFailed {
			/// A specifier for the test that failed.
			test_specifier: Arc<TestSpecifier>,
			/// A reason for the failure of the test.
			reason: String,
		},
		/// An event emitted when the test case is assigned a platform node.
		NodeAssigned {
			/// A specifier for the test that the assignment is for.
			test_specifier: Arc<TestSpecifier>,
			/// The ID of the node that this case is being executed on.
			id: usize,
			/// The identifier of the platform used.
			platform_identifier: PlatformIdentifier,
			/// The connection string of the node.
			connection_string: String,
		},
		/// An event emitted by the runners when the compilation of the contracts has succeeded
		/// on the pre-link contracts.
		PreLinkContractsCompilationSucceeded {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The version of the compiler used to compile the contracts.
			compiler_version: Version,
			/// The path of the compiler used to compile the contracts.
			compiler_path: PathBuf,
			/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
			/// anew.
			is_cached: bool,
			/// The input provided to the compiler - this is optional and not provided if the
			/// contracts were obtained from the cache.
			compiler_input: Option<CompilerInput>,
			/// The output of the compiler.
			compiler_output: CompilerOutput
		},
		/// An event emitted by the runners when the compilation of the contracts has succeeded
		/// on the post-link contracts.
		PostLinkContractsCompilationSucceeded {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The version of the compiler used to compile the contracts.
			compiler_version: Version,
			/// The path of the compiler used to compile the contracts.
			compiler_path: PathBuf,
			/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
			/// anew.
			is_cached: bool,
			/// The input provided to the compiler - this is optional and not provided if the
			/// contracts were obtained from the cache.
			compiler_input: Option<CompilerInput>,
			/// The output of the compiler.
			compiler_output: CompilerOutput
		},
		/// An event emitted by the runners when the compilation of the pre-link contract has
		/// failed.
		PreLinkContractsCompilationFailed {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The version of the compiler used to compile the contracts.
			compiler_version: Option<Version>,
			/// The path of the compiler used to compile the contracts.
			compiler_path: Option<PathBuf>,
			/// The input provided to the compiler - this is optional and not provided if the
			/// contracts were obtained from the cache.
			compiler_input: Option<CompilerInput>,
			/// The failure reason.
			reason: String,
		},
		/// An event emitted by the runners when the compilation of the post-link contract has
		/// failed.
		PostLinkContractsCompilationFailed {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The version of the compiler used to compile the contracts.
			compiler_version: Option<Version>,
			/// The path of the compiler used to compile the contracts.
			compiler_path: Option<PathBuf>,
			/// The input provided to the compiler - this is optional and not provided if the
			/// contracts were obtained from the cache.
			compiler_input: Option<CompilerInput>,
			/// The failure reason.
			reason: String,
		},
		/// An event emitted by the runners when a library has been deployed.
		LibrariesDeployed {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The addresses of the libraries that were deployed.
			libraries: BTreeMap<ContractInstance, Address>
		},
		/// An event emitted by the runners when they've deployed a new contract.
		ContractDeployed {
			/// A specifier for the execution that's taking place.
			execution_specifier: Arc<ExecutionSpecifier>,
			/// The instance name of the contract.
			contract_instance: ContractInstance,
			/// The address of the contract.
			address: Address
		},
		/// Reports the completion of the run.
		Completion {}
	}
}

/// An extension to the [`Reporter`] implemented by the macro.
impl RunnerEventReporter {
	pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
		let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
		self.report_subscribe_to_events_event(tx)
			.context("Failed to send subscribe request to reporter task")?;
		rx.await.map_err(Into::into)
	}
}

pub type Reporter = RunnerEventReporter;

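// Handshake sketch (illustrative): `subscribe` ships a oneshot sender to the aggregator, which
// answers with a fresh `broadcast::Receiver`, so a subscriber only observes events emitted after
// its subscription was processed:
//
//     let mut events = reporter.subscribe().await?;
//     let first = events.recv().await?;
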
@@ -1,12 +1,12 @@
//! Helper for caching the solc binaries.

use std::{
	collections::HashSet,
	fs::{File, create_dir_all},
	io::{BufWriter, Write},
	os::unix::fs::PermissionsExt,
	path::{Path, PathBuf},
	sync::LazyLock,
};

use semver::Version;
@@ -19,90 +19,71 @@ pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

pub(crate) async fn get_or_download(
	working_directory: &Path,
	downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> {
	let target_directory = working_directory
		.join(SOLC_CACHE_DIRECTORY)
		.join(downloader.version.to_string());
	let target_file = target_directory.join(downloader.target);

	let mut cache = SOLC_CACHER.lock().await;
	if cache.contains(&target_file) {
		tracing::debug!("using cached solc: {}", target_file.display());
		return Ok((downloader.version.clone(), target_file));
	}

	create_dir_all(&target_directory).with_context(|| {
		format!("Failed to create solc cache directory: {}", target_directory.display())
	})?;
	download_to_file(&target_file, downloader)
		.await
		.with_context(|| format!("Failed to write downloaded solc to {}", target_file.display()))?;
	cache.insert(target_file.clone());

	Ok((downloader.version.clone(), target_file))
}

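// Concurrency note: the `SOLC_CACHER` lock is held across the download itself, so two tasks
// requesting the same solc version cannot race; the second either hits the in-memory cache or
// finds the already-created file on disk once the first finishes.
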
async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
    let Ok(file) = File::create_new(path) else {
        return Ok(());
    };

    #[cfg(unix)]
    {
        let mut permissions = file
            .metadata()
            .with_context(|| format!("Failed to read metadata for {}", path.display()))?
            .permissions();
        permissions.set_mode(permissions.mode() | 0o111);
        file.set_permissions(permissions).with_context(|| {
            format!("Failed to set executable permissions on {}", path.display())
        })?;
    }

    let mut file = BufWriter::new(file);
    file.write_all(&downloader.download().await.context("Failed to download solc binary bytes")?)
        .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
    file.flush()
        .with_context(|| format!("Failed to flush file {}", path.display()))?;
    drop(file);

    #[cfg(target_os = "macos")]
    std::process::Command::new("xattr")
        .arg("-d")
        .arg("com.apple.quarantine")
        .arg(path)
        .stderr(std::process::Stdio::null())
        .stdout(std::process::Stdio::null())
        .spawn()
        .with_context(|| {
            format!("Failed to spawn xattr to remove quarantine attribute on {}", path.display())
        })?
        .wait()
        .with_context(|| {
            format!("Failed waiting for xattr operation to complete on {}", path.display())
        })?;

    Ok(())
}
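
The Unix branch above ORs the execute bits into the file's current mode. A self-contained sketch of just that step, using only the standard library (the helper name is illustrative):

#[cfg(unix)]
fn make_executable(file: &std::fs::File) -> std::io::Result<()> {
    use std::os::unix::fs::PermissionsExt;

    // Keep the existing mode and add u+x, g+x, and o+x on top of it.
    let mut permissions = file.metadata()?.permissions();
    permissions.set_mode(permissions.mode() | 0o111);
    file.set_permissions(permissions)
}
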
@@ -1,8 +1,8 @@
//! This module downloads solc binaries.

use std::{
    collections::HashMap,
    sync::{LazyLock, Mutex},
};

use revive_dt_common::types::VersionOrRequirement;

@@ -14,199 +14,158 @@ use crate::list::List;
use anyhow::Context as _;

pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
    LazyLock::new(Default::default);

impl List {
    pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json";
    pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json";
    pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json";
    pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json";

    /// Tries to download the list from the given URL.
    ///
    /// Caches the list retrieved from the `url` into [LIST_CACHE];
    /// subsequent calls with the same `url` will return the cached list.
    pub async fn download(url: &'static str) -> anyhow::Result<Self> {
        if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
            return Ok(list.clone());
        }

        let body: List = reqwest::get(url)
            .await
            .with_context(|| format!("Failed to GET solc list from {url}"))?
            .json()
            .await
            .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;

        LIST_CACHE.lock().unwrap().insert(url, body.clone());

        Ok(body)
    }
}

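A minimal sketch of the memoization that `download` documents, written as a test assumed to live inside this crate: the second call for the same URL is answered from `LIST_CACHE` rather than the network.

#[cfg(test)]
mod cache_behaviour {
    use crate::list::List;

    #[tokio::test]
    async fn second_download_is_served_from_the_cache() {
        let first = List::download(List::LINUX_URL).await.unwrap();
        // The same URL now hits LIST_CACHE instead of issuing another GET.
        let second = List::download(List::LINUX_URL).await.unwrap();
        assert_eq!(first, second); // `List` derives `Eq`/`PartialEq`.
    }
}
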
/// Download solc binaries from the official SolidityLang site
#[derive(Clone, Debug)]
pub struct SolcDownloader {
    pub version: Version,
    pub target: &'static str,
    pub list: &'static str,
}

impl SolcDownloader {
    pub const BASE_URL: &str = "https://binaries.soliditylang.org";

    pub const LINUX_NAME: &str = "linux-amd64";
    pub const MACOSX_NAME: &str = "macosx-amd64";
    pub const WINDOWS_NAME: &str = "windows-amd64";
    pub const WASM_NAME: &str = "wasm";

    async fn new(
        version: impl Into<VersionOrRequirement>,
        target: &'static str,
        list: &'static str,
    ) -> anyhow::Result<Self> {
        let version_or_requirement = version.into();
        match version_or_requirement {
            VersionOrRequirement::Version(version) => Ok(Self { version, target, list }),
            VersionOrRequirement::Requirement(requirement) => {
                let Some(version) = List::download(list)
                    .await
                    .with_context(|| format!("Failed to download solc builds list from {list}"))?
                    .builds
                    .into_iter()
                    .map(|build| build.version)
                    .filter(|version| requirement.matches(version))
                    .max()
                else {
                    anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
                };
                Ok(Self { version, target, list })
            },
        }
    }

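The `Requirement` arm above boils down to "pick the highest available version matching a semver requirement". A standalone sketch of that selection with the `semver` crate (the version list here is illustrative):

use semver::{Version, VersionReq};

/// Mirrors the `filter(...).max()` chain in `SolcDownloader::new`.
fn highest_matching(versions: &[Version], requirement: &VersionReq) -> Option<Version> {
    versions.iter().filter(|version| requirement.matches(version)).max().cloned()
}

#[test]
fn resolves_the_highest_matching_version() {
    let versions: Vec<Version> =
        ["0.7.6", "0.8.19", "0.8.28"].iter().map(|v| Version::parse(v).unwrap()).collect();
    let requirement = VersionReq::parse("^0.8").unwrap();
    assert_eq!(highest_matching(&versions, &requirement), Some(Version::parse("0.8.28").unwrap()));
}
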
    pub async fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await
    }

    pub async fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await
    }

    pub async fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await
    }

    pub async fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WASM_NAME, List::WASM_URL).await
    }

    /// Download the solc binary.
    ///
    /// Errors out if the download fails or if the digest of the downloaded file
    /// does not match the expected digest from the release [List].
    pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
        let builds = List::download(self.list)
            .await
            .with_context(|| format!("Failed to download solc builds list from {}", self.list))?
            .builds;
        let build = builds
            .iter()
            .find(|build| build.version == self.version)
            .ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
            .with_context(|| {
                format!(
                    "Requested solc version {} was not found in builds list fetched from {}",
                    self.version, self.list
                )
            })?;

        let path = build.path.clone();
        let expected_digest = build.sha256.strip_prefix("0x").unwrap_or(&build.sha256).to_string();
        let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());

        let file = reqwest::get(&url)
            .await
            .with_context(|| format!("Failed to GET solc binary from {url}"))?
            .bytes()
            .await
            .with_context(|| format!("Failed to read solc binary bytes from {url}"))?
            .to_vec();

        if hex::encode(Sha256::digest(&file)) != expected_digest {
            anyhow::bail!("sha256 mismatch for solc version {}", self.version);
        }

        Ok(file)
    }
}

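The integrity check above compares a hex-encoded SHA-256 against a digest that may carry a `0x` prefix. A standalone sketch of that comparison (crates assumed: `sha2` and `hex`, both already used by this module):

use sha2::{Digest, Sha256};

fn digest_matches(bytes: &[u8], expected: &str) -> bool {
    // The release list stores digests with an optional "0x" prefix.
    let expected = expected.strip_prefix("0x").unwrap_or(expected);
    hex::encode(Sha256::digest(bytes)) == expected
}

#[test]
fn tolerates_the_0x_prefix() {
    let bytes = b"solc";
    let digest = hex::encode(Sha256::digest(bytes));
    assert!(digest_matches(bytes, &digest));
    assert!(digest_matches(bytes, &format!("0x{digest}")));
}
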
#[cfg(test)]
mod tests {
    use crate::{download::SolcDownloader, list::List};

    #[tokio::test]
    async fn try_get_windows() {
        let version = List::download(List::WINDOWS_URL).await.unwrap().latest_release;
        SolcDownloader::windows(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_macosx() {
        let version = List::download(List::MACOSX_URL).await.unwrap().latest_release;
        SolcDownloader::macosx(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_linux() {
        let version = List::download(List::LINUX_URL).await.unwrap().latest_release;
        SolcDownloader::linux(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_wasm() {
        let version = List::download(List::WASM_URL).await.unwrap().latest_release;
        SolcDownloader::wasm(version).await.unwrap().download().await.unwrap();
    }
}

@@ -22,22 +22,22 @@ pub mod list;
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
    cache_directory: &Path,
    version: impl Into<VersionOrRequirement>,
    wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> {
    let downloader = if wasm {
        SolcDownloader::wasm(version).await
    } else if cfg!(target_os = "linux") {
        SolcDownloader::linux(version).await
    } else if cfg!(target_os = "macos") {
        SolcDownloader::macosx(version).await
    } else if cfg!(target_os = "windows") {
        SolcDownloader::windows(version).await
    } else {
        unimplemented!()
    }
    .context("Failed to initialize the Solc Downloader")?;

    get_or_download(cache_directory, &downloader).await
}

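A hypothetical caller of `download_solc` (the cache path is illustrative, and it is assumed that `semver::Version` converts into `VersionOrRequirement`, as the match in `SolcDownloader::new` suggests):

use std::path::Path;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let cache_directory = Path::new("workdir/solc-cache"); // assumed location
    let (version, binary) =
        download_solc(cache_directory, semver::Version::new(0, 8, 28), false).await?;
    println!("solc {version} cached at {}", binary.display());
    Ok(())
}
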
@@ -7,20 +7,20 @@ use serde::Deserialize;

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct List {
    pub builds: Vec<Build>,
    pub releases: HashMap<Version, String>,
    #[serde(rename = "latestRelease")]
    pub latest_release: Version,
}

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct Build {
    pub path: PathBuf,
    pub version: Version,
    pub build: String,
    #[serde(rename = "longVersion")]
    pub long_version: String,
    pub keccak256: String,
    pub sha256: String,
    pub urls: Vec<String>,
}

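A sketch of how the `latestRelease` rename deserializes, against a trimmed, illustrative JSON fixture (assumes `serde_json` as a dev-dependency and the `semver` crate's serde support for `Version`):

#[test]
fn deserializes_the_renamed_latest_release_field() {
    let json = r#"{ "builds": [], "releases": {}, "latestRelease": "0.8.28" }"#;
    let list: List = serde_json::from_str(json).unwrap();
    assert_eq!(list.latest_release, semver::Version::new(0, 8, 28));
}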