Compare commits

...

28 Commits

Author SHA1 Message Date
Omar Abdulla ad3d580df9 Make the code even more concurrent 2025-08-18 18:39:03 +03:00
Omar Abdulla d45b8da8e3 Format 2025-08-18 17:27:26 +03:00
Omar Abdulla 79ce4a239c Fix tests 2025-08-18 17:23:30 +03:00
Omar Abdulla 609ececea6 Better logging and fix concurrency issues 2025-08-18 16:16:33 +03:00
Omar Abdulla fb3959d345 Allow for auto display impl in declare wrapper type macro 2025-08-18 09:41:25 +03:00
Omar Abdulla 84026f9aee Cache the compiler versions 2025-08-18 06:34:26 +03:00
Omar Abdulla a7ce202a6b Merge remote-tracking branch 'origin/main' into feature/fix-os-fd-errors 2025-08-16 22:48:53 +03:00
Omar c58551803d Allow multiple files in corpus (#144) 2025-08-16 16:04:17 +00:00
Omar 185edcfad9 Cached compiler artifacts (#143)
* WIP compilation cache

* Implement a persistent compilation cache

* Correct the key and value encoding for the cache
2025-08-16 16:04:13 +00:00
James Wilson 09d56f5177 Redo how we parse and use modes (#125)
* WIP redo how we parse and use modes

* test expanding, too

* WIP integrate new Mode/ParsedMode into rest of code

* First pass integrated new mode bits

* fmt

* clippy

* Remove mode we no longer support from test metadata

* Address nits

* Add ability for compiler to opt out if it can't work with some Mode/version

* Elide viaIR input if compiler does not support it

* Improve test output a little; string modes and list ignored tests

* Move Mode to common crate

* constants.mod, and Display for CaseIdx to use it

* fmt

* Rename ModePipeline::E/Y

* Re-arrange Mode things; ParsedMode in format and Mode etc in common

* Move compile check to prepare_tests

* Remove now-unused deps

* clippy nits

* Update fallback tx weights to avoid out of gas errors

* Update kitchensink weights too and fmt

* Bump default geth timeout to 10s

* 30s timeout

* Improve geth stdout logging on failure

* fix line logging

* remove --networkid and arg, back to 5s timeout for geth
2025-08-16 11:38:17 +00:00
Omar Abdulla e19e0a4e7a Fix the OS FD error 2025-08-15 16:16:36 +03:00
Omar a59e287fa1 Add a cached fs abstraction (#141) 2025-08-14 15:21:05 +00:00
Omar f2045db0e9 Add compiler directives to metadata (#139) 2025-08-14 07:38:56 +00:00
Omar 5a11f44673 Misc features/improvements (#138)
* Implement various needed features and improvements

* Reorder the metadata struct

* Format comments
2025-08-13 13:50:06 +00:00
James Wilson 46aea0890d Split reporter and case runner, use channels to pass test reports (#137)
* Use channels to send data to reporting thread and avoid hangs / mutex / duration. Limit max concurrent tasks to avoid too many open files

* More appropriate name for driver/reporter task fns

* Back to parallelise individual cases, report individual cases, address grumbles

* newline before 'Failures' title in report
2025-08-13 13:10:26 +00:00
Omar 9b40c9b9e3 Add an EVM version filter (#136)
* Add an EVM version filter

* Update naming
2025-08-12 10:19:59 +00:00
Omar f67a9bf643 Refactor/ignore null values (#135)
* Skip serialization of null values

* Add support for comments in various steps
2025-08-12 08:55:21 +00:00
Omar 67d767ffde Implement storage empty assertion (#134) 2025-08-11 13:17:19 +00:00
Omar f7fbe094ec Balance assertions (#133)
* Make metadata serializable

* Refactor tests to use steps

* Add a balance assertion test step

* Test balance deserialization

* Box the test steps

* Permit size difference in step output
2025-08-11 12:11:16 +00:00
Omar 90b2dd4cfe Make metadata serializable (#132) 2025-08-10 21:57:41 +00:00
Omar 64d63ef999 Remove the provider cache (#121)
* Remove the provider cache

* Add timing information to the CLI report
2025-08-07 03:55:24 +00:00
Omar 757bfbe116 Add more resolvable variables (#120)
* Allow resolution of base fee

* Fix block difficulty resolution

* Allow for the resolution of gas price
2025-08-06 15:17:36 +00:00
Omar 8619e7feb0 Fix the transaction tracing issues (#118)
* Set the gc mode to archive in geth

* Add a maximum to the exponential backoff wait duration

* Edit the formatting of the CLI case reporter
2025-08-06 12:25:39 +00:00
Omar edba49b301 Use SolidityLang for solc downloads (#117) 2025-08-06 10:35:05 +00:00
Omar 9980926d40 Add a case ignore flag (#114)
* Added a resolver tied to a specific block

* Increase the number of private keys

* Increase kitchensink wait time to 60 seconds

* Add a case ignore flag
2025-08-04 16:40:53 +00:00
Omar ff993d44a5 Added a resolver tied to a specific block (#111)
* Added a resolver tied to a specific block

* Increase the number of private keys

* Increase kitchensink wait time to 60 seconds
2025-08-04 12:45:47 +00:00
Omar 8cbb1a9f77 Added basic console reporting (#110)
* Added basic console reporting

* Add some waiting period to the printing task

* Print to the stderr and print logs to stdout
2025-08-04 06:05:49 +00:00
Omar 56c2fe8c0c Parallelize Cases (#109)
* Parallelize over cases

* Rename the state and driver

* Parallelize execution

* Update the default config of the tool

* Make codebase async

* Fix machete

* Fix tests & clear node directories before startup

* Cleanup the cleanup logic

* Rename geth node
2025-08-01 11:00:08 +00:00
51 changed files with 4495 additions and 2339 deletions
+2
@@ -7,3 +7,5 @@ node_modules
 # We do not want to commit any log files that we produce from running the code locally so this is
 # added to the .gitignore file.
 *.log
+profile.json.gz
Generated
+651 -19
File diff suppressed because it is too large
+16 -5
@@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT/Apache-2.0"
 edition = "2024"
 repository = "https://github.com/paritytech/revive-differential-testing.git"
-rust-version = "1.85.0"
+rust-version = "1.87.0"
 [workspace.dependencies]
 revive-dt-common = { version = "0.1.0", path = "crates/common" }
@@ -25,29 +25,38 @@ revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
 alloy-primitives = "1.2.1"
 alloy-sol-types = "1.2.1"
 anyhow = "1.0"
+bson = { version = "2.15.0" }
+cacache = { version = "13.1.0" }
 clap = { version = "4", features = ["derive"] }
+dashmap = { version = "6.1.0" }
 foundry-compilers-artifacts = { version = "0.18.0" }
 futures = { version = "0.3.31" }
 hex = "0.4.3"
-reqwest = { version = "0.12.15", features = ["blocking", "json"] }
+regex = "1"
+moka = "0.12.10"
+reqwest = { version = "0.12.15", features = ["json"] }
 once_cell = "1.21"
+rayon = { version = "1.10" }
 semver = { version = "1.0", features = ["serde"] }
 serde = { version = "1.0", default-features = false, features = ["derive"] }
 serde_json = { version = "1.0", default-features = false, features = [
     "arbitrary_precision",
     "std",
+    "unbounded_depth",
 ] }
 sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
-tokio = { version = "1", default-features = false, features = [
+thiserror = "2"
+tokio = { version = "1.47.0", default-features = false, features = [
     "rt-multi-thread",
+    "process",
+    "rt",
 ] }
 uuid = { version = "1.8", features = ["v4"] }
-tracing = "0.1.41"
+tracing = { version = "0.1.41" }
+tracing-appender = { version = "0.2.3" }
 tracing-subscriber = { version = "0.3.19", default-features = false, features = [
     "fmt",
     "json",
@@ -82,3 +91,5 @@ features = [
 inherits = "release"
 lto = true
 codegen-units = 1
+[workspace.lints.clippy]
+13 -2
@@ -1,13 +1,24 @@
 {
     "modes": [
         "Y >=0.8.9",
-        "E",
-        "I"
+        "E"
     ],
     "cases": [
         {
             "name": "first",
             "inputs": [
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "expected_balance": "1233"
+                },
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "is_storage_empty": true
+                },
+                {
+                    "address": "0xdeadbeef00000000000000000000000000000042",
+                    "is_storage_empty": false
+                },
                 {
                     "instance": "WBTC_1",
                     "method": "#deployer",
+1
@@ -0,0 +1 @@
+7 -4
@@ -10,8 +10,11 @@ rust-version.workspace = true
 [dependencies]
 anyhow = { workspace = true }
-futures = { workspace = true }
-semver = { workspace = true }
-tracing = { workspace = true }
+moka = { workspace = true, features = ["sync"] }
 once_cell = { workspace = true }
-tokio = { workspace = true }
+semver = { workspace = true }
+serde = { workspace = true }
+tokio = { workspace = true, default-features = false, features = ["time"] }
+
+[lints]
+workspace = true
+49
@@ -0,0 +1,49 @@
//! This module implements a cached file system allowing for results to be stored in-memory rather
//! than being queried from the file system again.

use std::fs;
use std::io::{Error, Result};
use std::path::{Path, PathBuf};

use moka::sync::Cache;
use once_cell::sync::Lazy;

pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
    static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_CACHE.get(path.as_path()) {
        Some(content) => Ok(content),
        None => {
            let content = fs::read(path.as_path())?;
            READ_CACHE.insert(path, content.clone());
            Ok(content)
        }
    }
}

pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
    let content = read(path)?;
    String::from_utf8(content).map_err(|_| {
        Error::new(
            std::io::ErrorKind::InvalidData,
            "The contents of the file are not valid UTF8",
        )
    })
}

pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
    static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));

    let path = path.as_ref().canonicalize()?;
    match READ_DIR_CACHE.get(path.as_path()) {
        Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
        None => {
            let entries = fs::read_dir(path.as_path())?
                .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
                .collect();
            READ_DIR_CACHE.insert(path.clone(), entries);
            Ok(read_dir(path).unwrap())
        }
    }
}
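A minimal sketch of how a caller might use this cached file system (the metadata path below is hypothetical):

use revive_dt_common::cached_fs;

fn example() -> std::io::Result<()> {
    // The first call hits the disk and populates the cache; later calls for the
    // same canonicalized path are served from memory.
    let bytes = cached_fs::read("tests/metadata.json")?;
    let text = cached_fs::read_to_string("tests/metadata.json")?;
    assert_eq!(bytes.len(), text.len()); // a String's len() is its byte length
    Ok(())
}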
@@ -1,225 +0,0 @@
//! The alloy crate __requires__ a tokio runtime.
//! We contain any async rust right here.
use std::{any::Any, panic::AssertUnwindSafe, pin::Pin, thread};
use futures::FutureExt;
use once_cell::sync::Lazy;
use tokio::{
runtime::Builder,
sync::{mpsc::UnboundedSender, oneshot},
};
use tracing::Instrument;
/// A blocking async executor.
///
/// This struct exposes the abstraction of a blocking async executor. It is a global and static
/// executor, which means that no new instances of it need to be created; it's a singleton and can
/// be accessed by any thread that wants to perform some async computation on the blocking executor
/// thread.
///
/// The API of the blocking executor is designed to be natural and simple to use, and is not bound
/// to specific tasks or return types. The following is an example of using this executor to drive
/// an async computation:
///
/// ```rust
/// use revive_dt_common::concepts::*;
///
/// fn blocking_function() {
/// let result = BlockingExecutor::execute(async move {
/// tokio::time::sleep(std::time::Duration::from_secs(1)).await;
/// 0xFFu8
/// })
/// .expect("Computation failed");
///
/// assert_eq!(result, 0xFF);
/// }
/// ```
///
/// Users get to pass in their async tasks without needing to worry about putting them in a [`Box`],
/// [`Pin`], needing to perform down-casting, or the internal channel mechanism used by the runtime.
/// To the user, it just looks like a function that converts some async code into sync code.
///
/// This struct also handles panics that occur in the passed futures and converts them into errors
/// that can be handled by the user. This is done to keep the executor robust.
///
/// Internally, the executor communicates with the tokio runtime thread through channels which carry
/// the [`TaskMessage`] and the results of the execution.
pub struct BlockingExecutor;
impl BlockingExecutor {
pub fn execute<R>(future: impl Future<Output = R> + Send + 'static) -> Result<R, anyhow::Error>
where
R: Send + 'static,
{
// Note: The blocking executor is a singleton and therefore we store its state in a static
// so that it's assigned only once. Additionally, when we set the state of the executor we
// spawn the thread where the async runtime runs.
static STATE: Lazy<ExecutorState> = Lazy::new(|| {
tracing::trace!("Initializing the BlockingExecutor state");
// All communication with the tokio runtime thread happens over mpsc channels where the
// producers here are the threads that want to run async tasks and the consumer here is
// the tokio runtime thread.
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<TaskMessage>();
thread::spawn(move || {
tracing::info!(
thread_id = ?std::thread::current().id(),
"Starting async runtime thread"
);
let runtime = Builder::new_current_thread()
.enable_all()
.build()
.expect("Failed to create the async runtime");
runtime.block_on(async move {
while let Some(TaskMessage {
future: task,
response_tx: response_channel,
}) = rx.recv().await
{
tracing::trace!("Received a new future to execute");
tokio::spawn(async move {
// One of the things that the blocking executor does is that it allows
// us to catch panics if they occur. By wrapping the given future in an
// AssertUnwindSafe::catch_unwind we are able to catch all panic unwinds
// in the given future and convert them into errors.
let task = AssertUnwindSafe(task).catch_unwind();
let result = task.await;
let _ = response_channel.send(result);
});
}
})
});
ExecutorState { tx }
});
// We need to perform blocking synchronous communication between the current thread and the tokio
// runtime thread to carry the result of the async computation, and the oneshot channels from tokio
// allow us to do that. The sender side of the channel will be given to the tokio runtime thread to
// send the result when the computation is completed, and the receiving side will be kept with this
// thread to wait for the response of the async task to come back.
let (response_tx, response_rx) =
oneshot::channel::<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>();
// The tokio runtime thread expects a Future<Output = Box<dyn Any + Send>> + Send to be
// sent to it to execute. However, this function has a typed Future<Output = R> + Send and
// therefore we need to change the type of the future to fit what the runtime thread expects
// in the task message. In doing this conversion, we lose some of the type information since
// we're converting R => dyn Any. However, we will perform down-casting on the result to
// convert it back into R.
let future = Box::pin(
async move { Box::new(future.await) as Box<dyn Any + Send> }.in_current_span(),
);
let task = TaskMessage::new(future, response_tx);
if let Err(error) = STATE.tx.send(task) {
tracing::error!(?error, "Failed to send the task to the blocking executor");
anyhow::bail!("Failed to send the task to the blocking executor: {error:?}")
}
let result = match response_rx.blocking_recv() {
Ok(result) => result,
Err(error) => {
tracing::error!(
?error,
"Failed to get the response from the blocking executor"
);
anyhow::bail!("Failed to get the response from the blocking executor: {error:?}")
}
};
let result = match result {
Ok(result) => result,
Err(error) => {
tracing::error!(?error, "An error occurred when running the async task");
anyhow::bail!("An error occurred when running the async task: {error:?}")
}
};
Ok(*result
.downcast::<R>()
.expect("An error occurred when downcasting into R. This is a bug"))
}
}
/// Represents the state of the async runtime. This runtime is designed to be a singleton runtime
/// which means that in the current running program there's just a single thread that has an async
/// runtime.
struct ExecutorState {
/// The sending side of the task messages channel. This is used by all of the other threads to
/// communicate with the async runtime thread.
tx: UnboundedSender<TaskMessage>,
}
/// Represents a message that contains an asynchronous task that's to be executed by the runtime
/// as well as a way for the runtime to report back on the result of the execution.
struct TaskMessage {
/// The task that's being requested to run. This is a future that returns an object that does
/// implement [`Any`] and [`Send`] to allow it to be sent between the requesting thread and the
/// async thread.
future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
/// A one shot sender channel where the sender of the task is expecting to hear back on the
/// result of the task.
response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
}
impl TaskMessage {
pub fn new(
future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
) -> Self {
Self {
future,
response_tx,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn simple_future_works() {
// Act
let result = BlockingExecutor::execute(async move {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
0xFFu8
})
.unwrap();
// Assert
assert_eq!(result, 0xFFu8);
}
#[test]
#[allow(unreachable_code, clippy::unreachable)]
fn panics_in_futures_are_caught() {
// Act
let result = BlockingExecutor::execute(async move {
panic!(
"If this panic causes, well, a panic, then this is an issue. If it's caught then all good!"
);
0xFFu8
});
// Assert
assert!(result.is_err());
// Act
let result = BlockingExecutor::execute(async move {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
0xFFu8
})
.unwrap();
// Assert
assert_eq!(result, 0xFFu8)
}
}
-3
@@ -1,3 +0,0 @@
mod blocking_executor;
pub use blocking_executor::*;
+22
@@ -0,0 +1,22 @@
use std::{
    fs::{read_dir, remove_dir_all, remove_file},
    path::Path,
};

use anyhow::Result;

/// This function clears the passed directory of all of the files and directories contained within
/// it, without deleting the directory itself.
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
    for entry in read_dir(path.as_ref())? {
        let entry = entry?;
        let entry_path = entry.path();
        if entry_path.is_file() {
            remove_file(entry_path)?
        } else {
            remove_dir_all(entry_path)?
        }
    }
    Ok(())
}
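As a usage sketch (the workdir path is hypothetical), this clears a node's working directory in place rather than removing and recreating it:

use revive_dt_common::fs::clear_directory;

fn reset_workdir() -> anyhow::Result<()> {
    // Removes every file and subdirectory under the path, but keeps the
    // directory itself.
    clear_directory("/tmp/revive-dt-workdir")?;
    Ok(())
}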
+3
@@ -0,0 +1,3 @@
mod clear_dir;
pub use clear_dir::*;
+3
@@ -0,0 +1,3 @@
mod poll;
pub use poll::*;
+69
@@ -0,0 +1,69 @@
use std::ops::ControlFlow;
use std::time::Duration;
use anyhow::{Result, anyhow};
const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
/// Polls a fallible future for some period of time and errors if it fails to get a result within
/// that period.
///
/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
/// repeatedly (with some wait period between calls) until the future returns a
/// [`ControlFlow::Break`], or until it returns an [`Err`], in which case the function stops
/// polling and returns the error.
///
/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
/// the permitted polling duration, then this function returns an [`Err`].
///
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
polling_duration: Duration,
polling_wait_behavior: PollingWaitBehavior,
mut future: impl FnMut() -> F,
) -> Result<O>
where
F: Future<Output = Result<ControlFlow<O, ()>>>,
{
let mut retries = 0;
let mut total_wait_duration = Duration::ZERO;
let max_allowed_wait_duration = polling_duration;
loop {
if total_wait_duration >= max_allowed_wait_duration {
break Err(anyhow!(
"Polling failed after {} retries and a total of {:?} of wait time",
retries,
total_wait_duration
));
}
match future().await? {
ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration,
PollingWaitBehavior::ExponentialBackoff => {
Duration::from_secs(2u64.pow(retries))
.min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
}
};
let next_wait_duration =
next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
total_wait_duration += next_wait_duration;
retries += 1;
tokio::time::sleep(next_wait_duration).await;
}
ControlFlow::Break(output) => {
break Ok(output);
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
Constant(Duration),
#[default]
ExponentialBackoff,
}
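A sketch of how this polling helper might be driven, assuming a hypothetical node_is_up readiness probe:

use std::ops::ControlFlow;
use std::time::Duration;

use revive_dt_common::futures::{poll, PollingWaitBehavior};

async fn node_is_up() -> bool {
    false // hypothetical readiness probe, stubbed out for illustration
}

async fn wait_for_node() -> anyhow::Result<u16> {
    // Retries the closure until it breaks with a value or the 30 second budget
    // is exhausted, sleeping 2^n seconds (capped at 60s) between attempts.
    poll(
        Duration::from_secs(30),
        PollingWaitBehavior::ExponentialBackoff,
        || async {
            if node_is_up().await {
                Ok(ControlFlow::Break(8545u16))
            } else {
                Ok(ControlFlow::Continue(()))
            }
        },
    )
    .await
}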
@@ -1,4 +1,8 @@
-use std::{borrow::Cow, collections::HashSet, path::PathBuf};
+use std::{
+    borrow::Cow,
+    collections::HashSet,
+    path::{Path, PathBuf},
+};

 /// An iterator that finds files of a certain extension in the provided directory. You can think of
 /// this as a glob pattern similar to: `${path}/**/*.md`
@@ -15,14 +19,20 @@ pub struct FilesWithExtensionIterator {
     /// this vector then they will be returned when the [`Iterator::next`] method is called. If not,
     /// then we visit one of the next directories to search.
     files_matching_allowed_extensions: Vec<PathBuf>,
+    /// This option controls if the cached file system should be used or not. This could be better
+    /// for certain cases where the entries in the directories do not change and therefore caching
+    /// can be used.
+    use_cached_fs: bool,
 }

 impl FilesWithExtensionIterator {
-    pub fn new(root_directory: PathBuf) -> Self {
+    pub fn new(root_directory: impl AsRef<Path>) -> Self {
         Self {
             allowed_extensions: Default::default(),
-            directories_to_search: vec![root_directory],
+            directories_to_search: vec![root_directory.as_ref().to_path_buf()],
             files_matching_allowed_extensions: Default::default(),
+            use_cached_fs: Default::default(),
         }
     }
@@ -33,6 +43,11 @@ impl FilesWithExtensionIterator {
         self.allowed_extensions.insert(allowed_extension.into());
         self
     }
+
+    pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
+        self.use_cached_fs = use_cached_fs;
+        self
+    }
 }

 impl Iterator for FilesWithExtensionIterator {
@@ -45,16 +60,19 @@ impl Iterator for FilesWithExtensionIterator {
         let directory_to_search = self.directories_to_search.pop()?;
-        // Read all of the entries in the directory. If we failed to read this dir's entries then we
-        // elect to just ignore it and look in the next directory, we do that by calling the next
-        // method again on the iterator, which is an intentional decision that we made here instead
-        // of panicking.
-        let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
-            return self.next();
-        };
+        let iterator = if self.use_cached_fs {
+            let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
+                return self.next();
+            };
+            Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
+        } else {
+            let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
+                return self.next();
+            };
+            Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
+        };
-        for entry in dir_entries.flatten() {
-            let entry_path = entry.path();
+        for entry_path in iterator.flatten() {
             if entry_path.is_dir() {
                 self.directories_to_search.push(entry_path)
             } else if entry_path.is_file()
+3 -1
@@ -1,7 +1,9 @@
 //! This crate provides common concepts, functionality, types, macros, and more that other crates in
 //! the workspace can benefit from.
-pub mod concepts;
+pub mod cached_fs;
+pub mod fs;
+pub mod futures;
 pub mod iterators;
 pub mod macros;
 pub mod types;
@@ -1,3 +1,14 @@
+#[macro_export]
+macro_rules! impl_for_wrapper {
+    (Display, $ident: ident) => {
+        impl std::fmt::Display for $ident {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                std::fmt::Display::fmt(&self.0, f)
+            }
+        }
+    };
+}
+
 /// Defines wrappers around types.
 ///
 /// For example, the macro invocation seen below:
@@ -42,7 +53,13 @@
 macro_rules! define_wrapper_type {
     (
         $(#[$meta: meta])*
-        $vis:vis struct $ident: ident($ty: ty);
+        $vis:vis struct $ident: ident($ty: ty)
+        $(
+            impl $($trait_ident: ident),*
+        )?
+        ;
     ) => {
         $(#[$meta])*
         $vis struct $ident($ty);
@@ -98,9 +115,15 @@ macro_rules! define_wrapper_type {
                 value.0
             }
         }
+
+        $(
+            $(
+                $crate::macros::impl_for_wrapper!($trait_ident, $ident);
+            )*
+        )?
     };
 }
 /// Technically not needed but this allows for the macro to be found in the `macros` module of the
 /// crate in addition to being found in the root of the crate.
-pub use define_wrapper_type;
+pub use {define_wrapper_type, impl_for_wrapper};
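To illustrate the extended grammar, a hedged sketch of an invocation that opts into the generated Display implementation (the CaseIdx wrapper is illustrative, echoing the commit that introduced Display for CaseIdx):

revive_dt_common::define_wrapper_type! {
    /// The index of a test case (hypothetical wrapper for illustration).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    pub struct CaseIdx(usize) impl Display;
}

The trailing impl Display list expands through impl_for_wrapper!, so the wrapper forwards formatting straight to its inner value.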
+2
@@ -1,3 +1,5 @@
+mod mode;
 mod version_or_requirement;
+pub use mode::*;
 pub use version_or_requirement::*;
+167
@@ -0,0 +1,167 @@
use crate::types::VersionOrRequirement;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::str::FromStr;
/// This represents a mode that a given test should be run with, if possible.
///
/// We obtain this by taking a [`ParsedMode`], which may be looser or more strict
/// in its requirements, and then expanding it out into a list of [`Mode`]s.
///
/// Use [`ParsedMode::to_test_modes()`] to do this.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Mode {
pub pipeline: ModePipeline,
pub optimize_setting: ModeOptimizerSetting,
pub version: Option<semver::VersionReq>,
}
impl Display for Mode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.pipeline.fmt(f)?;
f.write_str(" ")?;
self.optimize_setting.fmt(f)?;
if let Some(version) = &self.version {
f.write_str(" ")?;
version.fmt(f)?;
}
Ok(())
}
}
impl Mode {
/// Return all of the available mode combinations.
pub fn all() -> impl Iterator<Item = Mode> {
ModePipeline::test_cases().flat_map(|pipeline| {
ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: None,
})
})
}
/// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
/// the requirement is present on the object. Otherwise, the passed default version is used.
pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
match self.version {
Some(ref requirement) => requirement.clone().into(),
None => default.into(),
}
}
}
/// What do we want the compiler to do?
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModePipeline {
/// Compile Solidity code via Yul IR
ViaYulIR,
/// Compile Solidity directly to assembly
ViaEVMAssembly,
}
impl FromStr for ModePipeline {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
// via Yul IR
"Y" => Ok(ModePipeline::ViaYulIR),
// Don't go via Yul IR
"E" => Ok(ModePipeline::ViaEVMAssembly),
// Anything else that we see isn't a mode at all
_ => Err(anyhow::anyhow!(
"Unsupported pipeline '{s}': expected 'Y' or 'E'"
)),
}
}
}
impl Display for ModePipeline {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ModePipeline::ViaYulIR => f.write_str("Y"),
ModePipeline::ViaEVMAssembly => f.write_str("E"),
}
}
}
impl ModePipeline {
/// Should we go via Yul IR?
pub fn via_yul_ir(&self) -> bool {
matches!(self, ModePipeline::ViaYulIR)
}
/// An iterator over the available pipelines that we'd like to test,
/// when an explicit pipeline was not specified.
pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
[ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum ModeOptimizerSetting {
/// 0 / -: Don't apply any optimizations
M0,
/// 1: Apply less than default optimizations
M1,
/// 2: Apply the default optimizations
M2,
/// 3 / +: Apply aggressive optimizations
M3,
/// s: Optimize for size
Ms,
/// z: Aggressively optimize for size
Mz,
}
impl FromStr for ModeOptimizerSetting {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"M0" => Ok(ModeOptimizerSetting::M0),
"M1" => Ok(ModeOptimizerSetting::M1),
"M2" => Ok(ModeOptimizerSetting::M2),
"M3" => Ok(ModeOptimizerSetting::M3),
"Ms" => Ok(ModeOptimizerSetting::Ms),
"Mz" => Ok(ModeOptimizerSetting::Mz),
_ => Err(anyhow::anyhow!(
"Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
)),
}
}
}
impl Display for ModeOptimizerSetting {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ModeOptimizerSetting::M0 => f.write_str("M0"),
ModeOptimizerSetting::M1 => f.write_str("M1"),
ModeOptimizerSetting::M2 => f.write_str("M2"),
ModeOptimizerSetting::M3 => f.write_str("M3"),
ModeOptimizerSetting::Ms => f.write_str("Ms"),
ModeOptimizerSetting::Mz => f.write_str("Mz"),
}
}
}
impl ModeOptimizerSetting {
/// An iterator over the available optimizer settings that we'd like to test,
/// when an explicit optimizer setting was not specified.
pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
[
// No optimizations:
ModeOptimizerSetting::M0,
// Aggressive optimizations:
ModeOptimizerSetting::M3,
]
.into_iter()
}
/// Are any optimizations enabled?
pub fn optimizations_enabled(&self) -> bool {
!matches!(self, ModeOptimizerSetting::M0)
}
}
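A short sketch of how these types compose (parse the individual components, then expand the combinations that Mode::all() yields):

use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};

fn demo() -> anyhow::Result<()> {
    // FromStr parsing, as implemented above.
    let pipeline: ModePipeline = "Y".parse()?;
    let optimize_setting: ModeOptimizerSetting = "M3".parse()?;
    assert!(pipeline.via_yul_ir());
    assert!(optimize_setting.optimizations_enabled());

    // Two pipelines (Y, E) x two tested optimizer settings (M0, M3) = 4 modes.
    assert_eq!(Mode::all().count(), 4);
    Ok(())
}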
+5
@@ -18,8 +18,13 @@ revive-common = { workspace = true }
 alloy = { workspace = true }
 alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
+dashmap = { workspace = true }
 foundry-compilers-artifacts = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 tracing = { workspace = true }
+tokio = { workspace = true }
+
+[lints]
+workspace = true
+4
@@ -0,0 +1,4 @@
use semver::Version;
/// This is the first version of solc that supports the `--via-ir` flag / "viaIR" input JSON.
pub const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
+61 -14
@@ -3,9 +3,10 @@
 //! - Polkadot revive resolc compiler
 //! - Polkadot revive Wasm compiler
+mod constants;
 use std::{
     collections::HashMap,
-    fs::read_to_string,
     hash::Hash,
     path::{Path, PathBuf},
 };
@@ -16,9 +17,13 @@ use semver::Version;
 use serde::{Deserialize, Serialize};
 use revive_common::EVMVersion;
+use revive_dt_common::cached_fs::read_to_string;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::Arguments;
+// Re-export this as it's a part of the compiler interface.
+pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
 pub mod revive_js;
 pub mod revive_resolc;
 pub mod solc;
@@ -33,28 +38,36 @@ pub trait SolidityCompiler {
         &self,
         input: CompilerInput,
         additional_options: Self::Options,
-    ) -> anyhow::Result<CompilerOutput>;
+    ) -> impl Future<Output = anyhow::Result<CompilerOutput>>;
     fn new(solc_executable: PathBuf) -> Self;
     fn get_compiler_executable(
         config: &Arguments,
         version: impl Into<VersionOrRequirement>,
-    ) -> anyhow::Result<PathBuf>;
+    ) -> impl Future<Output = anyhow::Result<PathBuf>>;
-    fn version(&self) -> anyhow::Result<Version>;
+    fn version(&self) -> impl Future<Output = anyhow::Result<Version>>;
+
+    /// Does the compiler support the provided mode and version settings?
+    fn supports_mode(
+        compiler_version: &Version,
+        optimize_setting: ModeOptimizerSetting,
+        pipeline: ModePipeline,
+    ) -> bool;
 }
 /// The generic compilation input configuration.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CompilerInput {
-    pub enable_optimization: Option<bool>,
-    pub via_ir: Option<bool>,
+    pub pipeline: Option<ModePipeline>,
+    pub optimization: Option<ModeOptimizerSetting>,
     pub evm_version: Option<EVMVersion>,
     pub allow_paths: Vec<PathBuf>,
     pub base_path: Option<PathBuf>,
     pub sources: HashMap<PathBuf, String>,
     pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
+    pub revert_string_handling: Option<RevertString>,
 }
 /// The generic compilation output configuration.
@@ -84,25 +97,26 @@ where
     pub fn new() -> Self {
         Self {
             input: CompilerInput {
-                enable_optimization: Default::default(),
-                via_ir: Default::default(),
+                pipeline: Default::default(),
+                optimization: Default::default(),
                 evm_version: Default::default(),
                 allow_paths: Default::default(),
                 base_path: Default::default(),
                 sources: Default::default(),
                 libraries: Default::default(),
+                revert_string_handling: Default::default(),
             },
             additional_options: T::Options::default(),
         }
     }
-    pub fn with_optimization(mut self, value: impl Into<Option<bool>>) -> Self {
-        self.input.enable_optimization = value.into();
+    pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
+        self.input.optimization = value.into();
         self
     }
-    pub fn with_via_ir(mut self, value: impl Into<Option<bool>>) -> Self {
-        self.input.via_ir = value.into();
+    pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
+        self.input.pipeline = value.into();
         self
     }
@@ -142,16 +156,49 @@ where
         self
     }
+    pub fn with_revert_string_handling(
+        mut self,
+        revert_string_handling: impl Into<Option<RevertString>>,
+    ) -> Self {
+        self.input.revert_string_handling = revert_string_handling.into();
+        self
+    }
     pub fn with_additional_options(mut self, options: impl Into<T::Options>) -> Self {
         self.additional_options = options.into();
         self
     }
-    pub fn try_build(self, compiler_path: impl AsRef<Path>) -> anyhow::Result<CompilerOutput> {
-        T::new(compiler_path.as_ref().to_path_buf()).build(self.input, self.additional_options)
+    pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
+        callback(self)
+    }
+
+    pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
+        callback(self)
+    }
+
+    pub async fn try_build(
+        self,
+        compiler_path: impl AsRef<Path>,
+    ) -> anyhow::Result<CompilerOutput> {
+        T::new(compiler_path.as_ref().to_path_buf())
+            .build(self.input, self.additional_options)
+            .await
     }
     pub fn input(&self) -> CompilerInput {
         self.input.clone()
     }
 }
+
+/// Defines how the compiler should handle revert strings.
+#[derive(
+    Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
+)]
+pub enum RevertString {
+    #[default]
+    Default,
+    Debug,
+    Strip,
+    VerboseDebug,
+}
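A hedged sketch of the resulting builder flow (assuming with_source returns anyhow::Result<Self>, as the tests further down suggest):

use std::path::PathBuf;

use revive_dt_compiler::{
    Compiler, ModeOptimizerSetting, ModePipeline, RevertString, solc::Solc,
};

async fn compile_sketch(compiler_path: PathBuf) -> anyhow::Result<()> {
    let _output = Compiler::<Solc>::new()
        .with_pipeline(ModePipeline::ViaYulIR)
        .with_optimization(ModeOptimizerSetting::M3)
        .with_revert_string_handling(RevertString::Debug)
        .with_source("./tests/assets/array_one_element/main.sol")?
        // try_build is now async, so the compilation must be awaited.
        .try_build(compiler_path)
        .await?;
    Ok(())
}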
+61 -19
@@ -4,9 +4,10 @@
 use std::{
     path::PathBuf,
     process::{Command, Stdio},
+    sync::LazyLock,
 };
-use alloy::json_abi::JsonAbi;
+use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::Arguments;
 use revive_solc_json_interface::{
@@ -15,10 +16,13 @@ use revive_solc_json_interface::{
     SolcStandardJsonOutput,
 };
-use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
+use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
+use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
+use alloy::json_abi::JsonAbi;
 use anyhow::Context;
 use semver::Version;
+use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 // TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the
 // specified solc compiler. I believe that currently we completely ignore the specified solc binary
@@ -35,20 +39,28 @@ impl SolidityCompiler for Resolc {
     type Options = Vec<String>;
     #[tracing::instrument(level = "debug", ret)]
-    fn build(
+    async fn build(
         &self,
         CompilerInput {
-            enable_optimization,
-            // Ignored and not honored since this is required for the resolc compilation.
-            via_ir: _via_ir,
+            pipeline,
+            optimization,
             evm_version,
             allow_paths,
             base_path,
             sources,
             libraries,
+            // TODO: this is currently not being handled since there is no way to pass it into
+            // resolc. So, we need to go back to this later once it's supported.
+            revert_string_handling: _,
         }: CompilerInput,
         additional_options: Self::Options,
     ) -> anyhow::Result<CompilerOutput> {
+        if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
+            anyhow::bail!(
+                "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
+            );
+        }
         let input = SolcStandardJsonInput {
             language: SolcStandardJsonInputLanguage::Solidity,
             sources: sources
@@ -77,7 +89,9 @@ impl SolidityCompiler for Resolc {
                 output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
                 via_ir: Some(true),
                 optimizer: SolcStandardJsonInputSettingsOptimizer::new(
-                    enable_optimization.unwrap_or(false),
+                    optimization
+                        .unwrap_or(ModeOptimizerSetting::M0)
+                        .optimizations_enabled(),
                     None,
                     &Version::new(0, 0, 0),
                     false,
@@ -87,7 +101,7 @@ impl SolidityCompiler for Resolc {
             },
         };
-        let mut command = Command::new(&self.resolc_path);
+        let mut command = AsyncCommand::new(&self.resolc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -109,9 +123,10 @@ impl SolidityCompiler for Resolc {
         let mut child = command.spawn()?;
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        serde_json::to_writer(stdin_pipe, &input)?;
+        let serialized_input = serde_json::to_vec(&input)?;
+        stdin_pipe.write_all(&serialized_input).await?;
-        let output = child.wait_with_output()?;
+        let output = child.wait_with_output().await?;
         let stdout = output.stdout;
         let stderr = output.stderr;
@@ -195,7 +210,7 @@ impl SolidityCompiler for Resolc {
         Resolc { resolc_path }
     }
-    fn get_compiler_executable(
+    async fn get_compiler_executable(
         config: &Arguments,
         _version: impl Into<VersionOrRequirement>,
     ) -> anyhow::Result<PathBuf> {
@@ -206,16 +221,23 @@ impl SolidityCompiler for Resolc {
         Ok(PathBuf::from("resolc"))
     }
-    fn version(&self) -> anyhow::Result<semver::Version> {
-        // Logic for parsing the resolc version from the following string:
-        // Solidity frontend for the revive compiler version 0.3.0+commit.b238913.llvm-18.1.8
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache of the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object
+        /// is not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+        match VERSION_CACHE.entry(self.resolc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
                 let output = Command::new(self.resolc_path.as_path())
                     .arg("--version")
                     .stdout(Stdio::piped())
                     .spawn()?
                     .wait_with_output()?
                     .stdout;
                 let output = String::from_utf8_lossy(&output);
                 let version_string = output
                     .split("version ")
@@ -225,7 +247,25 @@ impl SolidityCompiler for Resolc {
                     .next()
                     .context("Version parsing failed")?;
-        Version::parse(version_string).map_err(Into::into)
+                let version = Version::parse(version_string)?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
+    fn supports_mode(
+        compiler_version: &Version,
+        _optimize_setting: ModeOptimizerSetting,
+        pipeline: ModePipeline,
+    ) -> bool {
+        // We only support the Y (i.e. compile via Yul IR) mode here, which also means that we can
+        // only use solc version 0.8.13 and above. We must always compile via Yul IR as resolc
+        // needs this to translate to LLVM IR and then RISC-V.
+        pipeline == ModePipeline::ViaYulIR
+            && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
     }
 }
@@ -233,15 +273,17 @@ impl SolidityCompiler for Resolc {
 mod test {
     use super::*;
-    #[test]
-    fn compiler_version_can_be_obtained() {
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
         // Arrange
         let args = Arguments::default();
-        let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6)).unwrap();
+        let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
         let compiler = Resolc::new(path);
         // Act
-        let version = compiler.version();
+        let version = compiler.version().await;
         // Assert
         let _ = version.expect("Failed to get version");
+92 -19
@@ -4,13 +4,16 @@
 use std::{
     path::PathBuf,
     process::{Command, Stdio},
+    sync::LazyLock,
 };
+use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::Arguments;
 use revive_dt_solc_binaries::download_solc;
-use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
+use super::constants::SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
+use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
 use anyhow::Context;
 use foundry_compilers_artifacts::{
@@ -21,6 +24,7 @@ use foundry_compilers_artifacts::{
     solc::*,
 };
 use semver::Version;
+use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 #[derive(Debug)]
 pub struct Solc {
@@ -31,19 +35,31 @@ impl SolidityCompiler for Solc {
     type Options = ();
     #[tracing::instrument(level = "debug", ret)]
-    fn build(
+    async fn build(
         &self,
         CompilerInput {
-            enable_optimization,
-            via_ir,
+            pipeline,
+            optimization,
             evm_version,
             allow_paths,
             base_path,
             sources,
             libraries,
+            revert_string_handling,
         }: CompilerInput,
         _: Self::Options,
     ) -> anyhow::Result<CompilerOutput> {
+        let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR;
+
+        // Be careful to entirely omit the viaIR field if the compiler does not support it,
+        // as it will error if you provide fields it does not know about. Because
+        // `supports_mode` is called prior to instantiating a compiler, we should never
+        // ask for something which is invalid.
+        let via_ir = match (pipeline, compiler_supports_via_ir) {
+            (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
+            (_pipeline, false) => None,
+        };
+
         let input = SolcInput {
             language: SolcLanguage::Solidity,
             sources: Sources(
@@ -54,7 +70,7 @@ impl SolidityCompiler for Solc {
             ),
             settings: Settings {
                 optimizer: Optimizer {
-                    enabled: enable_optimization,
+                    enabled: optimization.map(|o| o.optimizations_enabled()),
                     details: Some(Default::default()),
                     ..Default::default()
                 },
@@ -86,11 +102,20 @@ impl SolidityCompiler for Solc {
                     })
                     .collect(),
                 },
+                debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
+                    revert_strings: match revert_string_handling {
+                        crate::RevertString::Default => Some(RevertStrings::Default),
+                        crate::RevertString::Debug => Some(RevertStrings::Debug),
+                        crate::RevertString::Strip => Some(RevertStrings::Strip),
+                        crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
+                    },
+                    debug_info: Default::default(),
+                }),
                 ..Default::default()
             },
         };
-        let mut command = Command::new(&self.solc_path);
+        let mut command = AsyncCommand::new(&self.solc_path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
@@ -112,8 +137,9 @@ impl SolidityCompiler for Solc {
         let mut child = command.spawn()?;
         let stdin = child.stdin.as_mut().expect("should be piped");
-        serde_json::to_writer(stdin, &input)?;
-        let output = child.wait_with_output()?;
+        let serialized_input = serde_json::to_vec(&input)?;
+        stdin.write_all(&serialized_input).await?;
+        let output = child.wait_with_output().await?;
         if !output.status.success() {
             let json_in = serde_json::to_string_pretty(&input)?;
@@ -177,22 +203,30 @@ impl SolidityCompiler for Solc {
         Self { solc_path }
     }
-    fn get_compiler_executable(
+    async fn get_compiler_executable(
         config: &Arguments,
         version: impl Into<VersionOrRequirement>,
     ) -> anyhow::Result<PathBuf> {
-        let path = download_solc(config.directory(), version, config.wasm)?;
+        let path = download_solc(config.directory(), version, config.wasm).await?;
         Ok(path)
     }
-    fn version(&self) -> anyhow::Result<semver::Version> {
-        // The following is the parsing code for the version from the solc version strings which
-        // look like the following:
+    async fn version(&self) -> anyhow::Result<semver::Version> {
+        /// This is a cache of the path of the compiler to the version number of the compiler. We
+        /// choose to cache the version in this way rather than through a field on the struct since
+        /// compiler objects are being created all the time from the path and the compiler object
+        /// is not reused over time.
+        static VERSION_CACHE: LazyLock<DashMap<PathBuf, Version>> = LazyLock::new(Default::default);
+        match VERSION_CACHE.entry(self.solc_path.clone()) {
+            dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()),
+            dashmap::Entry::Vacant(vacant_entry) => {
+                // The following is the parsing code for the version from the solc version strings
+                // which look like the following:
                 // ```
                 // solc, the solidity compiler commandline interface
                 // Version: 0.8.30+commit.73712a01.Darwin.appleclang
                 // ```
                 let child = Command::new(self.solc_path.as_path())
                     .arg("--version")
                     .stdout(Stdio::piped())
@@ -208,7 +242,25 @@ impl SolidityCompiler for Solc {
                     .next()
                     .context("Version parsing failed")?;
-        Version::parse(version_string).map_err(Into::into)
+                let version = Version::parse(version_string)?;
+                vacant_entry.insert(version.clone());
+                Ok(version)
+            }
+        }
+    }
+
+    fn supports_mode(
+        compiler_version: &Version,
+        _optimize_setting: ModeOptimizerSetting,
+        pipeline: ModePipeline,
+    ) -> bool {
+        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
+        // mode E (i.e. no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
+        // is new enough.
+        pipeline == ModePipeline::ViaEVMAssembly
+            || (pipeline == ModePipeline::ViaYulIR
+                && compiler_version >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR)
    }
 }
@@ -216,15 +268,17 @@ impl SolidityCompiler for Solc {
 mod test {
     use super::*;
-    #[test]
-    fn compiler_version_can_be_obtained() {
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained() {
         // Arrange
         let args = Arguments::default();
-        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6)).unwrap();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6))
+            .await
+            .unwrap();
         let compiler = Solc::new(path);
         // Act
-        let version = compiler.version();
+        let version = compiler.version().await;
         // Assert
         assert_eq!(
@@ -232,4 +286,23 @@ mod test {
             Version::new(0, 7, 6)
         )
     }
+
+    #[tokio::test]
+    async fn compiler_version_can_be_obtained1() {
+        // Arrange
+        let args = Arguments::default();
+        let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21))
+            .await
+            .unwrap();
+        let compiler = Solc::new(path);
+
+        // Act
+        let version = compiler.version().await;
+
+        // Assert
+        assert_eq!(
+            version.expect("Failed to get version"),
+            Version::new(0, 4, 21)
+        )
+    }
 }
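The supports_mode filter can be exercised directly; a small sketch of the expected truth table for solc:

use revive_dt_compiler::{solc::Solc, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use semver::Version;

fn via_ir_support_sketch() {
    let old = Version::new(0, 8, 12);
    let new = Version::new(0, 8, 30);
    // E (no Yul IR) works on any solc; Y requires 0.8.13 or newer. The
    // optimizer setting never disqualifies solc.
    assert!(Solc::supports_mode(&old, ModeOptimizerSetting::M3, ModePipeline::ViaEVMAssembly));
    assert!(!Solc::supports_mode(&old, ModeOptimizerSetting::M3, ModePipeline::ViaYulIR));
    assert!(Solc::supports_mode(&new, ModeOptimizerSetting::M0, ModePipeline::ViaYulIR));
}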
+14 -8
@@ -4,11 +4,13 @@ use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc
 use revive_dt_config::Arguments;
 use semver::Version;
-#[test]
-fn contracts_can_be_compiled_with_solc() {
+#[tokio::test]
+async fn contracts_can_be_compiled_with_solc() {
     // Arrange
     let args = Arguments::default();
-    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30)).unwrap();
+    let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30))
+        .await
+        .unwrap();
     // Act
     let output = Compiler::<Solc>::new()
@@ -16,7 +18,8 @@ fn contracts_can_be_compiled_with_solc() {
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path);
+        .try_build(compiler_path)
+        .await;
     // Assert
     let output = output.expect("Failed to compile");
@@ -42,11 +45,13 @@ fn contracts_can_be_compiled_with_solc() {
     assert!(callable_file_contracts.contains_key("Callable"));
 }
-#[test]
-fn contracts_can_be_compiled_with_resolc() {
+#[tokio::test]
+async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
     let args = Arguments::default();
-    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30)).unwrap();
+    let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30))
+        .await
+        .unwrap();
     // Act
     let output = Compiler::<Resolc>::new()
@@ -54,7 +59,8 @@ fn contracts_can_be_compiled_with_resolc() {
         .unwrap()
         .with_source("./tests/assets/array_one_element/main.sol")
         .unwrap()
-        .try_build(compiler_path);
+        .try_build(compiler_path)
+        .await;
     // Assert
     let output = output.expect("Failed to compile");
+2
@@ -15,3 +15,5 @@ semver = { workspace = true }
 temp-dir = { workspace = true }
 serde = { workspace = true }
+[lints]
+workspace = true
+31 -9
View File
@@ -58,10 +58,6 @@ pub struct Arguments {
#[arg(long = "geth-start-timeout", default_value = "5000")] #[arg(long = "geth-start-timeout", default_value = "5000")]
pub geth_start_timeout: u64, pub geth_start_timeout: u64,
/// The test network chain ID.
#[arg(short, long = "network-id", default_value = "420420420")]
pub network_id: u64,
/// Configure nodes according to this genesis.json file. /// Configure nodes according to this genesis.json file.
#[arg(long = "genesis", default_value = "genesis.json")] #[arg(long = "genesis", default_value = "genesis.json")]
pub genesis_file: PathBuf, pub genesis_file: PathBuf,
@@ -77,7 +73,7 @@ pub struct Arguments {
/// This argument controls which private keys the nodes should have access to and be added to /// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
/// of the node. /// of the node.
#[arg(long = "private-keys-count", default_value_t = 30)] #[arg(long = "private-keys-count", default_value_t = 100_000)]
pub private_keys_to_add: usize, pub private_keys_to_add: usize,
/// The differential testing leader node implementation. /// The differential testing leader node implementation.
@@ -92,9 +88,22 @@ pub struct Arguments {
#[arg(long = "compile-only")] #[arg(long = "compile-only")]
pub compile_only: Option<TestingPlatform>, pub compile_only: Option<TestingPlatform>,
/// Determines the amount of tests that are executed in parallel. /// Determines the number of nodes that will be spawned for each chain.
#[arg(long = "workers", default_value = "12")] #[arg(long, default_value = "1")]
pub workers: usize, pub number_of_nodes: usize,
/// Determines the number of tokio worker threads that will be used.
#[arg(
long,
default_value_t = std::thread::available_parallelism()
.map(|n| n.get())
.unwrap_or(1)
)]
pub number_of_threads: usize,
/// Determines the number of concurrent tasks that will be spawned to run tests. Defaults to 20 x the number of nodes.
#[arg(long)]
pub number_concurrent_tasks: Option<usize>,
/// Extract problems back to the test corpus. /// Extract problems back to the test corpus.
#[arg(short, long = "extract-problems")] #[arg(short, long = "extract-problems")]
@@ -111,6 +120,10 @@ pub struct Arguments {
/// By default it uses `eth-rpc` binary found in `$PATH`. /// By default it uses `eth-rpc` binary found in `$PATH`.
#[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")] #[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
pub eth_proxy: PathBuf, pub eth_proxy: PathBuf,
/// Controls if the compilation cache should be invalidated or not.
#[arg(short, long)]
pub invalidate_compilation_cache: bool,
} }
impl Arguments { impl Arguments {
@@ -130,6 +143,13 @@ impl Arguments {
panic!("should have a workdir configured") panic!("should have a workdir configured")
} }
/// Return the number of concurrent tasks to run. This is provided via the
/// `--number-concurrent-tasks` argument, and otherwise defaults to 20 x `--number-of-nodes`.
pub fn number_of_concurrent_tasks(&self) -> usize {
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes)
}
/// Try to parse `self.account` into a [PrivateKeySigner], /// Try to parse `self.account` into a [PrivateKeySigner],
/// panicking on error. /// panicking on error.
pub fn wallet(&self) -> EthereumWallet { pub fn wallet(&self) -> EthereumWallet {
@@ -159,7 +179,9 @@ impl Default for Arguments {
/// The Solidity compatible node implementation. /// The Solidity compatible node implementation.
/// ///
/// This describes the solutions to be tested against on a high level. /// This describes the solutions to be tested against on a high level.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, ValueEnum, Serialize, Deserialize)] #[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize,
)]
#[clap(rename_all = "lower")] #[clap(rename_all = "lower")]
pub enum TestingPlatform { pub enum TestingPlatform {
/// The go-ethereum reference full node EVM implementation. /// The go-ethereum reference full node EVM implementation.
+12 -1
@@ -23,10 +23,21 @@ revive-dt-report = { workspace = true }
alloy = { workspace = true } alloy = { workspace = true }
anyhow = { workspace = true } anyhow = { workspace = true }
bson = { workspace = true }
cacache = { workspace = true }
clap = { workspace = true } clap = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true } indexmap = { workspace = true }
once_cell = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
rayon = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
temp-dir = { workspace = true } temp-dir = { workspace = true }
tempfile = { workspace = true }
[lints]
workspace = true
+262
@@ -0,0 +1,262 @@
//! A wrapper around the compiler which allows for caching of compilation artifacts so that they can
//! be reused between runs.
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::Arc,
};
use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_config::Arguments;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
use anyhow::{Error, Result};
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
use tracing::{Instrument, debug, debug_span, instrument};
use crate::Platform;
pub struct CachedCompiler(ArtifactsCache);
impl CachedCompiler {
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
let mut cache = ArtifactsCache::new(path);
if invalidate_cache {
cache = cache.with_invalidated_cache().await?;
}
Ok(Self(cache))
}
/// Compiles or gets the compilation artifacts from the cache.
#[instrument(
level = "debug",
skip_all,
fields(
metadata_file_path = %metadata_file_path.as_ref().display(),
%mode,
platform = P::config_id().to_string()
),
err
)]
pub async fn compile_contracts<P: Platform>(
&self,
metadata: &Metadata,
metadata_file_path: impl AsRef<Path>,
mode: &Mode,
config: &Arguments,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
) -> Result<(CompilerOutput, Version)> {
static CACHE_KEY_LOCK: Lazy<RwLock<HashMap<CacheKey, Arc<Mutex<()>>>>> =
Lazy::new(Default::default);
let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone());
let compiler_path = <P::Compiler as SolidityCompiler>::get_compiler_executable(
config,
compiler_version_or_requirement,
)
.await?;
let compiler_version = <P::Compiler as SolidityCompiler>::new(compiler_path.clone())
.version()
.await?;
let cache_key = CacheKey {
platform_key: P::config_id().to_string(),
compiler_version: compiler_version.clone(),
metadata_file_path: metadata_file_path.as_ref().to_path_buf(),
solc_mode: mode.clone(),
};
let compilation_callback = || {
async move {
compile_contracts::<P>(
metadata.directory()?,
compiler_path,
metadata.files_to_compile()?,
mode,
deployed_libraries,
)
.map(|compilation_result| compilation_result.map(CacheValue::new))
.await
}
.instrument(debug_span!(
"Running compilation for the cache key",
cache_key.platform_key = %cache_key.platform_key,
cache_key.compiler_version = %cache_key.compiler_version,
cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
cache_key.solc_mode = %cache_key.solc_mode,
))
};
let compiled_contracts = match deployed_libraries {
// If deployed libraries have been specified then we re-compile the contracts, since
// linking is required in this case.
Some(_) => {
debug!("Deployed libraries defined, recompilation must take place");
debug!("Cache miss");
compilation_callback().await?.compiler_output
}
// If no deployed libraries are specified then we can follow the cached flow and attempt
// to look up the compilation artifacts in the cache.
None => {
debug!("Deployed libraries undefined, attempting to make use of cache");
// Lock this specific cache key so that we do not end up with inconsistent state.
// When multiple cases ask for the same compilation artifacts and there is a cache
// miss, we do not want them all to trigger a compilation. Hence the lock here.
let read_guard = CACHE_KEY_LOCK.read().await;
let mutex = match read_guard.get(&cache_key).cloned() {
Some(value) => value,
None => {
drop(read_guard);
CACHE_KEY_LOCK
.write()
.await
.entry(cache_key.clone())
.or_default()
.clone()
}
};
let _guard = mutex.lock().await;
self.0
.get_or_insert_with(&cache_key, compilation_callback)
.await
.map(|value| value.compiler_output)?
}
};
Ok((compiled_contracts, compiler_version))
}
}
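A minimal sketch of the per-key locking pattern used in compile_contracts above, assuming tokio's RwLock and Mutex (the function name and the String key are illustrative): the common path takes only a read lock, a write lock is taken just to create a missing entry, and all callers for the same key then serialize on that one mutex, so a cache miss triggers a single compilation instead of one per caller.
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};
// Return the mutex guarding one cache key, creating it on first use.
async fn lock_for_key(
    locks: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
    key: &str,
) -> Arc<Mutex<()>> {
    let read_guard = locks.read().await;
    match read_guard.get(key).cloned() {
        Some(mutex) => mutex,
        None => {
            // Release the read lock before acquiring the write lock.
            drop(read_guard);
            locks
                .write()
                .await
                .entry(key.to_owned())
                .or_default()
                .clone()
        }
    }
}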
async fn compile_contracts<P: Platform>(
metadata_directory: impl AsRef<Path>,
compiler_path: impl AsRef<Path>,
mut files_to_compile: impl Iterator<Item = PathBuf>,
mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
) -> Result<CompilerOutput> {
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
.with_allowed_extension("sol")
.with_use_cached_fs(true)
.collect::<Vec<_>>();
Compiler::<P::Compiler>::new()
.with_allow_path(metadata_directory)
// Handling the modes
.with_optimization(mode.optimize_setting)
.with_pipeline(mode.pipeline)
// Adding the contract sources to the compiler.
.try_then(|compiler| {
files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
})?
// Adding the deployed libraries to the compiler.
.then(|compiler| {
deployed_libraries
.iter()
.flat_map(|value| value.iter())
.map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
.flat_map(|(_, ident, address, _)| {
all_sources_in_dir
.iter()
.map(move |path| (ident, address, path))
})
.fold(compiler, |compiler, (ident, address, path)| {
compiler.with_library(path, ident.as_str(), *address)
})
})
.try_build(compiler_path)
.await
}
struct ArtifactsCache {
path: PathBuf,
}
impl ArtifactsCache {
pub fn new(path: impl AsRef<Path>) -> Self {
Self {
path: path.as_ref().to_path_buf(),
}
}
#[instrument(level = "debug", skip_all, err)]
pub async fn with_invalidated_cache(self) -> Result<Self> {
cacache::clear(self.path.as_path())
.await
.map_err(Into::<Error>::into)?;
Ok(self)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> {
let key = bson::to_vec(key)?;
let value = bson::to_vec(value)?;
cacache::write(self.path.as_path(), key.encode_hex(), value).await?;
Ok(())
}
pub async fn get(&self, key: &CacheKey) -> Option<CacheValue> {
let key = bson::to_vec(key).ok()?;
let value = cacache::read(self.path.as_path(), key.encode_hex())
.await
.ok()?;
let value = bson::from_slice::<CacheValue>(&value).ok()?;
Some(value)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn get_or_insert_with(
&self,
key: &CacheKey,
callback: impl AsyncFnOnce() -> Result<CacheValue>,
) -> Result<CacheValue> {
match self.get(key).await {
Some(value) => {
debug!("Cache hit");
Ok(value)
}
None => {
debug!("Cache miss");
let value = callback().await?;
self.insert(key, &value).await?;
Ok(value)
}
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
struct CacheKey {
/// The platform name that this artifact was compiled for. For example, this could be EVM or
/// PVM.
platform_key: String,
/// The version of the compiler that was used to compile the artifacts.
compiler_version: Version,
/// The path of the metadata file that the compilation artifacts are for.
metadata_file_path: PathBuf,
/// The mode that the compilation artifacts were compiled with.
solc_mode: Mode,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct CacheValue {
/// The compiler output from the compilation run.
compiler_output: CompilerOutput,
}
impl CacheValue {
pub fn new(compiler_output: CompilerOutput) -> Self {
Self { compiler_output }
}
}
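For illustration only (not part of the diff), a round-trip sketch of the key encoding used by ArtifactsCache: keys are BSON-serialized and then hex-encoded so that cacache receives a printable string key. The Key struct and the hex crate below are stand-ins for the real CacheKey and alloy's ToHexExt.
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Key {
    platform_key: String,
    compiler_version: String,
}
fn main() -> anyhow::Result<()> {
    let key = Key {
        platform_key: "geth".into(),
        compiler_version: "0.8.30".into(),
    };
    // BSON gives a stable binary encoding of the struct...
    let bytes = bson::to_vec(&key)?;
    // ...and hex-encoding turns it into a printable cacache key.
    let cache_key = hex::encode(&bytes);
    let decoded: Key = bson::from_slice(&hex::decode(&cache_key)?)?;
    assert_eq!(decoded, key);
    Ok(())
}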
File diff suppressed because it is too large
+1 -1
@@ -26,7 +26,7 @@ pub trait Platform {
pub struct Geth; pub struct Geth;
impl Platform for Geth { impl Platform for Geth {
type Blockchain = geth::Instance; type Blockchain = geth::GethNode;
type Compiler = solc::Solc; type Compiler = solc::Solc;
fn config_id() -> TestingPlatform { fn config_id() -> TestingPlatform {
+655 -85
@@ -1,48 +1,113 @@
use std::{collections::HashMap, sync::LazyLock}; mod cached_compiler;
use std::{
collections::{BTreeMap, HashMap},
io::{BufWriter, Write, stderr},
path::Path,
sync::{Arc, LazyLock},
time::Instant,
};
use alloy::{
network::{Ethereum, TransactionBuilder},
rpc::types::TransactionRequest,
};
use anyhow::Context;
use clap::Parser; use clap::Parser;
use rayon::{ThreadPoolBuilder, prelude::*}; use futures::stream;
use futures::{Stream, StreamExt};
use indexmap::IndexMap;
use revive_dt_node_interaction::EthereumNode;
use temp_dir::TempDir;
use tokio::{sync::mpsc, try_join};
use tracing::{debug, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::types::Mode;
use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_core::{ use revive_dt_core::{
Geth, Kitchensink, Platform, Geth, Kitchensink, Platform,
driver::{Driver, State}, driver::{CaseDriver, CaseState},
}; };
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile}; use revive_dt_format::{
use revive_dt_node::pool::NodePool; case::{Case, CaseIdx},
corpus::Corpus,
input::{Input, Step},
metadata::{ContractPathAndIdent, MetadataFile},
mode::ParsedMode,
};
use revive_dt_node::{Node, pool::NodePool};
use revive_dt_report::reporter::{Report, Span}; use revive_dt_report::reporter::{Report, Span};
use temp_dir::TempDir;
use tracing::Level; use crate::cached_compiler::CachedCompiler;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap()); static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
fn main() -> anyhow::Result<()> { /// This represents a single "test": a mode, path, and collection of cases.
let args = init_cli()?; #[derive(Clone, Debug)]
struct Test<'a> {
metadata: &'a MetadataFile,
metadata_file_path: &'a Path,
mode: Mode,
case_idx: CaseIdx,
case: &'a Case,
}
/// This represents the results that we gather from running test cases.
type CaseResult = Result<usize, anyhow::Error>;
fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli()?;
info!(
leader = args.leader.to_string(),
follower = args.follower.to_string(),
working_directory = %args.directory().display(),
number_of_nodes = args.number_of_nodes,
invalidate_compilation_cache = args.invalidate_compilation_cache,
"Differential testing tool has been initialized"
);
let body = async {
for (corpus, tests) in collect_corpora(&args)? { for (corpus, tests) in collect_corpora(&args)? {
let span = Span::new(corpus, args.clone())?; let span = Span::new(corpus, args.clone())?;
match &args.compile_only { match &args.compile_only {
Some(platform) => compile_corpus(&args, &tests, platform, span), Some(platform) => compile_corpus(&args, &tests, platform, span).await,
None => execute_corpus(&args, &tests, span)?, None => execute_corpus(&args, &tests, span).await?,
} }
Report::save()?; Report::save()?;
} }
Ok(()) Ok(())
};
tokio::runtime::Builder::new_multi_thread()
.worker_threads(args.number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(body)
} }
fn init_cli() -> anyhow::Result<Arguments> { fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
.lossy(false)
// Assuming that each line contains 255 characters and that each character is one byte,
// this buffer is about 4 GB large.
.buffered_lines_limit(0x1000000)
.thread_name("buffered writer")
.finish(std::io::stdout());
let subscriber = FmtSubscriber::builder() let subscriber = FmtSubscriber::builder()
.with_thread_ids(true) .with_writer(writer)
.with_thread_names(true) .with_thread_ids(false)
.with_thread_names(false)
.with_env_filter(EnvFilter::from_default_env()) .with_env_filter(EnvFilter::from_default_env())
.with_ansi(false) .with_ansi(false)
.pretty() .pretty()
.finish(); .finish();
tracing::subscriber::set_global_default(subscriber)?; tracing::subscriber::set_global_default(subscriber)?;
info!("Differential testing tool is starting");
let mut args = Arguments::parse(); let mut args = Arguments::parse();
@@ -60,94 +125,568 @@ fn init_cli() -> anyhow::Result<Arguments> {
args.temp_dir = Some(&TEMP_DIR); args.temp_dir = Some(&TEMP_DIR);
} }
} }
tracing::info!("workdir: {}", args.directory().display());
ThreadPoolBuilder::new() Ok((args, guard))
.num_threads(args.workers)
.build_global()?;
Ok(args)
} }
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> { fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
let mut corpora = HashMap::new(); let mut corpora = HashMap::new();
for path in &args.corpus { for path in &args.corpus {
let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter();
let corpus = Corpus::try_from_path(path)?; let corpus = Corpus::try_from_path(path)?;
tracing::info!("found corpus: {}", path.display()); info!(
name = corpus.name(),
number_of_contained_paths = corpus.path_count(),
"Deserialized corpus file"
);
let tests = corpus.enumerate_tests(); let tests = corpus.enumerate_tests();
tracing::info!("corpus '{}' contains {} tests", &corpus.name, tests.len());
corpora.insert(corpus, tests); corpora.insert(corpus, tests);
} }
Ok(corpora) Ok(corpora)
} }
fn run_driver<L, F>(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> async fn run_driver<L, F>(
args: &Arguments,
metadata_files: &[MetadataFile],
span: Span,
) -> anyhow::Result<()>
where where
L: Platform, L: Platform,
F: Platform, F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let leader_nodes = NodePool::<L::Blockchain>::new(args)?; let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test<'_>, CaseResult)>();
let follower_nodes = NodePool::<F::Blockchain>::new(args)?;
tests.par_iter().for_each( let tests = prepare_tests::<L, F>(args, metadata_files);
|MetadataFile { let driver_task = start_driver_task::<L, F>(args, tests, span, report_tx).await?;
content: metadata, let status_reporter_task = start_reporter_task(report_rx);
path: metadata_file_path,
}| {
// Starting a new tracing span for this metadata file. This allows our logs to be clear
// about which metadata file the logs belong to. We can add other information into this
// as well to be able to associate the logs with the correct metadata file and case
// that's being executed.
let tracing_span = tracing::span!(
Level::INFO,
"Running driver",
metadata_file_path = metadata_file_path.display().to_string(),
);
let _guard = tracing_span.enter();
let mut driver = Driver::<L, F>::new( tokio::join!(status_reporter_task, driver_task);
metadata,
args,
leader_nodes.round_robbin(),
follower_nodes.round_robbin(),
);
let execution_result = driver.execute(span);
tracing::info!(
case_success_count = execution_result.successful_cases_count,
case_failure_count = execution_result.failed_cases_count,
"Execution completed"
);
let mut error_count = 0;
for result in execution_result.results.iter() {
if !result.is_success() {
tracing::error!(execution_error = ?result, "Encountered an error");
error_count += 1;
}
}
if error_count == 0 {
tracing::info!("Execution succeeded");
} else {
tracing::info!("Execution failed");
}
},
);
Ok(()) Ok(())
} }
fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> { fn prepare_tests<'a, L, F>(
args: &Arguments,
metadata_files: &'a [MetadataFile],
) -> impl Stream<Item = Test<'a>>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let filtered_tests = metadata_files
.iter()
.flat_map(|metadata_file| {
metadata_file
.cases
.iter()
.enumerate()
.map(move |(case_idx, case)| (metadata_file, case_idx, case))
})
// Flatten over the modes, prefer the case modes over the metadata file modes.
.flat_map(|(metadata_file, case_idx, case)| {
case.modes
.as_ref()
.or(metadata_file.modes.as_ref())
.map(|modes| ParsedMode::many_to_modes(modes.iter()).collect::<Vec<_>>())
.unwrap_or(Mode::all().collect())
.into_iter()
.map(move |mode| (metadata_file, case_idx, case, mode))
})
.fold(
IndexMap::<_, BTreeMap<_, Vec<_>>>::new(),
|mut map, (metadata_file, case_idx, case, mode)| {
let test = Test {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
};
map.entry(mode)
.or_default()
.entry(test.case_idx)
.or_default()
.push(test);
map
},
)
.into_values()
.flatten()
.flat_map(|(_, value)| value.into_iter())
// Filter the test out if the leader and follower do not support the target.
.filter(|test| {
let leader_support =
<L::Blockchain as Node>::matches_target(test.metadata.targets.as_deref());
let follower_support =
<F::Blockchain as Node>::matches_target(test.metadata.targets.as_deref());
let is_allowed = leader_support && follower_support;
if !is_allowed {
debug!(
file_path = %test.metadata.relative_path().display(),
leader_support,
follower_support,
"Target is not supported, throwing metadata file out"
)
}
is_allowed
})
// Filter the test out if the metadata file is ignored.
.filter(|test| {
if test.metadata.ignore.is_some_and(|ignore| ignore) {
debug!(
file_path = %test.metadata.relative_path().display(),
"Metadata file is ignored, throwing case out"
);
false
} else {
true
}
})
// Filter the test case if the case is ignored.
.filter(|test| {
if test.case.ignore.is_some_and(|ignore| ignore) {
debug!(
file_path = %test.metadata.relative_path().display(),
case_idx = %test.case_idx,
"Case is ignored, throwing case out"
);
false
} else {
true
}
})
// Filtering based on the EVM version compatibility
.filter(|test| {
if let Some(evm_version_requirement) = test.metadata.required_evm_version {
let leader_compatibility = evm_version_requirement
.matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
let follower_compatibility = evm_version_requirement
.matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
let is_allowed = leader_compatibility && follower_compatibility;
if !is_allowed {
debug!(
file_path = %test.metadata.relative_path().display(),
case_idx = %test.case_idx,
leader_compatibility,
follower_compatibility,
"EVM Version is incompatible, throwing case out"
);
}
is_allowed
} else {
true
}
});
stream::iter(filtered_tests)
// Filter based on the compiler compatibility
.filter_map(move |test| async move {
let leader_support = does_compiler_support_mode::<L>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
let follower_support = does_compiler_support_mode::<F>(args, &test.mode)
.await
.ok()
.unwrap_or(false);
let is_allowed = leader_support && follower_support;
if !is_allowed {
debug!(
file_path = %test.metadata.relative_path().display(),
leader_support,
follower_support,
"Compilers do not support this, throwing case out"
);
}
is_allowed.then_some(test)
})
}
async fn does_compiler_support_mode<P: Platform>(
args: &Arguments,
mode: &Mode,
) -> anyhow::Result<bool> {
let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone());
let compiler_path =
P::Compiler::get_compiler_executable(args, compiler_version_or_requirement).await?;
let compiler_version = P::Compiler::new(compiler_path.clone()).version().await?;
Ok(P::Compiler::supports_mode(
&compiler_version,
mode.optimize_setting,
mode.pipeline,
))
}
async fn start_driver_task<'a, L, F>(
args: &Arguments,
tests: impl Stream<Item = Test<'a>>,
span: Span,
report_tx: mpsc::UnboundedSender<(Test<'a>, CaseResult)>,
) -> anyhow::Result<impl Future<Output = ()>>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let leader_nodes = Arc::new(NodePool::<L::Blockchain>::new(args)?);
let follower_nodes = Arc::new(NodePool::<F::Blockchain>::new(args)?);
let number_concurrent_tasks = args.number_of_concurrent_tasks();
let cached_compiler = Arc::new(
CachedCompiler::new(
args.directory().join("compilation_cache"),
args.invalidate_compilation_cache,
)
.await?,
);
Ok(tests.for_each_concurrent(
// We want to limit the concurrent tasks here because:
//
// 1. We don't want to overwhelm the nodes with too many requests, leading to responses timing out.
// 2. We don't want to open too many files at once, leading to the OS running out of file descriptors.
//
// By default, we allow a maximum of 20 ongoing requests per node in order to limit (1), and assume that
// this number will automatically be low enough to address (2). The user can override this.
Some(number_concurrent_tasks),
move |test| {
let leader_nodes = leader_nodes.clone();
let follower_nodes = follower_nodes.clone();
let report_tx = report_tx.clone();
let cached_compiler = cached_compiler.clone();
async move {
let leader_node = leader_nodes.round_robbin();
let follower_node = follower_nodes.round_robbin();
let result = handle_case_driver::<L, F>(
test.metadata_file_path,
test.metadata,
test.case_idx,
test.case,
test.mode.clone(),
args,
cached_compiler,
leader_node,
follower_node,
span,
)
.await;
report_tx
.send((test, result))
.expect("Failed to send report");
}
},
))
}
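The bounded concurrency above comes down to for_each_concurrent with an explicit limit; a self-contained sketch, where the limit of 4 is an arbitrary stand-in for args.number_of_concurrent_tasks():
use futures::{stream, StreamExt};
use std::time::Duration;
#[tokio::main]
async fn main() {
    let limit = 4; // stand-in for args.number_of_concurrent_tasks()
    stream::iter(0..16)
        .for_each_concurrent(Some(limit), |case_idx| async move {
            // At most `limit` of these futures are in flight at any time.
            tokio::time::sleep(Duration::from_millis(10)).await;
            println!("finished case {case_idx}");
        })
        .await;
}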
async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, CaseResult)>) {
let start = Instant::now();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const COLOUR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0;
let mut number_of_failures = 0;
let mut failures = vec![];
// Wait for reports to come from our test runner. When the channel closes, this ends.
let mut buf = BufWriter::new(stderr());
while let Some((test, case_result)) = report_rx.recv().await {
let case_name = test.case.name.as_deref().unwrap_or("unnamed_case");
let case_idx = test.case_idx;
let test_path = test.metadata_file_path.display();
let test_mode = test.mode.clone();
match case_result {
Ok(_inputs) => {
number_of_successes += 1;
let _ = writeln!(
buf,
"{GREEN}Case Succeeded:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})"
);
}
Err(err) => {
number_of_failures += 1;
let _ = writeln!(
buf,
"{RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})"
);
failures.push((test, err));
}
}
}
let _ = writeln!(buf);
let elapsed = start.elapsed();
// Now, log the failures with more complete errors at the bottom, like `cargo test` does, so
// that we don't have to scroll through the entire output to find them.
if !failures.is_empty() {
let _ = writeln!(buf, "{BOLD}Failures:{BOLD_RESET}\n");
for failure in failures {
let (test, err) = failure;
let case_name = test.case.name.as_deref().unwrap_or("unnamed_case");
let case_idx = test.case_idx;
let test_path = test.metadata_file_path.display();
let test_mode = test.mode.clone();
let _ = writeln!(
buf,
"---- {RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode}) ----\n\n{err}\n"
);
}
}
// Summary at the end.
let _ = writeln!(
buf,
"{} cases: {GREEN}{number_of_successes}{COLOUR_RESET} cases succeeded, {RED}{number_of_failures}{COLOUR_RESET} cases failed in {} seconds",
number_of_successes + number_of_failures,
elapsed.as_secs()
);
}
#[allow(clippy::too_many_arguments)]
#[instrument(
level = "info",
name = "Handling Case"
skip_all,
fields(
metadata_file_path = %metadata.relative_path().display(),
mode = %mode,
%case_idx,
case_name = case.name.as_deref().unwrap_or("Unnamed Case"),
leader_node = leader_node.id(),
follower_node = follower_node.id(),
)
)]
async fn handle_case_driver<L, F>(
metadata_file_path: &Path,
metadata: &MetadataFile,
case_idx: CaseIdx,
case: &Case,
mode: Mode,
config: &Arguments,
cached_compiler: Arc<CachedCompiler>,
leader_node: &L::Blockchain,
follower_node: &F::Blockchain,
_: Span,
) -> anyhow::Result<usize>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let (
(
CompilerOutput {
contracts: leader_pre_link_contracts,
},
_,
),
(
CompilerOutput {
contracts: follower_pre_link_contracts,
},
_,
),
) = try_join!(
cached_compiler.compile_contracts::<L>(metadata, metadata_file_path, &mode, config, None),
cached_compiler.compile_contracts::<F>(metadata, metadata_file_path, &mode, config, None)
)?;
let mut leader_deployed_libraries = None::<HashMap<_, _>>;
let mut follower_deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = metadata.contract_sources()?;
for library_instance in metadata
.libraries
.iter()
.flatten()
.flat_map(|(_, map)| map.values())
{
debug!(%library_instance, "Deploying Library Instance");
let ContractPathAndIdent {
contract_source_path: library_source_path,
contract_ident: library_ident,
} = contract_sources
.remove(library_instance)
.context("Failed to find the contract source")?;
let (leader_code, leader_abi) = leader_pre_link_contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))
.context("Declared library was not compiled")?;
let (follower_code, follower_abi) = follower_pre_link_contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))
.context("Declared library was not compiled")?;
let leader_code = match alloy::hex::decode(leader_code) {
Ok(code) => code,
Err(error) => {
anyhow::bail!("Failed to hex-decode the byte code {}", error)
}
};
let follower_code = match alloy::hex::decode(follower_code) {
Ok(code) => code,
Err(error) => {
anyhow::bail!("Failed to hex-decode the byte code {}", error)
}
};
// Getting the deployer address from the cases themselves. This ensures that we're
// doing the deployments from different accounts and therefore are not serialized
// on a single account's nonce.
let deployer_address = case
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => Some(input.caller),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
})
.next()
.unwrap_or(Input::default_caller());
let leader_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
leader_code,
);
let follower_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
follower_code,
);
let (leader_receipt, follower_receipt) = try_join!(
leader_node.execute_transaction(leader_tx),
follower_node.execute_transaction(follower_tx)
)?;
debug!(
?library_instance,
library_address = ?leader_receipt.contract_address,
"Deployed library to leader"
);
debug!(
?library_instance,
library_address = ?follower_receipt.contract_address,
"Deployed library to follower"
);
let leader_library_address = leader_receipt
.contract_address
.context("Contract deployment didn't return an address")?;
let follower_library_address = follower_receipt
.contract_address
.context("Contract deployment didn't return an address")?;
leader_deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(
library_ident.clone(),
leader_library_address,
leader_abi.clone(),
),
);
follower_deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(
library_ident,
follower_library_address,
follower_abi.clone(),
),
);
}
let (
(
CompilerOutput {
contracts: leader_post_link_contracts,
},
leader_compiler_version,
),
(
CompilerOutput {
contracts: follower_post_link_contracts,
},
follower_compiler_version,
),
) = try_join!(
cached_compiler.compile_contracts::<L>(
metadata,
metadata_file_path,
&mode,
config,
leader_deployed_libraries.as_ref()
),
cached_compiler.compile_contracts::<F>(
metadata,
metadata_file_path,
&mode,
config,
follower_deployed_libraries.as_ref()
)
)?;
let leader_state = CaseState::<L>::new(
leader_compiler_version,
leader_post_link_contracts,
leader_deployed_libraries.unwrap_or_default(),
);
let follower_state = CaseState::<F>::new(
follower_compiler_version,
follower_post_link_contracts,
follower_deployed_libraries.unwrap_or_default(),
);
let mut driver = CaseDriver::<L, F>::new(
metadata,
case,
leader_node,
follower_node,
leader_state,
follower_state,
);
driver
.execute()
.await
.inspect(|steps_executed| info!(steps_executed, "Case succeeded"))
}
async fn execute_corpus(
args: &Arguments,
tests: &[MetadataFile],
span: Span,
) -> anyhow::Result<()> {
match (&args.leader, &args.follower) { match (&args.leader, &args.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => { (TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(args, tests, span)? run_driver::<Geth, Kitchensink>(args, tests, span).await?
} }
(TestingPlatform::Geth, TestingPlatform::Geth) => { (TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(args, tests, span)? run_driver::<Geth, Geth>(args, tests, span).await?
} }
_ => unimplemented!(), _ => unimplemented!(),
} }
@@ -155,24 +694,55 @@ fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyho
Ok(()) Ok(())
} }
fn compile_corpus( async fn compile_corpus(
config: &Arguments, config: &Arguments,
tests: &[MetadataFile], tests: &[MetadataFile],
platform: &TestingPlatform, platform: &TestingPlatform,
span: Span, _: Span,
) { ) {
tests.par_iter().for_each(|metadata| { let tests = tests.iter().flat_map(|metadata| {
for mode in &metadata.solc_modes() { metadata
.solc_modes()
.into_iter()
.map(move |solc_mode| (metadata, solc_mode))
});
let file = tempfile::NamedTempFile::new().expect("Failed to create temp file");
let cached_compiler = CachedCompiler::new(file.path(), false)
.await
.map(Arc::new)
.expect("Failed to create the cached compiler");
futures::stream::iter(tests)
.for_each_concurrent(None, |(metadata, mode)| {
let cached_compiler = cached_compiler.clone();
async move {
match platform { match platform {
TestingPlatform::Geth => { TestingPlatform::Geth => {
let mut state = State::<Geth>::new(config, span); let _ = cached_compiler
let _ = state.build_contracts(mode, metadata); .compile_contracts::<Geth>(
metadata,
metadata.metadata_file_path.as_path(),
&mode,
config,
None,
)
.await;
} }
TestingPlatform::Kitchensink => { TestingPlatform::Kitchensink => {
let mut state = State::<Kitchensink>::new(config, span); let _ = cached_compiler
let _ = state.build_contracts(mode, metadata); .compile_contracts::<Kitchensink>(
metadata,
metadata.metadata_file_path.as_path(),
&mode,
config,
None,
)
.await;
} }
};
} }
}); }
})
.await;
} }
+10
@@ -11,11 +11,21 @@ rust-version.workspace = true
[dependencies] [dependencies]
revive-dt-common = { workspace = true } revive-dt-common = { workspace = true }
revive-common = { workspace = true }
alloy = { workspace = true } alloy = { workspace = true }
alloy-primitives = { workspace = true } alloy-primitives = { workspace = true }
alloy-sol-types = { workspace = true } alloy-sol-types = { workspace = true }
anyhow = { workspace = true } anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
serde = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true } serde_json = { workspace = true }
[dev-dependencies]
tokio = { workspace = true }
[lints]
workspace = true
+41 -15
@@ -1,31 +1,50 @@
use serde::Deserialize; use serde::{Deserialize, Serialize};
use revive_dt_common::macros::define_wrapper_type; use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use crate::{ use crate::{
input::{Expected, Input}, input::{Expected, Step},
mode::Mode, mode::ParsedMode,
}; };
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq)]
pub struct Case { pub struct Case {
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>, pub name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
pub modes: Option<Vec<Mode>>,
pub inputs: Vec<Input>, #[serde(skip_serializing_if = "Option::is_none")]
pub modes: Option<Vec<ParsedMode>>,
#[serde(rename = "inputs")]
pub steps: Vec<Step>,
#[serde(skip_serializing_if = "Option::is_none")]
pub group: Option<String>, pub group: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>, pub expected: Option<Expected>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>,
} }
impl Case { impl Case {
pub fn inputs_iterator(&self) -> impl Iterator<Item = Input> { #[allow(irrefutable_let_patterns)]
let inputs_len = self.inputs.len(); pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
self.inputs let steps_len = self.steps.len();
self.steps
.clone() .clone()
.into_iter() .into_iter()
.enumerate() .enumerate()
.map(move |(idx, mut input)| { .map(move |(idx, mut step)| {
if idx + 1 == inputs_len { let Step::FunctionCall(ref mut input) = step else {
return step;
};
if idx + 1 == steps_len {
if input.expected.is_none() { if input.expected.is_none() {
input.expected = self.expected.clone(); input.expected = self.expected.clone();
} }
@@ -35,16 +54,23 @@ impl Case {
// the case? What are we supposed to do with that final expected field on the // the case? What are we supposed to do with that final expected field on the
// case? // case?
input step
} else { } else {
input step
} }
}) })
} }
pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
None => Mode::all().collect(),
}
}
} }
define_wrapper_type!( define_wrapper_type!(
/// A wrapper type for the index of test cases found in metadata file. /// A wrapper type for the index of test cases found in metadata file.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct CaseIdx(usize); pub struct CaseIdx(usize) impl Display;
); );
+87 -57
@@ -3,25 +3,28 @@ use std::{
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
use revive_dt_common::iterators::FilesWithExtensionIterator;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::{debug, info};
use crate::metadata::MetadataFile; use crate::metadata::{Metadata, MetadataFile};
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, Hash)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct Corpus { #[serde(untagged)]
pub name: String, pub enum Corpus {
pub path: PathBuf, SinglePath { name: String, path: PathBuf },
MultiplePaths { name: String, paths: Vec<PathBuf> },
} }
impl Corpus { impl Corpus {
/// Try to read and parse the corpus definition file at given `path`. pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
pub fn try_from_path(path: &Path) -> anyhow::Result<Self> { let mut corpus = File::open(file_path.as_ref())
let file = File::open(path)?; .map_err(anyhow::Error::from)
let mut corpus: Corpus = serde_json::from_reader(file)?; .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?;
// Ensure that the path mentioned in the corpus is relative to the corpus file. for path in corpus.paths_iter_mut() {
// Canonicalizing also helps make the path in any errors unambiguous. *path = file_path
corpus.path = path .as_ref()
.parent() .parent()
.ok_or_else(|| { .ok_or_else(|| {
anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display()) anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display())
@@ -33,67 +36,94 @@ impl Corpus {
path.display() path.display()
) )
})? })?
.join(corpus.path); .join(path.as_path())
}
Ok(corpus) Ok(corpus)
} }
/// Scan the corpus base directory and return all tests found.
pub fn enumerate_tests(&self) -> Vec<MetadataFile> { pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
let mut tests = Vec::new(); let mut tests = self
collect_metadata(&self.path, &mut tests); .paths_iter()
.flat_map(|root_path| {
if !root_path.is_dir() {
Box::new(std::iter::once(root_path.to_path_buf()))
as Box<dyn Iterator<Item = _>>
} else {
Box::new(
FilesWithExtensionIterator::new(root_path)
.with_use_cached_fs(true)
.with_allowed_extension("sol")
.with_allowed_extension("json"),
)
}
.map(move |metadata_file_path| (root_path, metadata_file_path))
})
.filter_map(|(root_path, metadata_file_path)| {
Metadata::try_from_file(&metadata_file_path)
.or_else(|| {
debug!(
discovered_from = %root_path.display(),
metadata_file_path = %metadata_file_path.display(),
"Skipping file since it doesn't contain valid metadata"
);
None
})
.map(|metadata| MetadataFile {
metadata_file_path,
corpus_file_path: root_path.to_path_buf(),
content: metadata,
})
.inspect(|metadata_file| {
debug!(
metadata_file_path = %metadata_file.relative_path().display(),
"Loaded metadata file"
)
})
})
.collect::<Vec<_>>();
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
info!(
len = tests.len(),
corpus_name = self.name(),
"Found tests in Corpus"
);
tests tests
} }
pub fn name(&self) -> &str {
match self {
Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
}
} }
/// Recursively walks `path` and parses any JSON or Solidity file into a test pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
/// definition [Metadata]. match self {
/// Corpus::SinglePath { path, .. } => {
/// Found tests are inserted into `tests`. Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
/// }
/// `path` is expected to be a directory. Corpus::MultiplePaths { paths, .. } => {
pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) { Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
if path.is_dir() {
let dir_entry = match std::fs::read_dir(path) {
Ok(dir_entry) => dir_entry,
Err(error) => {
tracing::error!("failed to read dir '{}': {error}", path.display());
return;
} }
};
for entry in dir_entry {
let entry = match entry {
Ok(entry) => entry,
Err(error) => {
tracing::error!("error reading dir entry: {error}");
continue;
} }
};
let path = entry.path();
if path.is_dir() {
collect_metadata(&path, tests);
continue;
} }
if path.is_file() { pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
if let Some(metadata) = MetadataFile::try_from_file(&path) { match self {
tests.push(metadata) Corpus::SinglePath { path, .. } => {
Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
}
Corpus::MultiplePaths { paths, .. } => {
Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
} }
} }
} }
} else {
let Some(extension) = path.extension() else { pub fn path_count(&self) -> usize {
tracing::error!("Failed to get file extension"); match self {
return; Corpus::SinglePath { .. } => 1,
}; Corpus::MultiplePaths { paths, .. } => paths.len(),
if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") {
if let Some(metadata) = MetadataFile::try_from_file(path) {
tests.push(metadata)
}
} else {
tracing::error!(?extension, "Unsupported file extension");
} }
} }
} }
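To make the untagged Corpus enum concrete, here is a hedged sketch of how the two corpus-file shapes deserialize; the field names match the diff, while the JSON contents are illustrative.
use serde::Deserialize;
use std::path::PathBuf;
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Corpus {
    SinglePath { name: String, path: PathBuf },
    MultiplePaths { name: String, paths: Vec<PathBuf> },
}
fn main() -> serde_json::Result<()> {
    // The original single-path corpus files keep deserializing as before...
    let single: Corpus = serde_json::from_str(r#"{ "name": "c", "path": "tests" }"#)?;
    // ...while a corpus may now list several root paths.
    let multi: Corpus = serde_json::from_str(r#"{ "name": "c", "paths": ["a", "b"] }"#)?;
    println!("{single:?}\n{multi:?}");
    Ok(())
}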
+344 -251
File diff suppressed because it is too large
+236 -68
@@ -1,7 +1,8 @@
use std::{ use std::{
cmp::Ordering,
collections::BTreeMap, collections::BTreeMap,
fmt::Display, fmt::Display,
fs::{File, read_to_string}, fs::File,
ops::Deref, ops::Deref,
path::{Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
@@ -9,12 +10,14 @@ use std::{
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_dt_common::{iterators::FilesWithExtensionIterator, macros::define_wrapper_type}; use revive_common::EVMVersion;
use revive_dt_common::{
use crate::{ cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type,
case::Case, types::Mode,
mode::{Mode, SolcMode},
}; };
use tracing::error;
use crate::{case::Case, mode::ParsedMode};
pub const METADATA_FILE_EXTENSION: &str = "json"; pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol"; pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
@@ -22,16 +25,26 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!";
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] #[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
pub struct MetadataFile { pub struct MetadataFile {
pub path: PathBuf, /// The path of the metadata file. This will either be a JSON or solidity file.
pub metadata_file_path: PathBuf,
/// This is the path contained within the corpus file. This could either be the path of some dir
/// or could be the actual metadata file path.
pub corpus_file_path: PathBuf,
/// The metadata contained within the file.
pub content: Metadata, pub content: Metadata,
} }
impl MetadataFile { impl MetadataFile {
pub fn try_from_file(path: &Path) -> Option<Self> { pub fn relative_path(&self) -> &Path {
Metadata::try_from_file(path).map(|metadata| Self { if self.corpus_file_path.is_file() {
path: path.to_owned(), &self.corpus_file_path
content: metadata, } else {
}) self.metadata_file_path
.strip_prefix(&self.corpus_file_path)
.unwrap()
}
} }
} }
@@ -43,34 +56,51 @@ impl Deref for MetadataFile {
} }
} }
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq)]
pub struct Metadata { pub struct Metadata {
pub targets: Option<Vec<String>>, /// A comment on the test case that's added for human-readability.
pub cases: Vec<Case>, #[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>, pub comment: Option<String>,
// TODO: Convert into wrapper types for clarity.
pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>, #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
pub modes: Option<Vec<Mode>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub targets: Option<Vec<String>>,
pub cases: Vec<Case>,
#[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub modes: Option<Vec<ParsedMode>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub file_path: Option<PathBuf>, pub file_path: Option<PathBuf>,
/// This field specifies an EVM version requirement for the test case: the test is only run if
/// the EVM version of the nodes matches the EVM version requirement specified here.
#[serde(skip_serializing_if = "Option::is_none")]
pub required_evm_version: Option<EvmVersionRequirement>,
/// A set of compilation directives that will be passed to the compiler whenever the contracts for
/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is
/// just a filter for when a test can run whereas this is an instruction to the compiler.
#[serde(skip_serializing_if = "Option::is_none")]
pub compiler_directives: Option<CompilationDirectives>,
} }
impl Metadata { impl Metadata {
/// Returns the solc modes of this metadata, inserting a default mode if not present. /// Returns the modes that we should test from this metadata.
pub fn solc_modes(&self) -> Vec<SolcMode> { pub fn solc_modes(&self) -> Vec<Mode> {
self.modes match &self.modes {
.to_owned() Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
.unwrap_or_else(|| vec![Mode::Solidity(Default::default())]) None => Mode::all().collect(),
.iter()
.filter_map(|mode| match mode {
Mode::Solidity(solc_mode) => Some(solc_mode),
Mode::Unknown(mode) => {
tracing::debug!("compiler: ignoring unknown mode '{mode}'");
None
} }
})
.cloned()
.collect()
} }
/// Returns the base directory of this metadata. /// Returns the base directory of this metadata.
@@ -126,10 +156,7 @@ impl Metadata {
pub fn try_from_file(path: &Path) -> Option<Self> { pub fn try_from_file(path: &Path) -> Option<Self> {
assert!(path.is_file(), "not a file: {}", path.display()); assert!(path.is_file(), "not a file: {}", path.display());
let Some(file_extension) = path.extension() else { let file_extension = path.extension()?;
tracing::debug!("skipping corpus file: {}", path.display());
return None;
};
if file_extension == METADATA_FILE_EXTENSION { if file_extension == METADATA_FILE_EXTENSION {
return Self::try_from_json(path); return Self::try_from_json(path);
@@ -139,18 +166,12 @@ impl Metadata {
return Self::try_from_solidity(path); return Self::try_from_solidity(path);
} }
tracing::debug!("ignoring invalid corpus file: {}", path.display());
None None
} }
fn try_from_json(path: &Path) -> Option<Self> { fn try_from_json(path: &Path) -> Option<Self> {
let file = File::open(path) let file = File::open(path)
.inspect_err(|error| { .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file"))
tracing::error!(
"opening JSON test metadata file '{}' error: {error}",
path.display()
);
})
.ok()?; .ok()?;
match serde_json::from_reader::<_, Metadata>(file) { match serde_json::from_reader::<_, Metadata>(file) {
@@ -158,11 +179,8 @@ impl Metadata {
metadata.file_path = Some(path.to_path_buf()); metadata.file_path = Some(path.to_path_buf());
Some(metadata) Some(metadata)
} }
Err(error) => { Err(err) => {
tracing::error!( error!(path = %path.display(), %err, "Deserialization of metadata failed");
"parsing JSON test metadata file '{}' error: {error}",
path.display()
);
None None
} }
} }
@@ -170,12 +188,7 @@ impl Metadata {
fn try_from_solidity(path: &Path) -> Option<Self> { fn try_from_solidity(path: &Path) -> Option<Self> {
let spec = read_to_string(path) let spec = read_to_string(path)
.inspect_err(|error| { .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content"))
tracing::error!(
"opening JSON test metadata file '{}' error: {error}",
path.display()
);
})
.ok()? .ok()?
.lines() .lines()
.filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER)) .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER))
@@ -203,11 +216,8 @@ impl Metadata {
); );
Some(metadata) Some(metadata)
} }
Err(error) => { Err(err) => {
tracing::error!( error!(path = %path.display(), %err, "Failed to deserialize metadata");
"parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}",
path.display()
);
None None
} }
} }
@@ -231,7 +241,9 @@ impl Metadata {
Ok(Box::new(std::iter::once(metadata_file_path.clone()))) Ok(Box::new(std::iter::once(metadata_file_path.clone())))
} else { } else {
Ok(Box::new( Ok(Box::new(
FilesWithExtensionIterator::new(self.directory()?).with_allowed_extension("sol"), FilesWithExtensionIterator::new(self.directory()?)
.with_allowed_extension("sol")
.with_use_cached_fs(true),
)) ))
} }
} }
@@ -245,7 +257,7 @@ define_wrapper_type!(
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
)] )]
#[serde(transparent)] #[serde(transparent)]
pub struct ContractInstance(String); pub struct ContractInstance(String) impl Display;
); );
define_wrapper_type!( define_wrapper_type!(
@@ -256,7 +268,7 @@ define_wrapper_type!(
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
)] )]
#[serde(transparent)] #[serde(transparent)]
pub struct ContractIdent(String); pub struct ContractIdent(String) impl Display;
); );
/// Represents an identifier used for contracts. /// Represents an identifier used for contracts.
@@ -310,17 +322,23 @@ impl FromStr for ContractPathAndIdent {
identifier = Some(next_item.to_owned()) identifier = Some(next_item.to_owned())
} }
} }
let Some(path) = path else { match (path, identifier) {
anyhow::bail!("Path is not defined"); (Some(path), Some(identifier)) => Ok(Self {
}; contract_source_path: PathBuf::from(path),
let Some(identifier) = identifier else { contract_ident: ContractIdent::new(identifier),
anyhow::bail!("Contract identifier is not defined") }),
(None, Some(path)) | (Some(path), None) => {
let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else {
anyhow::bail!("Failed to find identifier");
}; };
Ok(Self { Ok(Self {
contract_source_path: PathBuf::from(path), contract_source_path: PathBuf::from(path),
contract_ident: ContractIdent::new(identifier), contract_ident: ContractIdent::new(identifier),
}) })
} }
(None, None) => anyhow::bail!("Failed to find the path and identifier"),
}
}
} }
impl TryFrom<String> for ContractPathAndIdent { impl TryFrom<String> for ContractPathAndIdent {
@@ -337,6 +355,156 @@ impl From<ContractPathAndIdent> for String {
} }
} }
/// An EVM version requirement that the test case has. This gets serialized and
/// deserialized from and into [`String`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(try_from = "String", into = "String")]
pub struct EvmVersionRequirement {
ordering: Ordering,
or_equal: bool,
evm_version: EVMVersion,
}
impl EvmVersionRequirement {
pub fn new_greater_than_or_equals(version: EVMVersion) -> Self {
Self {
ordering: Ordering::Greater,
or_equal: true,
evm_version: version,
}
}
pub fn new_greater_than(version: EVMVersion) -> Self {
Self {
ordering: Ordering::Greater,
or_equal: false,
evm_version: version,
}
}
pub fn new_equals(version: EVMVersion) -> Self {
Self {
ordering: Ordering::Equal,
or_equal: false,
evm_version: version,
}
}
pub fn new_less_than(version: EVMVersion) -> Self {
Self {
ordering: Ordering::Less,
or_equal: false,
evm_version: version,
}
}
pub fn new_less_than_or_equals(version: EVMVersion) -> Self {
Self {
ordering: Ordering::Less,
or_equal: true,
evm_version: version,
}
}
pub fn matches(&self, other: &EVMVersion) -> bool {
let ordering = other.cmp(&self.evm_version);
ordering == self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal))
}
}
impl Display for EvmVersionRequirement {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self {
ordering,
or_equal,
evm_version,
} = self;
match ordering {
Ordering::Less => write!(f, "<")?,
Ordering::Equal => write!(f, "=")?,
Ordering::Greater => write!(f, ">")?,
}
if *or_equal && !matches!(ordering, Ordering::Equal) {
write!(f, "=")?;
}
write!(f, "{evm_version}")
}
}
impl FromStr for EvmVersionRequirement {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.as_bytes() {
[b'>', b'=', remaining @ ..] => Ok(Self {
ordering: Ordering::Greater,
or_equal: true,
evm_version: str::from_utf8(remaining)?.try_into()?,
}),
[b'>', remaining @ ..] => Ok(Self {
ordering: Ordering::Greater,
or_equal: false,
evm_version: str::from_utf8(remaining)?.try_into()?,
}),
[b'<', b'=', remaining @ ..] => Ok(Self {
ordering: Ordering::Less,
or_equal: true,
evm_version: str::from_utf8(remaining)?.try_into()?,
}),
[b'<', remaining @ ..] => Ok(Self {
ordering: Ordering::Less,
or_equal: false,
evm_version: str::from_utf8(remaining)?.try_into()?,
}),
[b'=', remaining @ ..] => Ok(Self {
ordering: Ordering::Equal,
or_equal: false,
evm_version: str::from_utf8(remaining)?.try_into()?,
}),
_ => anyhow::bail!("Invalid EVM version requirement {s}"),
}
}
}
impl TryFrom<String> for EvmVersionRequirement {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
value.parse()
}
}
impl From<EvmVersionRequirement> for String {
fn from(value: EvmVersionRequirement) -> Self {
value.to_string()
}
}
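A worked sketch of the comparison rule in matches above, with plain integers standing in for EVMVersion (the assumption being that EVMVersion is Ord over release order):
use std::cmp::Ordering;
fn matches(ordering: Ordering, or_equal: bool, required: u32, candidate: u32) -> bool {
    // Same rule as `EvmVersionRequirement::matches`: compare the candidate
    // against the required version, then also accept an exact match when `or_equal`.
    let cmp = candidate.cmp(&required);
    cmp == ordering || (or_equal && cmp == Ordering::Equal)
}
fn main() {
    // ">=2" accepts 2 and 3 but rejects 1.
    assert!(matches(Ordering::Greater, true, 2, 2));
    assert!(matches(Ordering::Greater, true, 2, 3));
    assert!(!matches(Ordering::Greater, true, 2, 1));
    // "<2" accepts only versions strictly older than 2.
    assert!(matches(Ordering::Less, false, 2, 1));
    assert!(!matches(Ordering::Less, false, 2, 2));
}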
/// A set of compilation directives that will be passed to the compiler whenever the contracts for
/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is
/// just a filter for when a test can run whereas this is an instruction to the compiler.
#[derive(
Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
pub struct CompilationDirectives {
/// Defines how the revert strings should be handled.
pub revert_string_handling: Option<RevertString>,
}
/// Defines how the compiler should handle revert strings.
#[derive(
Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
#[serde(rename_all = "camelCase")]
pub enum RevertString {
#[default]
Default,
Debug,
Strip,
VerboseDebug,
}
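Because RevertString uses rename_all = "camelCase", the metadata JSON spells the variants "default", "debug", "strip" and "verboseDebug"; a quick round-trip sketch (the enum is copied from the diff, the main function is illustrative):
use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum RevertString {
    Default,
    Debug,
    Strip,
    VerboseDebug,
}
fn main() -> serde_json::Result<()> {
    // Serialization produces the camelCase spelling...
    assert_eq!(
        serde_json::to_string(&RevertString::VerboseDebug)?,
        r#""verboseDebug""#
    );
    // ...and deserialization accepts it back.
    let parsed: RevertString = serde_json::from_str(r#""strip""#)?;
    assert_eq!(parsed, RevertString::Strip);
    Ok(())
}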
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
+238 -82
@@ -1,106 +1,262 @@
use regex::Regex;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;

/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
                ^
                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
                \s*
                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
                \s*
                (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
                $
            ").unwrap()
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(ModePipeline::from_str(m.as_str())?),
            None => None,
        };
        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(ModeOptimizerSetting::from_str(m.as_str())?),
            None => None,
        };
        let version = match caps.name("version") {
            Some(m) => Some(semver::VersionReq::parse(m.as_str()).map_err(|e| {
                anyhow::anyhow!("Cannot parse the version requirement '{}': {e}", m.as_str())
            })?),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;
        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }
        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }
        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }
        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });
        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };
        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

/// An iterator that could be either of two iterators.
#[derive(Clone, Debug)]
enum EitherIter<A, B> {
    A(A),
    B(B),
}

impl<A, B> Iterator for EitherIter<A, B>
where
    A: Iterator,
    B: Iterator<Item = A::Item>,
{
    type Item = A::Item;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            EitherIter::A(iter) => iter.next(),
            EitherIter::B(iter) => iter.next(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
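As a quick usage sketch of the parsing and expansion above (consistent with the tests; `Mode`'s `Display` implementation lives in the common crate and is assumed here to mirror the mode-string syntax):

#[allow(dead_code)]
fn parsed_mode_expansion_example() -> anyhow::Result<()> {
    // `Y+` pins the pipeline to YUL and maps the `+` flag to the M3
    // optimizer setting, so exactly one concrete mode is produced.
    let parsed = ParsedMode::from_str("Y+ >=0.8.0")?;
    let modes: Vec<String> = parsed.to_modes().map(|m| m.to_string()).collect();
    assert_eq!(modes, vec!["Y M3 >=0.8.0".to_owned()]);
    Ok(())
}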
+134 -7
@@ -1,30 +1,157 @@
use std::collections::HashMap;

use alloy::eips::BlockNumberOrTag;
use alloy::json_abi::JsonAbi;
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
use alloy_primitives::TxHash;
use anyhow::Result;

use crate::metadata::{ContractIdent, ContractInstance};

/// A trait describing the interface that implementors must provide so that the resolution logic in
/// this crate can go from string calldata to bytes calldata.
pub trait ResolverApi {
    /// Returns the ID of the chain that the node is on.
    fn chain_id(&self) -> impl Future<Output = Result<ChainId>>;

    /// Returns the gas price for the specified transaction.
    fn transaction_gas_price(&self, tx_hash: &TxHash) -> impl Future<Output = Result<u128>>;

    // TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit
    // when we implement the changes to the gas we need to adjust this to be a u64.
    /// Returns the gas limit of the specified block.
    fn block_gas_limit(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<u128>>;

    /// Returns the coinbase of the specified block.
    fn block_coinbase(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<Address>>;

    /// Returns the difficulty of the specified block.
    fn block_difficulty(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<U256>>;

    /// Returns the base fee of the specified block.
    fn block_base_fee(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<u64>>;

    /// Returns the hash of the specified block.
    fn block_hash(&self, number: BlockNumberOrTag) -> impl Future<Output = Result<BlockHash>>;

    /// Returns the timestamp of the specified block.
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> impl Future<Output = Result<BlockTimestamp>>;

    /// Returns the number of the last block.
    fn last_block_number(&self) -> impl Future<Output = Result<BlockNumber>>;
}

#[derive(Clone, Copy, Debug, Default)]
/// Contextual information required by the code that's performing the resolution.
pub struct ResolutionContext<'a> {
    /// When provided, the contracts provided here will be used for resolutions.
    deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,

    /// When provided, the variables in here will be used for performing resolutions.
    variables: Option<&'a HashMap<String, U256>>,

    /// When provided, this block number will be treated as the tip of the chain.
    block_number: Option<&'a BlockNumber>,

    /// When provided, the resolver will use this transaction hash for all of its resolutions.
    transaction_hash: Option<&'a TxHash>,
}

impl<'a> ResolutionContext<'a> {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn new_from_parts(
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
        block_number: impl Into<Option<&'a BlockNumber>>,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        Self {
            deployed_contracts: deployed_contracts.into(),
            variables: variables.into(),
            block_number: block_number.into(),
            transaction_hash: transaction_hash.into(),
        }
    }

    pub fn with_deployed_contracts(
        mut self,
        deployed_contracts: impl Into<
            Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
        >,
    ) -> Self {
        self.deployed_contracts = deployed_contracts.into();
        self
    }

    pub fn with_variables(
        mut self,
        variables: impl Into<Option<&'a HashMap<String, U256>>>,
    ) -> Self {
        self.variables = variables.into();
        self
    }

    pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
        self.block_number = block_number.into();
        self
    }

    pub fn with_transaction_hash(
        mut self,
        transaction_hash: impl Into<Option<&'a TxHash>>,
    ) -> Self {
        self.transaction_hash = transaction_hash.into();
        self
    }

    pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
        match self.block_number {
            Some(block_number) => match number {
                BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
                n @ (BlockNumberOrTag::Finalized
                | BlockNumberOrTag::Safe
                | BlockNumberOrTag::Earliest
                | BlockNumberOrTag::Pending
                | BlockNumberOrTag::Number(_)) => n,
            },
            None => number,
        }
    }

    pub fn deployed_contract(
        &self,
        instance: &ContractInstance,
    ) -> Option<&(ContractIdent, Address, JsonAbi)> {
        self.deployed_contracts
            .and_then(|deployed_contracts| deployed_contracts.get(instance))
    }

    pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
        self.deployed_contract(instance).map(|(_, a, _)| a)
    }

    pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
        self.deployed_contract(instance).map(|(_, _, a)| a)
    }

    pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
        self.variables
            .and_then(|variables| variables.get(name.as_ref()))
    }

    pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
        self.block_number
    }

    pub fn transaction_hash(&self) -> Option<&'a TxHash> {
        self.transaction_hash
    }
}
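A rough sketch of how the builder methods above compose (the variable name `$VALUE` and the block number are illustrative values, not taken from the repository):

#[allow(dead_code)]
fn resolution_context_example() {
    let mut variables = HashMap::new();
    variables.insert("$VALUE".to_owned(), U256::from(42u64));
    let tip: BlockNumber = 100;

    let context = ResolutionContext::new()
        .with_variables(&variables)
        .with_block_number(&tip);

    // With a tip block configured, `Latest` resolves to a concrete number
    // while explicit numbers and the other tags pass through untouched.
    assert!(matches!(
        context.resolve_block_number(BlockNumberOrTag::Latest),
        BlockNumberOrTag::Number(100)
    ));
    assert_eq!(context.variable("$VALUE"), Some(&U256::from(42u64)));
}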
+3
@@ -11,3 +11,6 @@ rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
anyhow = { workspace = true }

[lints]
workspace = true
+18 -4
@@ -1,21 +1,35 @@
//! This crate implements all node interactions.

use alloy::primitives::{Address, StorageKey, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;

/// An interface for all interactions with Ethereum compatible nodes.
pub trait EthereumNode {
    /// Execute the [TransactionRequest] and return a [TransactionReceipt].
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> impl Future<Output = Result<TransactionReceipt>>;

    /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
    fn trace_transaction(
        &self,
        receipt: &TransactionReceipt,
        trace_options: GethDebugTracingOptions,
    ) -> impl Future<Output = Result<GethTrace>>;

    /// Returns the state diff of the transaction hash in the [TransactionReceipt].
    fn state_diff(&self, receipt: &TransactionReceipt) -> impl Future<Output = Result<DiffMode>>;

    /// Returns the balance of the provided [`Address`].
    fn balance_of(&self, address: Address) -> impl Future<Output = Result<U256>>;

    /// Returns the latest storage proof of the provided [`Address`].
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> impl Future<Output = Result<EIP1186AccountProofResponse>>;
}
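Since every method returns an `impl Future`, callers can stay generic over the concrete node implementation; a minimal sketch (the helper name and error message are hypothetical):

#[allow(dead_code)]
async fn assert_funded(node: &impl EthereumNode, address: Address) -> Result<()> {
    // Await the trait method like any `async fn` and check the result.
    let balance = node.balance_of(address).await?;
    anyhow::ensure!(balance > U256::ZERO, "{address} has no funds");
    Ok(())
}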
+4
@@ -14,6 +14,7 @@ alloy = { workspace = true }
tracing = { workspace = true }
tokio = { workspace = true }
revive-common = { workspace = true }
revive-dt-common = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-format = { workspace = true }
@@ -28,3 +29,6 @@ sp-runtime = { workspace = true }
[dev-dependencies]
temp-dir = { workspace = true }
tokio = { workspace = true }

[lints]
workspace = true
+264 -195
@@ -3,9 +3,13 @@
use std::{
    fs::{File, OpenOptions, create_dir_all, remove_dir_all},
    io::{BufRead, BufReader, Read, Write},
    ops::ControlFlow,
    path::PathBuf,
    process::{Child, Command, Stdio},
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    time::{Duration, Instant},
};
@@ -13,23 +17,31 @@ use alloy::{
    eips::BlockNumberOrTag,
    genesis::{Genesis, GenesisAccount},
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::{
        Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
    },
    providers::{
        Provider, ProviderBuilder,
        ext::DebugApi,
        fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
    },
    rpc::types::{
        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
    },
    signers::local::PrivateKeySigner,
};
use anyhow::Context;
use revive_common::EVMVersion;
use tracing::{Instrument, instrument};

use revive_dt_common::{
    fs::clear_directory,
    futures::{PollingWaitBehavior, poll},
};
use revive_dt_config::Arguments;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;

use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -43,7 +55,8 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
///
/// Prunes the child process and the base directory on drop.
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct GethNode {
    connection_string: String,
    base_directory: PathBuf,
    data_directory: PathBuf,
@@ -51,10 +64,10 @@ pub struct Instance {
    geth: PathBuf,
    id: u32,
    handle: Option<Child>,
    start_timeout: u64,
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    chain_id_filler: ChainIdFiller,
    /// This vector stores [`File`] objects that we use for logging which we want to flush when the
    /// node object is dropped. We do not store them in a structured fashion at the moment (in
    /// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -62,7 +75,7 @@ pub struct Instance {
    logs_file_to_flush: Vec<File>,
}

impl GethNode {
    const BASE_DIRECTORY: &str = "geth";
    const DATA_DIRECTORY: &str = "data";
    const LOGS_DIRECTORY: &str = "logs";
@@ -77,10 +90,17 @@ impl Instance {
    const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";

    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";

    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

    /// Create the node directory and call `geth init` to configure the genesis.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

        create_dir_all(&self.base_directory)?;
        create_dir_all(&self.logs_directory)?;
@@ -99,6 +119,8 @@ impl Instance {
        serde_json::to_writer(File::create(&genesis_path)?, &genesis)?;

        let mut child = Command::new(&self.geth)
            .arg("--state.scheme")
            .arg("hash")
            .arg("init")
            .arg("--datadir")
            .arg(&self.data_directory)
@@ -124,7 +146,7 @@ impl Instance {
    /// Spawn the go-ethereum node child process.
    ///
    /// [Instance::init] must be called prior.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
        // This is the `OpenOptions` that we wish to use for all of the log files that we will be
        // opening in this method. We need to construct it in this way to:
@@ -147,8 +169,6 @@ impl Instance {
            .arg(&self.data_directory)
            .arg("--ipcpath")
            .arg(&self.connection_string)
            .arg("--nodiscover")
            .arg("--maxpeers")
            .arg("0")
@@ -156,6 +176,12 @@ impl Instance {
.arg("0") .arg("0")
.arg("--cache.blocklogs") .arg("--cache.blocklogs")
.arg("512") .arg("512")
.arg("--state.scheme")
.arg("hash")
.arg("--syncmode")
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(stderr_logs_file.try_clone()?) .stderr(stderr_logs_file.try_clone()?)
.stdout(stdout_logs_file.try_clone()?) .stdout(stdout_logs_file.try_clone()?)
.spawn()? .spawn()?
@@ -176,7 +202,7 @@ impl Instance {
    /// Wait for the go-ethereum node child process to become ready.
    ///
    /// [Instance::spawn_process] must be called prior.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
        let start_time = Instant::now();
@@ -189,6 +215,7 @@ impl Instance {
        let maximum_wait_time = Duration::from_millis(self.start_timeout);
        let mut stderr = BufReader::new(logs_file).lines();

        let mut lines = vec![];
        loop {
            if let Some(Ok(line)) = stderr.next() {
                if line.contains(Self::ERROR_MARKER) {
@@ -197,247 +224,281 @@ impl Instance {
                if line.contains(Self::READY_MARKER) {
                    return Ok(self);
                }
                lines.push(line);
            }
            if Instant::now().duration_since(start_time) > maximum_wait_time {
                anyhow::bail!(
                    "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
                    self.start_timeout,
                    lines.join("\n")
                );
            }
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn geth_stdout_log_file_path(&self) -> PathBuf {
        self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn geth_stderr_log_file_path(&self) -> PathBuf {
        self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
    }

    async fn provider(
        &self,
    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
    {
        ProviderBuilder::new()
            .disable_recommended_fillers()
            .filler(FallbackGasFiller::new(
                25_000_000,
                1_000_000_000,
                1_000_000_000,
            ))
            .filler(self.chain_id_filler.clone())
            .filler(NonceFiller::new(self.nonce_manager.clone()))
            .wallet(self.wallet.clone())
            .connect(&self.connection_string)
            .await
            .map_err(Into::into)
    }
}

impl EthereumNode for GethNode {
    #[instrument(
        level = "info",
        skip_all,
        fields(geth_node_id = self.id, connection_string = self.connection_string),
        err,
    )]
    async fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
        let provider = self.provider().await?;

        let pending_transaction = provider.send_transaction(transaction).await.inspect_err(
            |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
        )?;
        let transaction_hash = *pending_transaction.tx_hash();

        // The following is a fix for the "transaction indexing is in progress" error that we used
        // to get. You can find more information on this in the following GH issue in geth
        // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
        // before we can get the receipt of the transaction it needs to have been indexed by the
        // node's indexer. Just because the transaction has been confirmed it doesn't mean that it
        // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
        // confirmed. If it has been, then it will call the `eth_getTransactionReceipt` method
        // which _might_ return the above error if the tx has not been indexed yet. So, we need to
        // implement a retry mechanism for the receipt to keep retrying to get it until it
        // eventually works, but we only do that if the error we get back is the "transaction
        // indexing is in progress" error or if the receipt is None.
        //
        // Getting the transaction indexed and taking a receipt can take a long time especially when
        // a lot of transactions are being submitted to the node. Thus, while initially we only
        // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
        // a larger wait time. Therefore, in here we allow for 5 minutes of waiting, polling for the
        // receipt periodically until it becomes available.
        let provider = Arc::new(provider);
        poll(
            Self::RECEIPT_POLLING_DURATION,
            PollingWaitBehavior::Constant(Duration::from_millis(200)),
            move || {
                let provider = provider.clone();
                async move {
                    match provider.get_transaction_receipt(transaction_hash).await {
                        Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
                        Ok(None) => Ok(ControlFlow::Continue(())),
                        Err(error) => {
                            let error_string = error.to_string();
                            match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
                                true => Ok(ControlFlow::Continue(())),
                                false => Err(error.into()),
                            }
                        }
                    }
                }
            },
        )
        .instrument(tracing::info_span!(
            "Awaiting transaction receipt",
            ?transaction_hash
        ))
        .await
    }
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn trace_transaction(
        &self,
        transaction: &TransactionReceipt,
        trace_options: GethDebugTracingOptions,
    ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
        let provider = Arc::new(self.provider().await?);
        poll(
            Self::TRACE_POLLING_DURATION,
            PollingWaitBehavior::Constant(Duration::from_millis(200)),
            move || {
                let provider = provider.clone();
                let trace_options = trace_options.clone();
                async move {
                    match provider
                        .debug_trace_transaction(transaction.transaction_hash, trace_options)
                        .await
                    {
                        Ok(trace) => Ok(ControlFlow::Break(trace)),
                        Err(error) => {
                            let error_string = error.to_string();
                            match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
                                true => Ok(ControlFlow::Continue(())),
                                false => Err(error.into()),
                            }
                        }
                    }
                }
            },
        )
        .await
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
        let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
            diff_mode: Some(true),
            disable_code: None,
            disable_storage: None,
        });
        match self
            .trace_transaction(transaction, trace_options)
            .await?
            .try_into_pre_state_frame()?
        {
            PreStateFrame::Diff(diff) => Ok(diff),
            _ => anyhow::bail!("expected a diff mode trace"),
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
        self.provider()
            .await?
            .get_balance(address)
            .await
            .map_err(Into::into)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> anyhow::Result<EIP1186AccountProofResponse> {
        self.provider()
            .await?
            .get_proof(address, keys)
            .latest()
            .await
            .map_err(Into::into)
    }
}

impl ResolverApi for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
        self.provider()
            .await?
            .get_chain_id()
            .await
            .map_err(Into::into)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
        self.provider()
            .await?
            .get_transaction_receipt(*tx_hash)
            .await?
            .context("Failed to get the transaction receipt")
            .map(|receipt| receipt.effective_gas_price)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .map(|block| block.header.gas_limit as _)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .map(|block| block.header.beneficiary)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .and_then(|block| {
                block
                    .header
                    .base_fee_per_gas
                    .context("Failed to get the base fee per gas")
            })
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .map(|block| block.header.hash)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
        self.provider()
            .await?
            .get_block_by_number(number)
            .await?
            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
            .map(|block| block.header.timestamp)
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
        self.provider()
            .await?
            .get_block_number()
            .await
            .map_err(Into::into)
    }
}

impl Node for GethNode {
    fn new(config: &Arguments) -> Self {
        let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
@@ -460,22 +521,27 @@ impl Node for Instance {
            geth: config.geth.clone(),
            id,
            handle: None,
            start_timeout: config.geth_start_timeout,
            wallet: Arc::new(wallet),
            chain_id_filler: Default::default(),
            nonce_manager: Default::default(),
            // We know that we only need to be storing 2 files so we can specify that when creating
            // the vector. It's the stdout and stderr of the geth node.
            logs_file_to_flush: Vec::with_capacity(2),
        }
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn id(&self) -> usize {
        self.id as _
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn connection_string(&self) -> String {
        self.connection_string.clone()
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Terminate the processes in a graceful manner to allow for the output to be flushed.
        if let Some(mut child) = self.handle.take() {
@@ -497,13 +563,13 @@ impl Node for Instance {
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()?;
        Ok(())
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.geth)
            .arg("--version")
@@ -516,17 +582,20 @@ impl Node for Instance {
        Ok(String::from_utf8_lossy(&output).into())
    }

    fn matches_target(targets: Option<&[String]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|str| str.as_str() == "evm"),
        }
    }

    fn evm_version() -> EVMVersion {
        EVMVersion::Cancun
    }
}

impl Drop for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn drop(&mut self) {
        self.shutdown().expect("Failed to shutdown")
    }
@@ -550,9 +619,9 @@ mod tests {
        (config, temp_dir)
    }

    fn new_node() -> (GethNode, TempDir) {
        let (args, temp_dir) = test_config();
        let mut node = GethNode::new(&args);
        node.init(GENESIS_JSON.to_owned())
            .expect("Failed to initialize the node")
            .spawn_process()
@@ -562,110 +631,110 @@ mod tests {
    #[test]
    fn init_works() {
        GethNode::new(&test_config().0)
            .init(GENESIS_JSON.to_string())
            .unwrap();
    }

    #[test]
    fn spawn_works() {
        GethNode::new(&test_config().0)
            .spawn(GENESIS_JSON.to_string())
            .unwrap();
    }

    #[test]
    fn version_works() {
        let version = GethNode::new(&test_config().0).version().unwrap();
        assert!(
            version.starts_with("geth version"),
            "expected version string, got: '{version}'"
        );
    }

    #[tokio::test]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let chain_id = node.chain_id().await;

        // Assert
        let chain_id = chain_id.expect("Failed to get the chain id");
        assert_eq!(chain_id, 420_420_420);
    }

    #[tokio::test]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;

        // Assert
        let gas_limit = gas_limit.expect("Failed to get the gas limit");
        assert_eq!(gas_limit, u32::MAX as u128)
    }

    #[tokio::test]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;

        // Assert
        let coinbase = coinbase.expect("Failed to get the coinbase");
        assert_eq!(coinbase, Address::new([0xFF; 20]))
    }

    #[tokio::test]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;

        // Assert
        let block_difficulty = block_difficulty.expect("Failed to get the block difficulty");
        assert_eq!(block_difficulty, U256::ZERO)
    }

    #[tokio::test]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;

        // Assert
        let _ = block_hash.expect("Failed to get the block hash");
    }

    #[tokio::test]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;

        // Assert
        let _ = block_timestamp.expect("Failed to get the block timestamp");
    }

    #[tokio::test]
    async fn can_get_block_number_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();

        // Act
        let block_number = node.last_block_number().await;

        // Assert
        let block_number = block_number.expect("Failed to get the block number");
+157 -159
@@ -3,7 +3,10 @@ use std::{
    io::{BufRead, Write},
    path::{Path, PathBuf},
    process::{Child, Command, Stdio},
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    time::Duration,
};
@@ -16,7 +19,8 @@ use alloy::{
        TransactionBuilderError, UnbuiltTransactionError,
    },
    primitives::{
        Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes,
        StorageKey, TxHash, U256,
    },
    providers::{
        Provider, ProviderBuilder,
@@ -24,20 +28,21 @@ use alloy::{
        fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
    },
    rpc::types::{
        EIP1186AccountProofResponse, TransactionReceipt,
        eth::{Block, Header, Transaction},
        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
    },
    signers::local::PrivateKeySigner,
};
use anyhow::Context;
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde::{Deserialize, Serialize};
use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;

use revive_dt_config::Arguments;
use revive_dt_node_interaction::EthereumNode;
@@ -51,12 +56,13 @@ pub struct KitchensinkNode {
    substrate_binary: PathBuf,
    eth_proxy_binary: PathBuf,
    rpc_url: String,
    base_directory: PathBuf,
    logs_directory: PathBuf,
    process_substrate: Option<Child>,
    process_proxy: Option<Child>,
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    chain_id_filler: ChainIdFiller,
    /// This vector stores [`File`] objects that we use for logging which we want to flush when the
    /// node object is dropped. We do not store them in a structured fashion at the moment (in
    /// separate fields) as the logic that we need to apply to them is all the same regardless of
@@ -84,8 +90,10 @@ impl KitchensinkNode {
    const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
    const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";

    fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

        create_dir_all(&self.base_directory)?;
        create_dir_all(&self.logs_directory)?;
@@ -154,7 +162,6 @@ impl KitchensinkNode {
        Ok(self)
    }

    fn spawn_process(&mut self) -> anyhow::Result<()> {
        let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
        let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
@@ -206,12 +213,8 @@ impl KitchensinkNode {
        if let Err(error) = Self::wait_ready(
            self.kitchensink_stderr_log_file_path().as_path(),
            Self::SUBSTRATE_READY_MARKER,
            Duration::from_secs(60),
        ) {
            self.shutdown()?;
            return Err(error);
        };
@@ -235,9 +238,8 @@ impl KitchensinkNode {
        if let Err(error) = Self::wait_ready(
            self.proxy_stderr_log_file_path().as_path(),
            Self::ETH_PROXY_READY_MARKER,
            Duration::from_secs(60),
        ) {
            self.shutdown()?;
            return Err(error);
        };
@@ -252,7 +254,6 @@ impl KitchensinkNode {
         Ok(())
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn extract_balance_from_genesis_file(
         &self,
         genesis: &Genesis,
@@ -301,7 +302,6 @@ impl KitchensinkNode {
         }
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
         let output = Command::new(&self.eth_proxy_binary)
             .arg("--version")
@@ -314,200 +314,199 @@ impl KitchensinkNode {
         Ok(String::from_utf8_lossy(&output).trim().to_string())
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn kitchensink_stdout_log_file_path(&self) -> PathBuf {
         self.logs_directory
             .join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME)
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn kitchensink_stderr_log_file_path(&self) -> PathBuf {
         self.logs_directory
             .join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME)
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn proxy_stdout_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME)
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)]
     fn proxy_stderr_log_file_path(&self) -> PathBuf {
         self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME)
     }
 
-    fn provider(
+    async fn provider(
         &self,
-    ) -> impl Future<
-        Output = anyhow::Result<
+    ) -> anyhow::Result<
         FillProvider<
             impl TxFiller<KitchenSinkNetwork>,
             impl Provider<KitchenSinkNetwork>,
             KitchenSinkNetwork,
         >,
-        >,
-    > + 'static {
-        let connection_string = self.connection_string();
-        let wallet = self.wallet.clone();
-        // Note: We would like all providers to make use of the same nonce manager so that we have
-        // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in
-        // its implementation and therefore it means that when we clone it then it still references
-        // the same state.
-        let nonce_manager = self.nonce_manager.clone();
-        Box::pin(async move {
+    > {
         ProviderBuilder::new()
             .disable_recommended_fillers()
             .network::<KitchenSinkNetwork>()
             .filler(FallbackGasFiller::new(
-                30_000_000,
-                200_000_000_000,
-                3_000_000_000,
+                25_000_000,
+                1_000_000_000,
+                1_000_000_000,
             ))
-            .filler(ChainIdFiller::default())
-            .filler(NonceFiller::new(nonce_manager))
-            .wallet(wallet)
-            .connect(&connection_string)
+            .filler(self.chain_id_filler.clone())
+            .filler(NonceFiller::new(self.nonce_manager.clone()))
+            .wallet(self.wallet.clone())
+            .connect(&self.rpc_url)
             .await
             .map_err(Into::into)
-        })
     }
 }
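
The note removed above still explains the design: `CachedNonceManager` is `Arc`-backed, so every clone handed to a new provider shares one monotonically increasing nonce sequence. A dependency-free illustration of that clone-shares-state property (not the alloy type itself):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Clones share the same underlying map, like an `Arc`-backed nonce cache.
#[derive(Clone, Default)]
struct SharedNonces(Arc<Mutex<HashMap<String, u64>>>);

impl SharedNonces {
    fn next(&self, sender: &str) -> u64 {
        let mut map = self.0.lock().unwrap();
        let nonce = map.entry(sender.to_string()).or_insert(0);
        let current = *nonce;
        *nonce += 1;
        current
    }
}

fn main() {
    let a = SharedNonces::default();
    let b = a.clone(); // shares state with `a`
    assert_eq!(a.next("alice"), 0);
    assert_eq!(b.next("alice"), 1); // the clone sees the same counter
}
```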
 impl EthereumNode for KitchensinkNode {
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
-    fn execute_transaction(
+    async fn execute_transaction(
         &self,
         transaction: alloy::rpc::types::TransactionRequest,
     ) -> anyhow::Result<TransactionReceipt> {
-        tracing::debug!(?transaction, "Submitting transaction");
-        let provider = self.provider();
-        let receipt = BlockingExecutor::execute(async move {
-            Ok(provider
+        let receipt = self
+            .provider()
             .await?
             .send_transaction(transaction)
             .await?
             .get_receipt()
-            .await?)
-        })?;
-        tracing::info!(?receipt, "Submitted tx to kitchensink");
-        receipt
+            .await?;
+        Ok(receipt)
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
-    fn trace_transaction(
+    async fn trace_transaction(
         &self,
         transaction: &TransactionReceipt,
         trace_options: GethDebugTracingOptions,
     ) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
         let tx_hash = transaction.transaction_hash;
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            Ok(provider
+        Ok(self
+            .provider()
             .await?
             .debug_trace_transaction(tx_hash, trace_options)
             .await?)
-        })?
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
-    fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
+    async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result<DiffMode> {
         let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
             diff_mode: Some(true),
             disable_code: None,
             disable_storage: None,
         });
 
         match self
-            .trace_transaction(transaction, trace_options)?
+            .trace_transaction(transaction, trace_options)
+            .await?
             .try_into_pre_state_frame()?
         {
             PreStateFrame::Diff(diff) => Ok(diff),
             _ => anyhow::bail!("expected a diff mode trace"),
         }
     }
+
+    async fn balance_of(&self, address: Address) -> anyhow::Result<U256> {
+        self.provider()
+            .await?
+            .get_balance(address)
+            .await
+            .map_err(Into::into)
+    }
+
+    async fn latest_state_proof(
+        &self,
+        address: Address,
+        keys: Vec<StorageKey>,
+    ) -> anyhow::Result<EIP1186AccountProofResponse> {
+        self.provider()
+            .await?
+            .get_proof(address, keys)
+            .latest()
+            .await
+            .map_err(Into::into)
+    }
 }
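
Every method in this impl follows the same migration: the old code built a future and pushed it through `BlockingExecutor::execute`; the new code is async end to end and simply awaits. A self-contained sketch of the resulting shape, using a dummy trait rather than the crate's real `EthereumNode`:

```rust
// Names here are stand-ins; `async fn` in traits is stable since Rust 1.75.
trait ChainReader {
    async fn block_number(&self) -> anyhow::Result<u64>;
}

struct Dummy;

impl ChainReader for Dummy {
    async fn block_number(&self) -> anyhow::Result<u64> {
        Ok(42)
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let node = Dummy;
    // No blocking-executor round trip: callers just await.
    println!("latest block: {}", node.block_number().await?);
    Ok(())
}
```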
 impl ResolverApi for KitchensinkNode {
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider.await?.get_chain_id().await.map_err(Into::into)
-        })?
+    async fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
+        self.provider()
+            .await?
+            .get_chain_id()
+            .await
+            .map_err(Into::into)
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider
+    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
+        self.provider()
+            .await?
+            .get_transaction_receipt(*tx_hash)
+            .await?
+            .context("Failed to get the transaction receipt")
+            .map(|receipt| receipt.effective_gas_price)
+    }
+
+    async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
+        self.provider()
             .await?
             .get_block_by_number(number)
             .await?
             .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
-            .map(|block| block.header.gas_limit)
-        })?
+            .map(|block| block.header.gas_limit as _)
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider
+    async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
+        self.provider()
             .await?
             .get_block_by_number(number)
             .await?
             .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
             .map(|block| block.header.beneficiary)
-        })?
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider
+    async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
+        self.provider()
             .await?
             .get_block_by_number(number)
             .await?
             .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
-            .map(|block| block.header.difficulty)
-        })?
+            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider
+    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
+        self.provider()
+            .await?
+            .get_block_by_number(number)
+            .await?
+            .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
+            .and_then(|block| {
+                block
+                    .header
+                    .base_fee_per_gas
+                    .context("Failed to get the base fee per gas")
+            })
+    }
+
+    async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
+        self.provider()
             .await?
             .get_block_by_number(number)
             .await?
             .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
             .map(|block| block.header.hash)
-        })?
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider
+    async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
+        self.provider()
             .await?
             .get_block_by_number(number)
             .await?
             .ok_or(anyhow::Error::msg("Blockchain has no blocks"))
             .map(|block| block.header.timestamp)
-        })?
     }
 
-    #[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
-    fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
-        let provider = self.provider();
-        BlockingExecutor::execute(async move {
-            provider.await?.get_block_number().await.map_err(Into::into)
-        })?
+    async fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
+        self.provider()
+            .await?
+            .get_block_number()
+            .await
+            .map_err(Into::into)
     }
 }
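
One behavioral change to flag: `block_difficulty` now decodes the block's `mix_hash` (the post-merge `prevRandao` value) as a big-endian `U256` instead of returning the legacy `difficulty` field. A dependency-free sketch of that decoding, with four `u64` limbs standing in for alloy's `U256`:

```rust
/// Interpret a 32-byte hash as four big-endian u64 limbs, most significant first.
fn u256_limbs_from_be_bytes(bytes: [u8; 32]) -> [u64; 4] {
    let mut limbs = [0u64; 4];
    for (i, chunk) in bytes.chunks_exact(8).enumerate() {
        limbs[i] = u64::from_be_bytes(chunk.try_into().unwrap());
    }
    limbs
}

fn main() {
    let mut hash = [0u8; 32];
    hash[31] = 7; // lowest-order byte
    assert_eq!(u256_limbs_from_be_bytes(hash), [0, 0, 0, 7]);
}
```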
@@ -532,11 +531,12 @@ impl Node for KitchensinkNode {
             substrate_binary: config.kitchensink.clone(),
             eth_proxy_binary: config.eth_proxy.clone(),
             rpc_url: String::new(),
-            wallet,
             base_directory,
             logs_directory,
             process_substrate: None,
             process_proxy: None,
+            wallet: Arc::new(wallet),
+            chain_id_filler: Default::default(),
             nonce_manager: Default::default(),
             // We know that we only need to be storing 4 files so we can specify that when creating
             // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
@@ -544,12 +544,14 @@ impl Node for KitchensinkNode {
         }
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
+    fn id(&self) -> usize {
+        self.id as _
+    }
+
     fn connection_string(&self) -> String {
         self.rpc_url.clone()
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn shutdown(&mut self) -> anyhow::Result<()> {
         // Terminate the processes in a graceful manner to allow for the output to be flushed.
         if let Some(mut child) = self.process_proxy.take() {
@@ -576,12 +578,10 @@ impl Node for KitchensinkNode {
         Ok(())
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
         self.init(&genesis)?.spawn_process()
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn version(&self) -> anyhow::Result<String> {
         let output = Command::new(&self.substrate_binary)
             .arg("--version")
@@ -594,17 +594,19 @@ impl Node for KitchensinkNode {
         Ok(String::from_utf8_lossy(&output).into())
     }
 
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
-    fn matches_target(&self, targets: Option<&[String]>) -> bool {
+    fn matches_target(targets: Option<&[String]>) -> bool {
         match targets {
             None => true,
             Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"),
         }
     }
+
+    fn evm_version() -> EVMVersion {
+        EVMVersion::Cancun
+    }
 }
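
`matches_target` is now an associated function, so target filtering can run before any node process exists. A small stand-in illustration of the semantics (`None` means no filter, so everything matches):

```rust
// `PvmNode` is a stand-in type, not the crate's real one.
struct PvmNode;

impl PvmNode {
    fn matches_target(targets: Option<&[String]>) -> bool {
        targets.map_or(true, |t| t.iter().any(|s| s == "pvm"))
    }
}

fn main() {
    assert!(PvmNode::matches_target(None)); // no filter matches everything
    assert!(PvmNode::matches_target(Some(&["pvm".to_string()])));
    assert!(!PvmNode::matches_target(Some(&["evm".to_string()])));
}
```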
 impl Drop for KitchensinkNode {
-    #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))]
     fn drop(&mut self) {
         self.shutdown().expect("Failed to shutdown")
     }
@@ -1043,26 +1045,21 @@ mod tests {
     use revive_dt_config::Arguments;
     use std::path::PathBuf;
     use std::sync::{LazyLock, Mutex};
-    use temp_dir::TempDir;
     use std::fs;
 
     use super::*;
     use crate::{GENESIS_JSON, Node};
 
-    fn test_config() -> (Arguments, TempDir) {
-        let mut config = Arguments::default();
-        let temp_dir = TempDir::new().unwrap();
-
-        config.working_directory = temp_dir.path().to_path_buf().into();
-        config.kitchensink = PathBuf::from("substrate-node");
-        config.eth_proxy = PathBuf::from("eth-rpc");
-
-        (config, temp_dir)
+    fn test_config() -> Arguments {
+        Arguments {
+            kitchensink: PathBuf::from("substrate-node"),
+            eth_proxy: PathBuf::from("eth-rpc"),
+            ..Default::default()
+        }
     }
 
-    fn new_node() -> (KitchensinkNode, Arguments, TempDir) {
+    fn new_node() -> (KitchensinkNode, Arguments) {
         // Note: When we run the tests in the CI we found that if they're all
         // run in parallel then the CI is unable to start all of the nodes in
         // time and their start up times-out. Therefore, we want all of the
@@ -1081,20 +1078,20 @@ mod tests {
         static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
         let _guard = NODE_START_MUTEX.lock().unwrap();
 
-        let (args, temp_dir) = test_config();
+        let args = test_config();
         let mut node = KitchensinkNode::new(&args);
         node.init(GENESIS_JSON)
             .expect("Failed to initialize the node")
             .spawn_process()
             .expect("Failed to spawn the node process");
-        (node, args, temp_dir)
+        (node, args)
     }
 
     /// A shared node that multiple tests can use. It starts up once.
     fn shared_node() -> &'static KitchensinkNode {
-        static NODE: LazyLock<(KitchensinkNode, TempDir)> = LazyLock::new(|| {
-            let (node, _, temp_dir) = new_node();
-            (node, temp_dir)
+        static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| {
+            let (node, args) = new_node();
+            (node, args)
         });
         &NODE.0
     }
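
The test comment above motivates two pieces of machinery visible in these hunks: a process-wide mutex so node startups are serialized, and a `LazyLock` so the shared node is started at most once. The same combination in miniature:

```rust
use std::sync::{LazyLock, Mutex};

static START_LOCK: Mutex<()> = Mutex::new(());
static SHARED: LazyLock<u32> = LazyLock::new(|| {
    let _guard = START_LOCK.lock().unwrap(); // only one "start" at a time
    expensive_start()
});

fn expensive_start() -> u32 {
    42 // stand-in for spawning a node process
}

fn main() {
    // The first access initializes; every later access reuses the instance.
    assert_eq!(*SHARED, 42);
}
```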
@@ -1102,7 +1099,7 @@ mod tests {
     #[tokio::test]
     async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
         // Arrange
-        let (node, args, _temp_dir) = new_node();
+        let (node, args) = new_node();
 
         let provider = node.provider().await.expect("Failed to create provider");
@@ -1137,7 +1134,7 @@ mod tests {
         }
         "#;
 
-        let mut dummy_node = KitchensinkNode::new(&test_config().0);
+        let mut dummy_node = KitchensinkNode::new(&test_config());
 
         // Call `init()`
         dummy_node.init(genesis_content).expect("init failed");
@@ -1181,7 +1178,7 @@ mod tests {
         }
         "#;
 
-        let node = KitchensinkNode::new(&test_config().0);
+        let node = KitchensinkNode::new(&test_config());
 
         let result = node
             .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
@@ -1252,15 +1249,16 @@ mod tests {
     #[test]
     fn spawn_works() {
-        let (config, _temp_dir) = test_config();
+        let config = test_config();
         let mut node = KitchensinkNode::new(&config);
 
         node.spawn(GENESIS_JSON.to_string()).unwrap();
     }
 
     #[test]
     fn version_works() {
-        let (config, _temp_dir) = test_config();
+        let config = test_config();
         let node = KitchensinkNode::new(&config);
 
         let version = node.version().unwrap();
@@ -1273,7 +1271,7 @@ mod tests {
     #[test]
     fn eth_rpc_version_works() {
-        let (config, _temp_dir) = test_config();
+        let config = test_config();
         let node = KitchensinkNode::new(&config);
 
         let version = node.eth_rpc_version().unwrap();
@@ -1284,86 +1282,86 @@ mod tests {
         );
     }
 
-    #[test]
-    fn can_get_chain_id_from_node() {
+    #[tokio::test]
+    async fn can_get_chain_id_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let chain_id = node.chain_id();
+        let chain_id = node.chain_id().await;
 
         // Assert
         let chain_id = chain_id.expect("Failed to get the chain id");
         assert_eq!(chain_id, 420_420_420);
     }
 
-    #[test]
-    fn can_get_gas_limit_from_node() {
+    #[tokio::test]
+    async fn can_get_gas_limit_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest);
+        let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
 
         // Assert
         let _ = gas_limit.expect("Failed to get the gas limit");
     }
 
-    #[test]
-    fn can_get_coinbase_from_node() {
+    #[tokio::test]
+    async fn can_get_coinbase_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let coinbase = node.block_coinbase(BlockNumberOrTag::Latest);
+        let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
 
         // Assert
         let _ = coinbase.expect("Failed to get the coinbase");
     }
 
-    #[test]
-    fn can_get_block_difficulty_from_node() {
+    #[tokio::test]
+    async fn can_get_block_difficulty_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest);
+        let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
 
         // Assert
         let _ = block_difficulty.expect("Failed to get the block difficulty");
     }
 
-    #[test]
-    fn can_get_block_hash_from_node() {
+    #[tokio::test]
+    async fn can_get_block_hash_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let block_hash = node.block_hash(BlockNumberOrTag::Latest);
+        let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
 
         // Assert
         let _ = block_hash.expect("Failed to get the block hash");
     }
 
-    #[test]
-    fn can_get_block_timestamp_from_node() {
+    #[tokio::test]
+    async fn can_get_block_timestamp_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest);
+        let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
 
         // Assert
         let _ = block_timestamp.expect("Failed to get the block timestamp");
     }
 
-    #[test]
-    fn can_get_block_number_from_node() {
+    #[tokio::test]
+    async fn can_get_block_number_from_node() {
         // Arrange
         let node = shared_node();
 
         // Act
-        let block_number = node.last_block_number();
+        let block_number = node.last_block_number().await;
 
         // Assert
         let _ = block_number.expect("Failed to get the block number");
+8 -1
@@ -1,5 +1,6 @@
 //! This crate implements the testing nodes.
+use revive_common::EVMVersion;
 use revive_dt_config::Arguments;
 use revive_dt_node_interaction::EthereumNode;
@@ -17,6 +18,9 @@ pub trait Node: EthereumNode {
     /// Create a new uninitialized instance.
     fn new(config: &Arguments) -> Self;
 
+    /// Returns the identifier of the node.
+    fn id(&self) -> usize;
+
     /// Spawns a node configured according to the genesis json.
     ///
     /// Blocking until it's ready to accept transactions.
@@ -35,5 +39,8 @@ pub trait Node: EthereumNode {
     /// Given a list of targets from the metadata file, this function determines if the metadata
     /// file can be run on this node or not.
-    fn matches_target(&self, targets: Option<&[String]>) -> bool;
+    fn matches_target(targets: Option<&[String]>) -> bool;
+
+    /// Returns the EVM version of the node.
+    fn evm_version() -> EVMVersion;
 }
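
The revised trait separates per-instance queries (`id`) from associated functions that need no running node (`matches_target`, `evm_version`). A trimmed, self-contained stand-in to show the shapes (not the crate's real definition):

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EvmVersion {
    Cancun,
}

trait Node {
    fn id(&self) -> usize;
    fn evm_version() -> EvmVersion;
}

struct DummyNode {
    id: usize,
}

impl Node for DummyNode {
    fn id(&self) -> usize {
        self.id
    }

    fn evm_version() -> EvmVersion {
        EvmVersion::Cancun
    }
}

fn main() {
    // The EVM version is known without constructing a node...
    assert_eq!(DummyNode::evm_version(), EvmVersion::Cancun);
    // ...while the identifier belongs to a concrete instance.
    assert_eq!(DummyNode { id: 3 }.id(), 3);
}
```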
+3 -3
@@ -1,11 +1,12 @@
 //! This crate implements concurrent handling of testing nodes.
 
 use std::{
-    fs::read_to_string,
     sync::atomic::{AtomicUsize, Ordering},
     thread,
 };
 
+use revive_dt_common::cached_fs::read_to_string;
 use anyhow::Context;
 use revive_dt_config::Arguments;
@@ -24,7 +25,7 @@ where
 {
     /// Create a new Pool. This will start as many nodes as there are workers in `config`.
     pub fn new(config: &Arguments) -> anyhow::Result<Self> {
-        let nodes = config.workers;
+        let nodes = config.number_of_nodes;
         let genesis = read_to_string(&config.genesis_file).context(format!(
             "can not read genesis file: {}",
             config.genesis_file.display()
@@ -62,7 +63,6 @@ where
 fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
     let mut node = T::new(args);
-    tracing::info!("starting node: {}", node.connection_string());
     node.spawn(genesis)?;
     Ok(node)
 }
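
For orientation, `Pool::new` fans out one `spawn_node` per configured node; the crate imports `std::thread`, so a thread-per-spawn sketch seems plausible (treat the structure below as an assumption, not the crate's actual code):

```rust
use std::thread;

fn spawn_all(number_of_nodes: usize, genesis: String) -> anyhow::Result<Vec<u32>> {
    let handles: Vec<_> = (0..number_of_nodes)
        .map(|_| {
            let genesis = genesis.clone();
            thread::spawn(move || spawn_node(genesis))
        })
        .collect();
    // Propagate the first spawn error, if any.
    handles
        .into_iter()
        .map(|handle| handle.join().expect("node spawn thread panicked"))
        .collect()
}

fn spawn_node(_genesis: String) -> anyhow::Result<u32> {
    Ok(7) // stand-in for `T::new(args)` followed by `node.spawn(genesis)?`
}
```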
+4 -1
@@ -8,11 +8,14 @@ repository.workspace = true
 rust-version.workspace = true
 
 [dependencies]
+revive-dt-common = { workspace = true }
 revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
 revive-dt-compiler = { workspace = true }
 anyhow = { workspace = true }
+tracing = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
+
+[lints]
+workspace = true
+9 -10
@@ -12,18 +12,19 @@ use std::{
 };
 
 use anyhow::Context;
-use revive_dt_compiler::{CompilerInput, CompilerOutput};
-use serde::{Deserialize, Serialize};
+use serde::Serialize;
 
+use revive_dt_common::types::Mode;
+use revive_dt_compiler::{CompilerInput, CompilerOutput};
 use revive_dt_config::{Arguments, TestingPlatform};
-use revive_dt_format::{corpus::Corpus, mode::SolcMode};
+use revive_dt_format::corpus::Corpus;
 
 use crate::analyzer::CompilerStatistics;
 
 pub(crate) static REPORTER: OnceLock<Mutex<Report>> = OnceLock::new();
 
 /// The `Report` data structure stores all relevant information required for generating reports.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize)]
 pub struct Report {
     /// The configuration used during the test.
     pub config: Arguments,
@@ -41,14 +42,14 @@ pub struct Report {
 }
 
 /// Contains a compiled contract.
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
 pub struct CompilationTask {
     /// The observed compiler input.
     pub json_input: CompilerInput,
     /// The observed compiler output.
     pub json_output: Option<CompilerOutput>,
     /// The observed compiler mode.
-    pub mode: SolcMode,
+    pub mode: Mode,
     /// The observed compiler version.
     pub compiler_version: String,
     /// The observed error, if any.
@@ -56,7 +57,7 @@ pub struct CompilationTask {
 }
 
 /// Represents a report about a compilation task.
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
 pub struct CompilationResult {
     /// The observed compilation task.
     pub compilation_task: CompilationTask,
@@ -65,7 +66,7 @@ pub struct CompilationResult {
 }
 
 /// The [Span] struct indicates the context of what is being reported.
-#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+#[derive(Clone, Copy, Debug, Serialize)]
 pub struct Span {
     /// The corpus index this belongs to.
     corpus: usize,
@@ -184,8 +185,6 @@ impl Report {
         let file = File::create(&path).context(path.display().to_string())?;
         serde_json::to_writer_pretty(file, &self)?;
 
-        tracing::info!("report written to: {}", path.display());
-
         Ok(())
     }
 }
+4
@@ -14,7 +14,11 @@ revive-dt-common = { workspace = true }
 anyhow = { workspace = true }
 hex = { workspace = true }
 tracing = { workspace = true }
+tokio = { workspace = true }
 reqwest = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 sha2 = { workspace = true }
+
+[lints]
+workspace = true
+10 -11
@@ -6,41 +6,40 @@ use std::{
     io::{BufWriter, Write},
     os::unix::fs::PermissionsExt,
     path::{Path, PathBuf},
-    sync::{LazyLock, Mutex},
+    sync::LazyLock,
 };
 
-use crate::download::GHDownloader;
+use tokio::sync::Mutex;
+
+use crate::download::SolcDownloader;
 
 pub const SOLC_CACHE_DIRECTORY: &str = "solc";
 
 pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
 
-pub(crate) fn get_or_download(
+pub(crate) async fn get_or_download(
     working_directory: &Path,
-    downloader: &GHDownloader,
+    downloader: &SolcDownloader,
 ) -> anyhow::Result<PathBuf> {
     let target_directory = working_directory
         .join(SOLC_CACHE_DIRECTORY)
         .join(downloader.version.to_string());
     let target_file = target_directory.join(downloader.target);
 
-    let mut cache = SOLC_CACHER.lock().unwrap();
+    let mut cache = SOLC_CACHER.lock().await;
     if cache.contains(&target_file) {
         tracing::debug!("using cached solc: {}", target_file.display());
         return Ok(target_file);
     }
 
     create_dir_all(target_directory)?;
-    download_to_file(&target_file, downloader)?;
+    download_to_file(&target_file, downloader).await?;
     cache.insert(target_file.clone());
 
     Ok(target_file)
 }
 
-fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()> {
-    tracing::info!("caching file: {}", path.display());
-
+async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
     let Ok(file) = File::create_new(path) else {
-        tracing::debug!("cache file already exists: {}", path.display());
         return Ok(());
     };
@@ -52,7 +51,7 @@ fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()
     }
 
     let mut file = BufWriter::new(file);
-    file.write_all(&downloader.download()?)?;
+    file.write_all(&downloader.download().await?)?;
     file.flush()?;
     drop(file);
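
The switch to `tokio::sync::Mutex` matters because the guard is now held across the `download_to_file(...).await` point; a `std::sync::MutexGuard` must not live across an `.await` (the future stops being `Send`, and the lock would block the runtime). The pattern in isolation:

```rust
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::LazyLock;

use tokio::sync::Mutex;

static CACHE: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

async fn get_or_fetch(path: PathBuf) -> anyhow::Result<PathBuf> {
    // Holding this guard across `.await` is fine with tokio's async Mutex.
    let mut cache = CACHE.lock().await;
    if cache.contains(&path) {
        return Ok(path);
    }
    fetch(&path).await?;
    cache.insert(path.clone());
    Ok(path)
}

async fn fetch(_path: &Path) -> anyhow::Result<()> {
    Ok(()) // stand-in for the actual download
}
```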
+80 -49
@@ -25,12 +25,12 @@ impl List {
     ///
     /// Caches the list retrieved from the `url` into [LIST_CACHE],
     /// subsequent calls with the same `url` will return the cached list.
-    pub fn download(url: &'static str) -> anyhow::Result<Self> {
+    pub async fn download(url: &'static str) -> anyhow::Result<Self> {
         if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
             return Ok(list.clone());
         }
 
-        let body: List = reqwest::blocking::get(url)?.json()?;
+        let body: List = reqwest::get(url).await?.json().await?;
 
         LIST_CACHE.lock().unwrap().insert(url, body.clone());
@@ -38,23 +38,23 @@ impl List {
     }
 }
 
-/// Download solc binaries from GitHub releases (IPFS links aren't reliable).
+/// Download solc binaries from the official SolidityLang site
 #[derive(Clone, Debug)]
-pub struct GHDownloader {
+pub struct SolcDownloader {
     pub version: Version,
     pub target: &'static str,
     pub list: &'static str,
 }
 
-impl GHDownloader {
-    pub const BASE_URL: &str = "https://github.com/ethereum/solidity/releases/download";
-    pub const LINUX_NAME: &str = "solc-static-linux";
-    pub const MACOSX_NAME: &str = "solc-macos";
-    pub const WINDOWS_NAME: &str = "solc-windows.exe";
-    pub const WASM_NAME: &str = "soljson.js";
+impl SolcDownloader {
+    pub const BASE_URL: &str = "https://binaries.soliditylang.org";
+    pub const LINUX_NAME: &str = "linux-amd64";
+    pub const MACOSX_NAME: &str = "macosx-amd64";
+    pub const WINDOWS_NAME: &str = "windows-amd64";
+    pub const WASM_NAME: &str = "wasm";
 
-    fn new(
+    async fn new(
         version: impl Into<VersionOrRequirement>,
         target: &'static str,
         list: &'static str,
@@ -67,7 +67,8 @@ impl GHDownloader {
                 list,
             }),
             VersionOrRequirement::Requirement(requirement) => {
-                let Some(version) = List::download(list)?
+                let Some(version) = List::download(list)
+                    .await?
                     .builds
                     .into_iter()
                     .map(|build| build.version)
@@ -85,41 +86,42 @@ impl GHDownloader {
         }
     }
 
-    pub fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
-        Self::new(version, Self::LINUX_NAME, List::LINUX_URL)
+    pub async fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
+        Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await
     }
 
-    pub fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
-        Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL)
+    pub async fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
+        Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await
     }
 
-    pub fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
-        Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL)
+    pub async fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
+        Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await
     }
 
-    pub fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
-        Self::new(version, Self::WASM_NAME, List::WASM_URL)
-    }
-
-    /// Returns the download link.
-    pub fn url(&self) -> String {
-        format!("{}/v{}/{}", Self::BASE_URL, &self.version, &self.target)
+    pub async fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
+        Self::new(version, Self::WASM_NAME, List::WASM_URL).await
     }
 
     /// Download the solc binary.
     ///
     /// Errors out if the download fails or the digest of the downloaded file
     /// mismatches the expected digest from the release [List].
-    pub fn download(&self) -> anyhow::Result<Vec<u8>> {
-        tracing::info!("downloading solc: {self:?}");
-
-        let expected_digest = List::download(self.list)?
-            .builds
+    pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
+        let builds = List::download(self.list).await?.builds;
+
+        let build = builds
             .iter()
             .find(|build| build.version == self.version)
-            .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))
-            .map(|b| b.sha256.strip_prefix("0x").unwrap_or(&b.sha256).to_string())?;
-
-        let file = reqwest::blocking::get(self.url())?.bytes()?.to_vec();
+            .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))?;
+
+        let path = build.path.clone();
+        let expected_digest = build
+            .sha256
+            .strip_prefix("0x")
+            .unwrap_or(&build.sha256)
+            .to_string();
+
+        let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
+        let file = reqwest::get(url).await?.bytes().await?.to_vec();
 
         if hex::encode(Sha256::digest(&file)) != expected_digest {
             anyhow::bail!("sha256 mismatch for solc version {}", self.version);
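
The integrity check at the tail of `download` is worth seeing in isolation: hash the payload with SHA-256 and compare it against the hex digest published in the release list, tolerating a `0x` prefix. A direct sketch with the same `sha2` and `hex` crates:

```rust
use sha2::{Digest, Sha256};

fn verify_sha256(bytes: &[u8], expected_hex: &str) -> anyhow::Result<()> {
    let expected = expected_hex.strip_prefix("0x").unwrap_or(expected_hex);
    let actual = hex::encode(Sha256::digest(bytes));
    if actual != expected {
        anyhow::bail!("sha256 mismatch: expected {expected}, got {actual}");
    }
    Ok(())
}
```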
@@ -131,29 +133,58 @@ impl GHDownloader {
 #[cfg(test)]
 mod tests {
-    use crate::{download::GHDownloader, list::List};
+    use crate::{download::SolcDownloader, list::List};
 
-    #[test]
-    fn try_get_windows() {
-        let version = List::download(List::WINDOWS_URL).unwrap().latest_release;
-        GHDownloader::windows(version).unwrap().download().unwrap();
+    #[tokio::test]
+    async fn try_get_windows() {
+        let version = List::download(List::WINDOWS_URL)
+            .await
+            .unwrap()
+            .latest_release;
+        SolcDownloader::windows(version)
+            .await
+            .unwrap()
+            .download()
+            .await
+            .unwrap();
     }
 
-    #[test]
-    fn try_get_macosx() {
-        let version = List::download(List::MACOSX_URL).unwrap().latest_release;
-        GHDownloader::macosx(version).unwrap().download().unwrap();
+    #[tokio::test]
+    async fn try_get_macosx() {
+        let version = List::download(List::MACOSX_URL)
+            .await
+            .unwrap()
+            .latest_release;
+        SolcDownloader::macosx(version)
+            .await
+            .unwrap()
+            .download()
+            .await
+            .unwrap();
     }
 
-    #[test]
-    fn try_get_linux() {
-        let version = List::download(List::LINUX_URL).unwrap().latest_release;
-        GHDownloader::linux(version).unwrap().download().unwrap();
+    #[tokio::test]
+    async fn try_get_linux() {
+        let version = List::download(List::LINUX_URL)
+            .await
+            .unwrap()
+            .latest_release;
+        SolcDownloader::linux(version)
+            .await
+            .unwrap()
+            .download()
+            .await
+            .unwrap();
     }
 
-    #[test]
-    fn try_get_wasm() {
-        let version = List::download(List::WASM_URL).unwrap().latest_release;
-        GHDownloader::wasm(version).unwrap().download().unwrap();
+    #[tokio::test]
+    async fn try_get_wasm() {
+        let version = List::download(List::WASM_URL).await.unwrap().latest_release;
+        SolcDownloader::wasm(version)
+            .await
+            .unwrap()
+            .download()
+            .await
+            .unwrap();
     }
 }
+7 -7
@@ -6,7 +6,7 @@
 use std::path::{Path, PathBuf};
 
 use cache::get_or_download;
-use download::GHDownloader;
+use download::SolcDownloader;
 
 use revive_dt_common::types::VersionOrRequirement;
@@ -19,22 +19,22 @@ pub mod list;
 ///
 /// Subsequent calls for the same version will use a cached artifact
 /// and not download it again.
-pub fn download_solc(
+pub async fn download_solc(
     cache_directory: &Path,
     version: impl Into<VersionOrRequirement>,
     wasm: bool,
 ) -> anyhow::Result<PathBuf> {
     let downloader = if wasm {
-        GHDownloader::wasm(version)
+        SolcDownloader::wasm(version).await
     } else if cfg!(target_os = "linux") {
-        GHDownloader::linux(version)
+        SolcDownloader::linux(version).await
     } else if cfg!(target_os = "macos") {
-        GHDownloader::macosx(version)
+        SolcDownloader::macosx(version).await
     } else if cfg!(target_os = "windows") {
-        GHDownloader::windows(version)
+        SolcDownloader::windows(version).await
     } else {
         unimplemented!()
     }?;
 
-    get_or_download(cache_directory, &downloader)
+    get_or_download(cache_directory, &downloader).await
 }
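
A hypothetical call site for the now-async `download_solc`, assuming a tokio runtime and that `semver::Version` converts into `VersionOrRequirement` as the signature suggests:

```rust
use std::path::Path;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let solc = download_solc(
        Path::new("/tmp/solc-cache"),
        semver::Version::new(0, 8, 26),
        false, // native binary, not the wasm build
    )
    .await?;
    println!("solc binary at {}", solc.display());
    Ok(())
}
```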