diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs
index 7e92cf0..e43a527 100644
--- a/crates/compiler/src/lib.rs
+++ b/crates/compiler/src/lib.rs
@@ -44,6 +44,8 @@ pub trait SolidityCompiler {
 pub struct CompilerInput<T> {
     pub extra_options: T,
     pub input: SolcStandardJsonInput,
+    pub allow_paths: Vec<PathBuf>,
+    pub base_path: Option<PathBuf>,
 }
 
 /// The generic compilation output configuration.
@@ -83,8 +85,8 @@ where
 pub struct Compiler<T: SolidityCompiler> {
     input: SolcStandardJsonInput,
     extra_options: T::Options,
-    allow_paths: Vec<String>,
-    base_path: Option<String>,
+    allow_paths: Vec<PathBuf>,
+    base_path: Option<PathBuf>,
 }
 
 impl<T: SolidityCompiler> Default for Compiler<T> {
@@ -145,12 +147,12 @@ where
         self
     }
 
-    pub fn allow_path(mut self, path: String) -> Self {
+    pub fn allow_path(mut self, path: PathBuf) -> Self {
         self.allow_paths.push(path);
         self
     }
 
-    pub fn base_path(mut self, base_path: String) -> Self {
+    pub fn base_path(mut self, base_path: PathBuf) -> Self {
         self.base_path = Some(base_path);
         self
     }
 
@@ -159,6 +161,8 @@
         T::new(solc_path).build(CompilerInput {
             extra_options: self.extra_options,
             input: self.input,
+            allow_paths: self.allow_paths,
+            base_path: self.base_path,
         })
     }
 
diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs
index eb63719..501a2f9 100644
--- a/crates/compiler/src/revive_resolc.rs
+++ b/crates/compiler/src/revive_resolc.rs
@@ -23,13 +23,27 @@ impl SolidityCompiler for Resolc {
         &self,
         input: CompilerInput<Self::Options>,
     ) -> anyhow::Result<CompilerOutput<Self::Options>> {
-        let mut child = Command::new(&self.resolc_path)
-            .arg("--standard-json")
-            .args(&input.extra_options)
+        let mut command = Command::new(&self.resolc_path);
+        command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
             .stderr(Stdio::piped())
-            .spawn()?;
+            .arg("--standard-json");
+
+        if let Some(ref base_path) = input.base_path {
+            command.arg("--base-path").arg(base_path);
+        }
+        if !input.allow_paths.is_empty() {
+            command.arg("--allow-paths").arg(
+                input
+                    .allow_paths
+                    .iter()
+                    .map(|path| path.display().to_string())
+                    .collect::<Vec<_>>()
+                    .join(","),
+            );
+        }
+        let mut child = command.spawn()?;
 
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
         serde_json::to_writer(stdin_pipe, &input.input)?;
@@ -55,13 +69,22 @@ impl SolidityCompiler for Resolc {
             });
         }
 
-        let parsed: SolcStandardJsonOutput = serde_json::from_slice(&stdout).map_err(|e| {
+        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
             anyhow::anyhow!(
                 "failed to parse resolc JSON output: {e}\nstderr: {}",
                 String::from_utf8_lossy(&stderr)
             )
         })?;
 
+        // Detecting if the compiler output contained errors and reporting them through logs and
+        // errors instead of returning the compiler output that might contain errors.
+        for error in parsed.errors.iter().flatten() {
+            if error.severity == "error" {
+                tracing::error!(?error, ?input, "Encountered an error in the compilation");
+                anyhow::bail!("Encountered an error in the compilation: {error}")
+            }
+        }
+
         Ok(CompilerOutput {
             input,
             output: parsed,
diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs
index b40d18f..653dd33 100644
--- a/crates/compiler/src/solc.rs
+++ b/crates/compiler/src/solc.rs
@@ -9,6 +9,7 @@ use std::{
 use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
 use revive_dt_config::Arguments;
 use revive_dt_solc_binaries::download_solc;
+use revive_solc_json_interface::SolcStandardJsonOutput;
 
 pub struct Solc {
     solc_path: PathBuf,
@@ -21,12 +22,27 @@ impl SolidityCompiler for Solc {
         &self,
         input: CompilerInput<Self::Options>,
     ) -> anyhow::Result<CompilerOutput<Self::Options>> {
-        let mut child = Command::new(&self.solc_path)
+        let mut command = Command::new(&self.solc_path);
+        command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
             .stderr(Stdio::piped())
-            .arg("--standard-json")
-            .spawn()?;
+            .arg("--standard-json");
+
+        if let Some(ref base_path) = input.base_path {
+            command.arg("--base-path").arg(base_path);
+        }
+        if !input.allow_paths.is_empty() {
+            command.arg("--allow-paths").arg(
+                input
+                    .allow_paths
+                    .iter()
+                    .map(|path| path.display().to_string())
+                    .collect::<Vec<_>>()
+                    .join(","),
+            );
+        }
+        let mut child = command.spawn()?;
 
         let stdin = child.stdin.as_mut().expect("should be piped");
         serde_json::to_writer(stdin, &input.input)?;
@@ -42,9 +58,26 @@
             });
         }
 
+        let parsed =
+            serde_json::from_slice::<SolcStandardJsonOutput>(&output.stdout).map_err(|e| {
+                anyhow::anyhow!(
+                    "failed to parse solc JSON output: {e}\nstderr: {}",
+                    String::from_utf8_lossy(&output.stderr)
+                )
+            })?;
+
+        // Detecting if the compiler output contained errors and reporting them through logs and
+        // errors instead of returning the compiler output that might contain errors.
+        for error in parsed.errors.iter().flatten() {
+            if error.severity == "error" {
+                tracing::error!(?error, ?input, "Encountered an error in the compilation");
+                anyhow::bail!("Encountered an error in the compilation: {error}")
+            }
+        }
+
         Ok(CompilerOutput {
             input,
-            output: serde_json::from_slice(&output.stdout)?,
+            output: parsed,
             error: None,
         })
     }
diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs
index 731bdfd..c725c03 100644
--- a/crates/core/src/driver/mod.rs
+++ b/crates/core/src/driver/mod.rs
@@ -69,14 +69,13 @@ where
             anyhow::bail!("unsupported solc version: {:?}", &mode.solc_version);
         };
 
-        let mut compiler = Compiler::<T::Compiler>::new()
-            .base_path(metadata.directory()?.display().to_string())
+        let compiler = Compiler::<T::Compiler>::new()
+            .allow_path(metadata.directory()?)
             .solc_optimizer(mode.solc_optimize());
 
-        for (file, _contract) in metadata.contract_sources()?.values() {
-            tracing::debug!("contract source {}", file.display());
-            compiler = compiler.with_source(file)?;
-        }
+        let compiler = FilesWithExtensionIterator::new(metadata.directory()?)
+            .with_allowed_extension("sol")
+            .try_fold(compiler, |compiler, path| compiler.with_source(&path))?;
 
         let mut task = CompilationTask {
             json_input: compiler.input(),
@@ -184,12 +183,15 @@ where
     }
 
     pub fn deploy_contracts(&mut self, input: &Input, node: &T::Blockchain) -> anyhow::Result<()> {
-        tracing::debug!(
-            "Deploying contracts {}, having address {} on node: {}",
-            &input.instance,
-            &input.caller,
-            std::any::type_name::<T::Blockchain>()
+        let tracing_span = tracing::debug_span!(
+            "Deploying contracts",
+            ?input,
+            node = std::any::type_name::<T::Blockchain>()
         );
+        let _guard = tracing_span.enter();
+
+        tracing::debug!(number_of_contracts_to_deploy = self.contracts.len());
+
         for output in self.contracts.values() {
             let Some(contract_map) = &output.contracts else {
                 tracing::debug!(
@@ -482,3 +484,77 @@ where
         Ok(())
     }
 }
+
+/// An iterator that finds files of a certain extension in the provided directory. You can think of
+/// this as a glob pattern similar to: `${path}/**/*.md`
+struct FilesWithExtensionIterator {
+    /// The set of allowed extensions that match the requirement and that should be returned
+    /// when found.
+    allowed_extensions: std::collections::HashSet<std::borrow::Cow<'static, str>>,
+
+    /// The set of directories to visit next. This iterator does DFS and so these directories will
+    /// only be visited if we can't find any files in our state.
+    directories_to_search: Vec<std::path::PathBuf>,
+
+    /// The set of files matching the allowed extensions that were found. If there are entries in
+    /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
+    /// then we visit one of the next directories to visit.
+    ///
+    /// [`Iterator`]: std::iter::Iterator
+    files_matching_allowed_extensions: Vec<std::path::PathBuf>,
+}
+
+impl FilesWithExtensionIterator {
+    fn new(root_directory: std::path::PathBuf) -> Self {
+        Self {
+            allowed_extensions: Default::default(),
+            directories_to_search: vec![root_directory],
+            files_matching_allowed_extensions: Default::default(),
+        }
+    }
+
+    fn with_allowed_extension(
+        mut self,
+        allowed_extension: impl Into<std::borrow::Cow<'static, str>>,
+    ) -> Self {
+        self.allowed_extensions.insert(allowed_extension.into());
+        self
+    }
+}
+
+impl Iterator for FilesWithExtensionIterator {
+    type Item = std::path::PathBuf;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
+            return Some(file_path);
+        };
+
+        let directory_to_search = self.directories_to_search.pop()?;
+
+        // Read all of the entries in the directory. If we failed to read this dir's entries then we
+        // elect to just ignore it and look in the next directory, we do that by calling the next
+        // method again on the iterator, which is an intentional decision that we made here instead
+        // of panicking.
+        let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
+            return self.next();
+        };
+
+        for entry in dir_entries.flatten() {
+            let entry_path = entry.path();
+            if entry_path.is_dir() {
+                self.directories_to_search.push(entry_path)
+            } else if entry_path.is_file()
+                && entry_path.extension().is_some_and(|ext| {
+                    self.allowed_extensions
+                        .iter()
+                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
+                })
+            {
+                self.files_matching_allowed_extensions.push(entry_path)
+            }
+        }
+
+        self.next()
+    }
+}
diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs
index df868c2..1cdefe4 100644
--- a/crates/format/src/input.rs
+++ b/crates/format/src/input.rs
@@ -93,9 +93,14 @@ impl Input {
             return Ok(Bytes::default()); // fallback or deployer — no input
         };
 
-        let abi = deployed_abis
-            .get(&self.instance)
-            .ok_or_else(|| anyhow::anyhow!("ABI for instance '{}' not found", &self.instance))?;
+        let Some(abi) = deployed_abis.get(&self.instance) else {
+            tracing::error!(
+                contract_name = self.instance,
+                available_abis = ?deployed_abis.keys().collect::<Vec<_>>(),
+                "Attempted to lookup ABI of contract but it wasn't found"
+            );
+            anyhow::bail!("ABI for instance '{}' not found", &self.instance);
+        };
 
         tracing::trace!("ABI found for instance: {}", &self.instance);
diff --git a/crates/node-interaction/src/blocking_executor.rs b/crates/node-interaction/src/blocking_executor.rs
index 53123b5..baba0c1 100644
--- a/crates/node-interaction/src/blocking_executor.rs
+++ b/crates/node-interaction/src/blocking_executor.rs
@@ -51,35 +51,37 @@ impl BlockingExecutor {
     where
         R: Send + 'static,
     {
-        // A static of the state associated with the async runtime. This is initialized on the first
-        // access of the state.
+        // Note: The blocking executor is a singleton and therefore we store its state in a static
+        // so that it's assigned only once. Additionally, when we set the state of the executor we
+        // spawn the thread where the async runtime runs.
         static STATE: Lazy<ExecutorState> = Lazy::new(|| {
             tracing::trace!("Initializing the BlockingExecutor state");
 
-            // Creating a multiple-producer-single-consumer channel which allows all of the other
-            // threads to communicate with this one async runtime thread.
+            // All communication with the tokio runtime thread happens over mpsc channels where the
+            // producers here are the threads that want to run async tasks and the consumer here is
+            // the tokio runtime thread.
             let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<TaskMessage>();
 
-            // We spawn a new thread which will house the async runtime and will always be listening
-            // for new tasks coming in and executing them as they come in.
             thread::spawn(move || {
-                // Creating the tokio runtime on this current thread.
                 let runtime = Builder::new_current_thread()
                     .enable_all()
                     .build()
                     .expect("Failed to create the async runtime");
 
                 runtime.block_on(async move {
-                    // Keep getting new task messages from all of the other threads.
                     while let Some(TaskMessage {
                         future: task,
                         response_tx: response_channel,
                     }) = rx.recv().await
                     {
-                        // Spawn off each job so that the receive loop is not blocked.
                         tracing::trace!("Received a new future to execute");
                         tokio::spawn(async move {
+                            // One of the things that the blocking executor does is that it allows
+                            // us to catch panics if they occur. By wrapping the given future in an
+                            // AssertUnwindSafe::catch_unwind we are able to catch all panic unwinds
+                            // in the given future and convert them into errors.
                             let task = AssertUnwindSafe(task).catch_unwind();
+
                             let result = task.await;
                             let _ = response_channel.send(result);
                         });
@@ -87,31 +89,32 @@ impl BlockingExecutor {
             })
         });
 
-        // Creating the state of the async runtime.
         ExecutorState { tx }
     });
 
-        // Creating a one-shot channel for this task that will be used to send and receive the
-        // response of the task.
+        // We need to perform blocking synchronous communication between the current thread and the
+        // tokio runtime thread with the result of the async computation and the oneshot channels
+        // from tokio allow us to do that. The sender side of the channel will be given to the
+        // tokio runtime thread to send the result when the computation is completed and the receive
+        // side of the channel will be kept with this thread to await for the response of the async
+        // task to come back.
         let (response_tx, response_rx) =
             oneshot::channel::<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>();
 
-        // Converting the future from the shape that it is in into the shape that the runtime is
-        // expecting it to be in.
+        // The tokio runtime thread expects a Future<Output = Box<dyn Any + Send>> + Send to be
+        // sent to it to execute. However, this function has a typed Future<Output = R> + Send and
+        // therefore we need to change the type of the future to fit what the runtime thread expects
+        // in the task message. In doing this conversion, we lose some of the type information since
+        // we're converting R => dyn Any. However, we will perform down-casting on the result to
+        // convert it back into R.
         let future = Box::pin(async move { Box::new(future.await) as Box<dyn Any + Send> });
 
-        // Sending the task to the runtime,
-        let task = TaskMessage {
-            future,
-            response_tx,
-        };
-
+        let task = TaskMessage::new(future, response_tx);
         if let Err(error) = STATE.tx.send(task) {
             tracing::error!(?error, "Failed to send the task to the blocking executor");
             anyhow::bail!("Failed to send the task to the blocking executor: {error:?}")
         }
 
-        // Await for the result of the execution to come back over the channel.
         let result = match response_rx.blocking_recv() {
             Ok(result) => result,
             Err(error) => {
@@ -163,6 +166,18 @@ struct TaskMessage {
     response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
 }
 
+impl TaskMessage {
+    pub fn new(
+        future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
+        response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
+    ) -> Self {
+        Self {
+            future,
+            response_tx,
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs
index 68644cd..995b8ca 100644
--- a/crates/node/src/kitchensink.rs
+++ b/crates/node/src/kitchensink.rs
@@ -1020,6 +1020,7 @@ mod tests {
     use alloy::rpc::types::TransactionRequest;
     use revive_dt_config::Arguments;
     use std::path::PathBuf;
+    use std::sync::LazyLock;
     use temp_dir::TempDir;
 
     use std::fs;
@@ -1067,6 +1068,15 @@ mod tests {
         (node, args, temp_dir)
     }
 
+    /// A shared node that multiple tests can use. It starts up once.
+    fn shared_node() -> &'static KitchensinkNode {
+        static NODE: LazyLock<(KitchensinkNode, TempDir)> = LazyLock::new(|| {
+            let (node, _, temp_dir) = new_node();
+            (node, temp_dir)
+        });
+        &NODE.0
+    }
+
     #[tokio::test]
     async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
         // Arrange
@@ -1255,7 +1265,7 @@
     #[test]
     fn can_get_chain_id_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let chain_id = node.chain_id();
@@ -1268,7 +1278,7 @@
     #[test]
     fn can_get_gas_limit_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest);
@@ -1280,7 +1290,7 @@
     #[test]
     fn can_get_coinbase_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let coinbase = node.block_coinbase(BlockNumberOrTag::Latest);
@@ -1293,7 +1303,7 @@
     #[test]
     fn can_get_block_difficulty_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest);
@@ -1306,7 +1316,7 @@
     #[test]
     fn can_get_block_hash_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let block_hash = node.block_hash(BlockNumberOrTag::Latest);
@@ -1318,7 +1328,7 @@
     #[test]
     fn can_get_block_timestamp_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest);
@@ -1330,7 +1340,7 @@
     #[test]
     fn can_get_block_number_from_node() {
         // Arrange
-        let (node, _args, _temp_dir) = new_node();
+        let node = shared_node();
 
         // Act
         let block_number = node.last_block_number();