Compare commits

...

1 Commits

Author SHA1 Message Date
Omar Abdulla 7f4fadf7b1 Add a cached fs abstraction 2025-08-14 13:12:23 +03:00
7 changed files with 83 additions and 41 deletions
@@ -0,0 +1,38 @@
//! Implements a cached file system that allows for files to be read once into memory and then when
//! they're requested to be read again they will be returned from the cache.
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, LazyLock},
};
use anyhow::Result;
use tokio::sync::RwLock;
/// Process-wide cache mapping file paths to the raw bytes read from disk.
///
/// Keys are stored exactly as passed by callers (no canonicalization), so the
/// same file reached via `./a` and `a` occupies two separate cache entries.
///
/// NOTE(review): the `Arc` looks redundant — a `static` is already globally
/// shared and nothing in this module clones the handle; `LazyLock<RwLock<..>>`
/// alone would suffice. Confirm no external code relies on cloning it.
#[allow(clippy::type_complexity)]
static CACHE: LazyLock<Arc<RwLock<HashMap<PathBuf, Vec<u8>>>>> = LazyLock::new(Default::default);
pub struct CachedFileSystem;
impl CachedFileSystem {
pub async fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
let cache_read_lock = CACHE.read().await;
match cache_read_lock.get(path.as_ref()) {
Some(entry) => Ok(entry.clone()),
None => {
drop(cache_read_lock);
let content = std::fs::read(&path)?;
let mut cache_write_lock = CACHE.write().await;
cache_write_lock.insert(path.as_ref().to_path_buf(), content.clone());
Ok(content)
}
}
}
pub async fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let content = Self::read(path).await?;
String::from_utf8(content).map_err(Into::into)
}
}
+2
View File
@@ -1,3 +1,5 @@
mod cached_file_system;
mod clear_dir; mod clear_dir;
pub use cached_file_system::*;
pub use clear_dir::*; pub use clear_dir::*;
+6 -6
View File
@@ -5,7 +5,6 @@
use std::{ use std::{
collections::HashMap, collections::HashMap,
fs::read_to_string,
hash::Hash, hash::Hash,
path::{Path, PathBuf}, path::{Path, PathBuf},
}; };
@@ -16,7 +15,7 @@ use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::{fs::CachedFileSystem, types::VersionOrRequirement};
use revive_dt_config::Arguments; use revive_dt_config::Arguments;
pub mod revive_js; pub mod revive_js;
@@ -123,10 +122,11 @@ where
self self
} }
pub fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> { pub async fn with_source(mut self, path: impl AsRef<Path>) -> anyhow::Result<Self> {
self.input self.input.sources.insert(
.sources path.as_ref().to_path_buf(),
.insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?); CachedFileSystem::read_to_string(path.as_ref()).await?,
);
Ok(self) Ok(self)
} }
+11 -11
View File
@@ -67,7 +67,7 @@ fn main() -> anyhow::Result<()> {
let args = init_cli()?; let args = init_cli()?;
let body = async { let body = async {
for (corpus, tests) in collect_corpora(&args)? { for (corpus, tests) in collect_corpora(&args).await? {
let span = Span::new(corpus, args.clone())?; let span = Span::new(corpus, args.clone())?;
match &args.compile_only { match &args.compile_only {
Some(platform) => compile_corpus(&args, &tests, platform, span).await, Some(platform) => compile_corpus(&args, &tests, platform, span).await,
@@ -117,13 +117,13 @@ fn init_cli() -> anyhow::Result<Arguments> {
Ok(args) Ok(args)
} }
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> { async fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
let mut corpora = HashMap::new(); let mut corpora = HashMap::new();
for path in &args.corpus { for path in &args.corpus {
let corpus = Corpus::try_from_path(path)?; let corpus = Corpus::try_from_path(path)?;
tracing::info!("found corpus: {}", path.display()); tracing::info!("found corpus: {}", path.display());
let tests = corpus.enumerate_tests(); let tests = corpus.enumerate_tests().await;
tracing::info!("corpus '{}' contains {} tests", &corpus.name, tests.len()); tracing::info!("corpus '{}' contains {} tests", &corpus.name, tests.len());
corpora.insert(corpus, tests); corpora.insert(corpus, tests);
} }
@@ -145,7 +145,7 @@ where
let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test, CaseResult)>(); let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test, CaseResult)>();
let tests = prepare_tests::<L, F>(metadata_files); let tests = prepare_tests::<L, F>(metadata_files);
let driver_task = start_driver_task::<L, F>(args, tests, span, report_tx)?; let driver_task = start_driver_task::<L, F>(args, tests, span, report_tx).await?;
let status_reporter_task = start_reporter_task(report_rx); let status_reporter_task = start_reporter_task(report_rx);
tokio::join!(status_reporter_task, driver_task); tokio::join!(status_reporter_task, driver_task);
@@ -237,7 +237,7 @@ where
}) })
} }
fn start_driver_task<L, F>( async fn start_driver_task<L, F>(
args: &Arguments, args: &Arguments,
tests: impl Iterator<Item = Test>, tests: impl Iterator<Item = Test>,
span: Span, span: Span,
@@ -249,8 +249,8 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let leader_nodes = Arc::new(NodePool::<L::Blockchain>::new(args)?); let leader_nodes = Arc::new(NodePool::<L::Blockchain>::new(args).await?);
let follower_nodes = Arc::new(NodePool::<F::Blockchain>::new(args)?); let follower_nodes = Arc::new(NodePool::<F::Blockchain>::new(args).await?);
let compilation_cache = Arc::new(RwLock::new(HashMap::new())); let compilation_cache = Arc::new(RwLock::new(HashMap::new()));
let number_concurrent_tasks = args.number_of_concurrent_tasks(); let number_concurrent_tasks = args.number_of_concurrent_tasks();
@@ -693,12 +693,12 @@ async fn compile_contracts<P: Platform>(
"Compiling contracts" "Compiling contracts"
); );
let compiler = Compiler::<P::Compiler>::new() let mut compiler = Compiler::<P::Compiler>::new()
.with_allow_path(metadata.directory()?) .with_allow_path(metadata.directory()?)
.with_optimization(mode.solc_optimize()); .with_optimization(mode.solc_optimize());
let mut compiler = metadata for path in metadata.files_to_compile()? {
.files_to_compile()? compiler = compiler.with_source(path).await?;
.try_fold(compiler, |compiler, path| compiler.with_source(&path))?; }
for (library_instance, (library_address, _)) in deployed_libraries.iter() { for (library_instance, (library_address, _)) in deployed_libraries.iter() {
let library_ident = &metadata let library_ident = &metadata
.contracts .contracts
+6 -6
View File
@@ -39,9 +39,9 @@ impl Corpus {
} }
/// Scan the corpus base directory and return all tests found. /// Scan the corpus base directory and return all tests found.
pub fn enumerate_tests(&self) -> Vec<MetadataFile> { pub async fn enumerate_tests(&self) -> Vec<MetadataFile> {
let mut tests = Vec::new(); let mut tests = Vec::new();
collect_metadata(&self.path, &mut tests); collect_metadata(&self.path, &mut tests).await;
tests tests
} }
} }
@@ -52,7 +52,7 @@ impl Corpus {
/// Found tests are inserted into `tests`. /// Found tests are inserted into `tests`.
/// ///
/// `path` is expected to be a directory. /// `path` is expected to be a directory.
pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) { pub async fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
if path.is_dir() { if path.is_dir() {
let dir_entry = match std::fs::read_dir(path) { let dir_entry = match std::fs::read_dir(path) {
Ok(dir_entry) => dir_entry, Ok(dir_entry) => dir_entry,
@@ -73,12 +73,12 @@ pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
let path = entry.path(); let path = entry.path();
if path.is_dir() { if path.is_dir() {
collect_metadata(&path, tests); Box::pin(collect_metadata(&path, tests)).await;
continue; continue;
} }
if path.is_file() { if path.is_file() {
if let Some(metadata) = MetadataFile::try_from_file(&path) { if let Some(metadata) = MetadataFile::try_from_file(&path).await {
tests.push(metadata) tests.push(metadata)
} }
} }
@@ -89,7 +89,7 @@ pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
return; return;
}; };
if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") { if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") {
if let Some(metadata) = MetadataFile::try_from_file(path) { if let Some(metadata) = MetadataFile::try_from_file(path).await {
tests.push(metadata) tests.push(metadata)
} }
} else { } else {
+15 -12
View File
@@ -2,7 +2,6 @@ use std::{
cmp::Ordering, cmp::Ordering,
collections::BTreeMap, collections::BTreeMap,
fmt::Display, fmt::Display,
fs::{File, read_to_string},
ops::Deref, ops::Deref,
path::{Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
@@ -11,7 +10,9 @@ use std::{
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::{iterators::FilesWithExtensionIterator, macros::define_wrapper_type}; use revive_dt_common::{
fs::CachedFileSystem, iterators::FilesWithExtensionIterator, macros::define_wrapper_type,
};
use crate::{ use crate::{
case::Case, case::Case,
@@ -29,8 +30,8 @@ pub struct MetadataFile {
} }
impl MetadataFile { impl MetadataFile {
pub fn try_from_file(path: &Path) -> Option<Self> { pub async fn try_from_file(path: &Path) -> Option<Self> {
Metadata::try_from_file(path).map(|metadata| Self { Metadata::try_from_file(path).await.map(|metadata| Self {
path: path.to_owned(), path: path.to_owned(),
content: metadata, content: metadata,
}) })
@@ -151,7 +152,7 @@ impl Metadata {
/// ///
/// # Panics /// # Panics
/// Expects the supplied `path` to be a file. /// Expects the supplied `path` to be a file.
pub fn try_from_file(path: &Path) -> Option<Self> { pub async fn try_from_file(path: &Path) -> Option<Self> {
assert!(path.is_file(), "not a file: {}", path.display()); assert!(path.is_file(), "not a file: {}", path.display());
let Some(file_extension) = path.extension() else { let Some(file_extension) = path.extension() else {
@@ -160,19 +161,20 @@ impl Metadata {
}; };
if file_extension == METADATA_FILE_EXTENSION { if file_extension == METADATA_FILE_EXTENSION {
return Self::try_from_json(path); return Self::try_from_json(path).await;
} }
if file_extension == SOLIDITY_CASE_FILE_EXTENSION { if file_extension == SOLIDITY_CASE_FILE_EXTENSION {
return Self::try_from_solidity(path); return Self::try_from_solidity(path).await;
} }
tracing::debug!("ignoring invalid corpus file: {}", path.display()); tracing::debug!("ignoring invalid corpus file: {}", path.display());
None None
} }
fn try_from_json(path: &Path) -> Option<Self> { async fn try_from_json(path: &Path) -> Option<Self> {
let file = File::open(path) let content = CachedFileSystem::read(path)
.await
.inspect_err(|error| { .inspect_err(|error| {
tracing::error!( tracing::error!(
"opening JSON test metadata file '{}' error: {error}", "opening JSON test metadata file '{}' error: {error}",
@@ -181,7 +183,7 @@ impl Metadata {
}) })
.ok()?; .ok()?;
match serde_json::from_reader::<_, Metadata>(file) { match serde_json::from_slice::<Metadata>(content.as_slice()) {
Ok(mut metadata) => { Ok(mut metadata) => {
metadata.file_path = Some(path.to_path_buf()); metadata.file_path = Some(path.to_path_buf());
Some(metadata) Some(metadata)
@@ -196,8 +198,9 @@ impl Metadata {
} }
} }
fn try_from_solidity(path: &Path) -> Option<Self> { async fn try_from_solidity(path: &Path) -> Option<Self> {
let spec = read_to_string(path) let spec = CachedFileSystem::read_to_string(path)
.await
.inspect_err(|error| { .inspect_err(|error| {
tracing::error!( tracing::error!(
"opening JSON test metadata file '{}' error: {error}", "opening JSON test metadata file '{}' error: {error}",
+5 -6
View File
@@ -1,12 +1,12 @@
//! This crate implements concurrent handling of testing node. //! This crate implements concurrent handling of testing node.
use std::{ use std::{
fs::read_to_string,
sync::atomic::{AtomicUsize, Ordering}, sync::atomic::{AtomicUsize, Ordering},
thread, thread,
}; };
use anyhow::Context; use anyhow::Context;
use revive_dt_common::fs::CachedFileSystem;
use revive_dt_config::Arguments; use revive_dt_config::Arguments;
use crate::Node; use crate::Node;
@@ -23,12 +23,11 @@ where
T: Node + Send + 'static, T: Node + Send + 'static,
{ {
/// Create a new Pool. This will start as many nodes as there are workers in `config`. /// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new(config: &Arguments) -> anyhow::Result<Self> { pub async fn new(config: &Arguments) -> anyhow::Result<Self> {
let nodes = config.number_of_nodes; let nodes = config.number_of_nodes;
let genesis = read_to_string(&config.genesis_file).context(format!( let genesis = CachedFileSystem::read_to_string(&config.genesis_file)
"can not read genesis file: {}", .await
config.genesis_file.display() .context("Failed to read genesis file")?;
))?;
let mut handles = Vec::with_capacity(nodes); let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes { for _ in 0..nodes {