Compare commits

3 Commits

Author SHA1 Message Date
Omar Abdulla 49cbc51546 Generate schema for the metadata file 2025-09-08 17:09:35 +03:00
Omar c2526e48e7 Refactor the Global Configuration & Context (#157)
* Cleanup the config

* Update usage guides

* Update the run script

* Fix tests

* Use kitchensink in tests

* Use shared node more often in tests
2025-09-04 14:25:05 +00:00
Omar 7878f68c26 Better Compiler Interface & Shared Compiler Objects (#156)
* Add leader and follower node assignment to test

* Update the compilers interface

* Fix Cargo machete

* Add reporting back to the compilers

* Remove the static testing target from the report

* Uncomment instrument macro

* Switch to a for loop when reporting cases

* Update compilers to use interior caching

* Update tests stream func

* Fix tests
2025-08-28 15:03:45 +00:00
28 changed files with 1511 additions and 472 deletions
+3 -1
@@ -10,4 +10,6 @@ node_modules
profile.json.gz profile.json.gz
resolc-compiler-tests resolc-compiler-tests
workdir workdir
!/schema.json
Generated
+32 -3
@@ -4501,9 +4501,12 @@ name = "revive-dt-config"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"alloy", "alloy",
"anyhow",
"clap", "clap",
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"serde_json",
"strum",
"temp-dir", "temp-dir",
] ]
@@ -4525,10 +4528,10 @@ dependencies = [
"revive-dt-node", "revive-dt-node",
"revive-dt-node-interaction", "revive-dt-node-interaction",
"revive-dt-report", "revive-dt-report",
"schemars 1.0.4",
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"serde_json", "serde_json",
"temp-dir",
"tokio", "tokio",
"tracing", "tracing",
"tracing-appender", "tracing-appender",
@@ -4547,6 +4550,7 @@ dependencies = [
"regex", "regex",
"revive-common", "revive-common",
"revive-dt-common", "revive-dt-common",
"schemars 1.0.4",
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"serde_json", "serde_json",
@@ -4870,10 +4874,24 @@ checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0"
dependencies = [ dependencies = [
"dyn-clone", "dyn-clone",
"ref-cast", "ref-cast",
"schemars_derive",
"semver 1.0.26",
"serde", "serde",
"serde_json", "serde_json",
] ]
[[package]]
name = "schemars_derive"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d020396d1d138dc19f1165df7545479dcd58d93810dc5d646a16e55abefa80"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn 2.0.101",
]
[[package]] [[package]]
name = "schnellru" name = "schnellru"
version = "0.2.4" version = "0.2.4"
@@ -5058,6 +5076,17 @@ dependencies = [
"syn 2.0.101", "syn 2.0.101",
] ]
[[package]]
name = "serde_derive_internals"
version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.101",
]
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.140" version = "1.0.140"
@@ -5690,9 +5719,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]] [[package]]
name = "strum" name = "strum"
version = "0.27.1" version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [ dependencies = [
"strum_macros", "strum_macros",
] ]
+2
@@ -37,6 +37,7 @@ moka = "0.12.10"
paste = "1.0.15" paste = "1.0.15"
reqwest = { version = "0.12.15", features = ["json"] } reqwest = { version = "0.12.15", features = ["json"] }
once_cell = "1.21" once_cell = "1.21"
schemars = { version = "1.0.4", features = ["semver1"] }
semver = { version = "1.0", features = ["serde"] } semver = { version = "1.0", features = ["serde"] }
serde = { version = "1.0", default-features = false, features = ["derive"] } serde = { version = "1.0", default-features = false, features = ["derive"] }
serde_json = { version = "1.0", default-features = false, features = [ serde_json = { version = "1.0", default-features = false, features = [
@@ -48,6 +49,7 @@ serde_with = { version = "3.14.0" }
sha2 = { version = "0.10.9" } sha2 = { version = "0.10.9" }
sp-core = "36.1.0" sp-core = "36.1.0"
sp-runtime = "41.1.0" sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
temp-dir = { version = "0.1.16" } temp-dir = { version = "0.1.16" }
tempfile = "3.3" tempfile = "3.3"
thiserror = "2" thiserror = "2"
+4 -3
@@ -187,10 +187,11 @@ The above corpus file instructs the tool to look for all of the test cases conta
The simplest command to run this tool is the following: The simplest command to run this tool is the following:
```bash ```bash
RUST_LOG="info" cargo run --release -- \ RUST_LOG="info" cargo run --release -- execute-tests \
--follower geth \
--corpus path_to_your_corpus_file.json \ --corpus path_to_your_corpus_file.json \
--workdir path_to_a_temporary_directory_to_cache_things_in \ --working-directory path_to_a_temporary_directory_to_cache_things_in \
--number-of-nodes 5 \ --concurrency.number-of-nodes 5 \
> logs.log \ > logs.log \
2> output.log 2> output.log
``` ```
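The command above reflects the new subcommand-based CLI. The configuration diff further down also adds an `ExportJsonSchema` variant to the `Context` enum, which prints the schema of the MatterLabs metadata format to stdout. Assuming clap's default kebab-case mapping for variant names (the same mapping that produces `execute-tests`), a minimal invocation would look like:

```bash
# Hypothetical invocation; the subcommand name assumes clap's default
# kebab-case naming for the `ExportJsonSchema` variant.
cargo run --release -- export-json-schema > schema.json
```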
+6 -4
@@ -11,14 +11,14 @@ use std::{
use alloy::json_abi::JsonAbi; use alloy::json_abi::JsonAbi;
use alloy_primitives::Address; use alloy_primitives::Address;
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::cached_fs::read_to_string; use revive_dt_common::cached_fs::read_to_string;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
// Re-export this as it's a part of the compiler interface. // Re-export this as it's a part of the compiler interface.
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -31,11 +31,13 @@ pub mod solc;
pub trait SolidityCompiler: Sized { pub trait SolidityCompiler: Sized {
/// Instantiates a new compiler object. /// Instantiates a new compiler object.
/// ///
/// Based on the given [`Arguments`] and [`VersionOrRequirement`] this function instantiates a /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
/// new compiler object. Certain implementations of this trait might choose to cache the /// new compiler object. Certain implementations of this trait might choose to cache the
/// compiler objects and return the same ones over and over again. /// compiler objects and return the same ones over and over again.
fn new( fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> impl Future<Output = Result<Self>>; ) -> impl Future<Output = Result<Self>>;
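The signature above replaces the concrete `&Arguments` parameter with a bundle of `AsRef` bounds, so callers pass one context object while the function names only the configuration pieces it actually needs. A minimal sketch of the pattern, using hypothetical `CfgA`/`CfgB`/`Ctx` stand-ins rather than the real configuration types:

```rust
// Hypothetical simplified types standing in for SolcConfiguration etc.
struct CfgA { value: u32 }
struct CfgB { name: String }

// The context owns every configuration section and exposes each via `AsRef`,
// mirroring the `ExecutionContext` impls in the config diff below.
struct Ctx { a: CfgA, b: CfgB }

impl AsRef<CfgA> for Ctx { fn as_ref(&self) -> &CfgA { &self.a } }
impl AsRef<CfgB> for Ctx { fn as_ref(&self) -> &CfgB { &self.b } }

fn consume(context: impl AsRef<CfgA> + AsRef<CfgB>) {
    // With several `AsRef` bounds in scope the target type must be spelled out,
    // which is why the diffs write `AsRef::<ResolcConfiguration>::as_ref(&context)`.
    let a = AsRef::<CfgA>::as_ref(&context);
    let b = AsRef::<CfgB>::as_ref(&context);
    println!("{} {}", a.value, b.name);
}

fn main() {
    consume(Ctx { a: CfgA { value: 1 }, b: CfgB { name: "demo".into() } });
}
```

Adding a new configuration section then only requires a new `AsRef` impl on the context, without touching existing call sites.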
+9 -5
@@ -9,7 +9,7 @@ use std::{
use dashmap::DashMap; use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_solc_json_interface::{ use revive_solc_json_interface::{
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings, SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection, SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
@@ -21,7 +21,7 @@ use crate::{
}; };
use alloy::json_abi::JsonAbi; use alloy::json_abi::JsonAbi;
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use semver::Version; use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -39,7 +39,9 @@ struct ResolcInner {
impl SolidityCompiler for Resolc { impl SolidityCompiler for Resolc {
async fn new( async fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> { ) -> Result<Self> {
/// This is a cache of all of the resolc compiler objects. Since we do not currently support /// This is a cache of all of the resolc compiler objects. Since we do not currently support
@@ -47,7 +49,9 @@ impl SolidityCompiler for Resolc {
/// its version to the resolc compiler. /// its version to the resolc compiler.
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default); static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
let solc = Solc::new(config, version) let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
let solc = Solc::new(&context, version)
.await .await
.context("Failed to create the solc compiler frontend for resolc")?; .context("Failed to create the solc compiler frontend for resolc")?;
@@ -56,7 +60,7 @@ impl SolidityCompiler for Resolc {
.or_insert_with(|| { .or_insert_with(|| {
Self(Arc::new(ResolcInner { Self(Arc::new(ResolcInner {
solc, solc,
resolc_path: config.resolc.clone(), resolc_path: resolc_configuration.path.clone(),
})) }))
}) })
.clone()) .clone())
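The `COMPILERS_CACHE` above is the "interior caching" named in the commit message: a process-wide `LazyLock<DashMap>` that hands out cheap clones of shared compiler objects. A minimal sketch of the same pattern, assuming the `dashmap` crate (as the diff uses) and hypothetical simplified types:

```rust
use std::sync::{Arc, LazyLock};

use dashmap::DashMap;

// Cheap to clone: the inner state is shared behind an Arc, like ResolcInner.
#[derive(Clone)]
struct Compiler(Arc<String>);

fn get_compiler(version: &str) -> Compiler {
    // One map for the whole process, created lazily on first use.
    static CACHE: LazyLock<DashMap<String, Compiler>> = LazyLock::new(Default::default);
    CACHE
        .entry(version.to_owned())
        // Only constructed on the first request for this version; later calls
        // receive a clone of the cached object, as in the diff above.
        .or_insert_with(|| Compiler(Arc::new(format!("compiler {version}"))))
        .clone()
}
```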
+19 -9
@@ -9,12 +9,12 @@ use std::{
use dashmap::DashMap; use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc; use revive_dt_solc_binaries::download_solc;
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler}; use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{ use foundry_compilers_artifacts::{
output_selection::{ output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -38,25 +38,35 @@ struct SolcInner {
impl SolidityCompiler for Solc { impl SolidityCompiler for Solc {
async fn new( async fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> { ) -> Result<Self> {
// This is a cache for the compiler objects so that whenever the same compiler version is // This is a cache for the compiler objects so that whenever the same compiler version is
// requested the same object is returned. We do this as we do not want to keep cloning the // requested the same object is returned. We do this as we do not want to keep cloning the
// compiler around. // compiler around.
static COMPILERS_CACHE: LazyLock<DashMap<Version, Solc>> = LazyLock::new(Default::default); static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
LazyLock::new(Default::default);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
// We attempt to download the solc binary. Note the following: this call does the version // We attempt to download the solc binary. Note the following: this call does the version
// resolution for us. Therefore, even if the download didn't proceed, this function will // resolution for us. Therefore, even if the download didn't proceed, this function will
// resolve the version requirement into a canonical version of the compiler. It's then up // resolve the version requirement into a canonical version of the compiler. It's then up
// to us to either use the provided path or not. // to us to either use the provided path or not.
let version = version.into().unwrap_or_else(|| config.solc.clone().into()); let version = version
let (version, path) = download_solc(config.directory(), version, false) .into()
.await .unwrap_or_else(|| solc_configuration.version.clone().into());
.context("Failed to download/get path to solc binary")?; let (version, path) =
download_solc(working_directory_configuration.as_path(), version, false)
.await
.context("Failed to download/get path to solc binary")?;
Ok(COMPILERS_CACHE Ok(COMPILERS_CACHE
.entry(version.clone()) .entry((path.clone(), version.clone()))
.or_insert_with(|| { .or_insert_with(|| {
Self(Arc::new(SolcInner { Self(Arc::new(SolcInner {
solc_path: path, solc_path: path,
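Two details in this hunk are worth noting. First, the cache key widens from `Version` to `(PathBuf, Version)`, so two binaries that report the same version but live at different paths no longer collide. Second, `download_solc` resolves a version *requirement* into one canonical version even when no download happens. A sketch of that resolution step using the `semver` crate; the candidate list and helper function are hypothetical:

```rust
use semver::{Version, VersionReq};

// Pick the highest available version that satisfies the requirement.
fn resolve(req: &VersionReq, available: &[Version]) -> Option<Version> {
    available.iter().filter(|v| req.matches(v)).max().cloned()
}

fn main() {
    let available = vec![Version::new(0, 8, 29), Version::new(0, 8, 30)];
    let req = VersionReq::parse("^0.8.29").unwrap();
    assert_eq!(resolve(&req, &available), Some(Version::new(0, 8, 30)));
}
```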
+3 -3
@@ -2,13 +2,13 @@ use std::path::PathBuf;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::Arguments; use revive_dt_config::ExecutionContext;
use semver::Version; use semver::Version;
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_solc() { async fn contracts_can_be_compiled_with_solc() {
// Arrange // Arrange
let args = Arguments::default(); let args = ExecutionContext::default();
let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
@@ -49,7 +49,7 @@ async fn contracts_can_be_compiled_with_solc() {
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_resolc() { async fn contracts_can_be_compiled_with_resolc() {
// Arrange // Arrange
let args = Arguments::default(); let args = ExecutionContext::default();
let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
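These tests construct a configuration with `ExecutionContext::default()`, which (per the config diff below) is implemented by letting clap parse an argv containing only a program name, so every field takes its declared `default_value`. A minimal sketch of the trick with a hypothetical `Options` struct, assuming clap with the derive feature:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Options {
    #[clap(long, default_value_t = 5)]
    number_of_nodes: usize,
}

impl Default for Options {
    fn default() -> Self {
        // The single element is the program name; no user flags are supplied,
        // so clap fills every field from its declared default.
        Self::parse_from(["options"])
    }
}

fn main() {
    assert_eq!(Options::default().number_of_nodes, 5);
}
```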
+3
@@ -10,10 +10,13 @@ rust-version.workspace = true
[dependencies] [dependencies]
alloy = { workspace = true } alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true } clap = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
temp-dir = { workspace = true } temp-dir = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true }
strum = { workspace = true }
[lints] [lints]
workspace = true workspace = true
+485 -135
@@ -2,215 +2,565 @@
use std::{ use std::{
fmt::Display, fmt::Display,
fs::read_to_string,
ops::Deref,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::LazyLock, str::FromStr,
sync::{Arc, LazyLock, OnceLock},
time::Duration,
}; };
use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner}; use alloy::{
use clap::{Parser, ValueEnum}; genesis::Genesis,
hex::ToHexExt,
network::EthereumWallet,
primitives::{FixedBytes, U256},
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir; use temp_dir::TempDir;
#[derive(Debug, Parser, Clone, Serialize, Deserialize)] #[derive(Clone, Debug, Parser, Serialize)]
#[command(name = "retester")] #[command(name = "retester")]
pub struct Arguments { pub enum Context {
/// The `solc` version to use if the test didn't specify it explicitly. /// Executes tests in the MatterLabs format differentially against a leader and a follower.
#[arg(long = "solc", short, default_value = "0.8.29")] ExecuteTests(Box<ExecutionContext>),
pub solc: Version, /// Exports the JSON schema of the MatterLabs test format used by the tool.
ExportJsonSchema,
}
/// Use the Wasm compiler versions. impl Context {
#[arg(long = "wasm")] pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
pub wasm: bool, self.as_ref()
}
/// The path to the `resolc` executable to be tested. pub fn report_configuration(&self) -> &ReportConfiguration {
self.as_ref()
}
}
impl AsRef<WorkingDirectoryConfiguration> for Context {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.working_directory,
Context::ExportJsonSchema => unreachable!(),
}
}
}
impl AsRef<ReportConfiguration> for Context {
fn as_ref(&self) -> &ReportConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
Context::ExportJsonSchema => unreachable!(),
}
}
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
/// ///
/// By default it uses the `resolc` binary found in `$PATH`. /// If not specified, then a temporary directory will be created and used by the program for all
/// /// temporary artifacts.
/// If `--wasm` is set, this should point to the resolc Wasm file. #[clap(
#[arg(long = "resolc", short, default_value = "resolc")] short,
pub resolc: PathBuf, long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The differential testing leader node implementation.
#[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
pub leader: TestingPlatform,
/// The differential testing follower node implementation.
#[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
pub follower: TestingPlatform,
/// A list of test corpus JSON files to be tested. /// A list of test corpus JSON files to be tested.
#[arg(long = "corpus", short)] #[arg(long = "corpus", short)]
pub corpus: Vec<PathBuf>, pub corpus: Vec<PathBuf>,
/// A place to store temporary artifacts during test execution. /// Configuration parameters for the solc compiler.
/// #[clap(flatten, next_help_heading = "Solc Configuration")]
/// Creates a temporary dir if not specified. pub solc_configuration: SolcConfiguration,
#[arg(long = "workdir", short)]
pub working_directory: Option<PathBuf>,
/// Add a tempdir manually if `working_directory` was not given. /// Configuration parameters for the resolc compiler.
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
/// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration,
/// Configuration parameters for the genesis.
#[clap(flatten, next_help_heading = "Genesis Configuration")]
pub genesis_configuration: GenesisConfiguration,
/// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration,
/// Configuration parameters for concurrency.
#[clap(flatten, next_help_heading = "Concurrency Configuration")]
pub concurrency_configuration: ConcurrencyConfiguration,
/// Configuration parameters for the compilers and compilation.
#[clap(flatten, next_help_heading = "Compilation Configuration")]
pub compilation_configuration: CompilationConfiguration,
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
}
impl Default for ExecutionContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
}
}
impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
&self.working_directory
}
}
impl AsRef<SolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &SolcConfiguration {
&self.solc_configuration
}
}
impl AsRef<ResolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ResolcConfiguration {
&self.resolc_configuration
}
}
impl AsRef<GethConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GethConfiguration {
&self.geth_configuration
}
}
impl AsRef<KitchensinkConfiguration> for ExecutionContext {
fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration
}
}
impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
&self.revive_dev_node_configuration
}
}
impl AsRef<EthRpcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &EthRpcConfiguration {
&self.eth_rpc_configuration
}
}
impl AsRef<GenesisConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GenesisConfiguration {
&self.genesis_configuration
}
}
impl AsRef<WalletConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WalletConfiguration {
&self.wallet_configuration
}
}
impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ConcurrencyConfiguration {
&self.concurrency_configuration
}
}
impl AsRef<CompilationConfiguration> for ExecutionContext {
fn as_ref(&self) -> &CompilationConfiguration {
&self.compilation_configuration
}
}
impl AsRef<ReportConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReportConfiguration {
&self.report_configuration
}
}
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct SolcConfiguration {
/// Specifies the default version of the Solc compiler that should be used if there is no
/// override specified by one of the test cases.
#[clap(long = "solc.version", default_value = "0.8.29")]
pub version: Version,
}
/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ResolcConfiguration {
/// Specifies the path of the resolc compiler to be used by the tool.
/// ///
/// We attach it here because [TempDir] prunes itself on drop. /// If this is not specified, then the tool assumes that it should use the resolc binary that's
/// provided in the user's $PATH.
#[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
pub path: PathBuf,
}
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GethConfiguration {
/// Specifies the path of the geth node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the geth binary that's
/// provided in the user's $PATH.
#[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "geth.start-timeout-ms",
long = "geth.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KitchensinkConfiguration {
/// Specifies the path of the kitchensink node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the kitchensink binary
/// that's provided in the user's $PATH.
#[clap(
id = "kitchensink.path",
long = "kitchensink.path",
default_value = "substrate-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "kitchensink.start-timeout-ms",
long = "kitchensink.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// This configures the tool to use Kitchensink instead of using the revive-dev-node.
#[clap(long = "kitchensink.dont-use-dev-node")]
pub use_kitchensink: bool,
}
/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the revive dev node binary
/// that's provided in the user's $PATH.
#[clap(
id = "revive-dev-node.path",
long = "revive-dev-node.path",
default_value = "revive-dev-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "revive-dev-node.start-timeout-ms",
long = "revive-dev-node.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct EthRpcConfiguration {
/// Specifies the path of the ETH RPC to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the ETH RPC binary
/// that's provided in the user's $PATH.
#[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "eth-rpc.start-timeout-ms",
long = "eth-rpc.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
/// This is expected to be the path of a JSON geth genesis file.
#[clap(id = "genesis.path", long = "genesis.path")]
path: Option<PathBuf>,
/// The genesis object found at the provided path.
#[clap(skip)] #[clap(skip)]
#[serde(skip)] #[serde(skip)]
pub temp_dir: Option<&'static TempDir>, genesis: OnceLock<Genesis>,
}
/// The path to the `geth` executable. impl GenesisConfiguration {
/// pub fn genesis(&self) -> anyhow::Result<&Genesis> {
/// By default it uses `geth` binary found in `$PATH`. static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
#[arg(short, long = "geth", default_value = "geth")] let genesis = include_str!("../../../genesis.json");
pub geth: PathBuf, serde_json::from_str(genesis).unwrap()
});
/// The maximum time in milliseconds to wait for geth to start. match self.genesis.get() {
#[arg(long = "geth-start-timeout", default_value = "5000")] Some(genesis) => Ok(genesis),
pub geth_start_timeout: u64, None => {
let genesis = match self.path.as_ref() {
Some(genesis_path) => {
let genesis_content = read_to_string(genesis_path)?;
serde_json::from_str(genesis_content.as_str())?
}
None => DEFAULT_GENESIS.clone(),
};
Ok(self.genesis.get_or_init(|| genesis))
}
}
}
}
/// Configure nodes according to this genesis.json file. /// A set of configuration parameters for the wallet.
#[arg(long = "genesis", default_value = "genesis.json")] #[derive(Clone, Debug, Parser, Serialize)]
pub genesis_file: PathBuf, pub struct WalletConfiguration {
/// The private key of the default signer.
/// The signing account private key. #[clap(
#[arg( long = "wallet.default-private-key",
short,
long = "account",
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d" default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
)] )]
pub account: String, #[serde(serialize_with = "serialize_private_key")]
default_key: PrivateKeySigner,
/// This argument controls which private keys the nodes should have access to and be added to /// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
/// of the node. /// of the node.
#[arg(long = "private-keys-count", default_value_t = 100_000)] #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
pub private_keys_to_add: usize, additional_keys: usize,
/// The differential testing leader node implementation. /// The wallet object that will be used.
#[arg(short, long = "leader", default_value = "geth")] #[clap(skip)]
pub leader: TestingPlatform, #[serde(skip)]
wallet: OnceLock<Arc<EthereumWallet>>,
}
/// The differential testing follower node implementation. impl WalletConfiguration {
#[arg(short, long = "follower", default_value = "kitchensink")] pub fn wallet(&self) -> Arc<EthereumWallet> {
pub follower: TestingPlatform, self.wallet
.get_or_init(|| {
let mut wallet = EthereumWallet::new(self.default_key.clone());
for signer in (1..=self.additional_keys)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Arc::new(wallet)
})
.clone()
}
}
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
value.to_bytes().encode_hex().serialize(serializer)
}
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ConcurrencyConfiguration {
/// Determines the amount of nodes that will be spawned for each chain. /// Determines the amount of nodes that will be spawned for each chain.
#[arg(long, default_value = "1")] #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
pub number_of_nodes: usize, pub number_of_nodes: usize,
/// Determines the amount of tokio worker threads that will be used. /// Determines the amount of tokio worker threads that will be used.
#[arg( #[arg(
long, long = "concurrency.number-of-threads",
default_value_t = std::thread::available_parallelism() default_value_t = std::thread::available_parallelism()
.map(|n| n.get()) .map(|n| n.get())
.unwrap_or(1) .unwrap_or(1)
)] )]
pub number_of_threads: usize, pub number_of_threads: usize,
/// Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes. /// Determines the amount of concurrent tasks that will be spawned to run tests.
#[arg(long)]
pub number_concurrent_tasks: Option<usize>,
/// Extract problems back to the test corpus.
#[arg(short, long = "extract-problems")]
pub extract_problems: bool,
/// The path to the `kitchensink` executable.
/// ///
/// By default it uses `substrate-node` binary found in `$PATH`. /// Defaults to 10 x the number of nodes.
#[arg(short, long = "kitchensink", default_value = "substrate-node")] #[arg(long = "concurrency.number-of-concurrent-tasks")]
pub kitchensink: PathBuf, number_concurrent_tasks: Option<usize>,
/// The path to the `revive-dev-node` executable. /// Determines if the concurrency limit should be ignored or not.
/// #[arg(long = "concurrency.ignore-concurrency-limit")]
/// By default it uses `revive-dev-node` binary found in `$PATH`. ignore_concurrency_limit: bool,
#[arg(long = "revive-dev-node", default_value = "revive-dev-node")] }
pub revive_dev_node: PathBuf,
/// By default the tool uses the revive-dev-node when it's running differential tests against impl ConcurrencyConfiguration {
/// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to pub fn concurrency_limit(&self) -> Option<usize> {
/// configure the tool to use kitchensink rather than the dev-node. match self.ignore_concurrency_limit {
#[arg(long)] true => None,
pub use_kitchensink_not_dev_node: bool, false => Some(
self.number_concurrent_tasks
/// The path to the `eth_proxy` executable. .unwrap_or(20 * self.number_of_nodes),
/// ),
/// By default it uses `eth-rpc` binary found in `$PATH`. }
#[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")] }
pub eth_proxy: PathBuf, }
#[derive(Clone, Debug, Parser, Serialize)]
pub struct CompilationConfiguration {
/// Controls if the compilation cache should be invalidated or not. /// Controls if the compilation cache should be invalidated or not.
#[arg(short, long)] #[arg(long = "compilation.invalidate-cache")]
pub invalidate_compilation_cache: bool, pub invalidate_compilation_cache: bool,
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReportConfiguration {
/// Controls if the compiler input is included in the final report. /// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")] #[clap(long = "report.include-compiler-input")]
pub report_include_compiler_input: bool, pub include_compiler_input: bool,
/// Controls if the compiler output is included in the final report. /// Controls if the compiler output is included in the final report.
#[clap(long = "report.include-compiler-output")] #[clap(long = "report.include-compiler-output")]
pub report_include_compiler_output: bool, pub include_compiler_output: bool,
} }
impl Arguments { /// Represents the working directory that the program uses.
/// Return the configured working directory with the following precedence: #[derive(Debug, Clone)]
/// 1. `self.working_directory` if it was provided. pub enum WorkingDirectoryConfiguration {
/// 2. `self.temp_dir` if it was provided /// A directory with a path is used as the working directory.
/// 3. Panic. TemporaryDirectory(Arc<TempDir>),
pub fn directory(&self) -> &Path { /// A directory with a path is used as the working directory.
if let Some(path) = &self.working_directory { Path(PathBuf),
return path.as_path(); }
}
if let Some(temp_dir) = &self.temp_dir { impl WorkingDirectoryConfiguration {
return temp_dir.path(); pub fn as_path(&self) -> &Path {
} self.as_ref()
panic!("should have a workdir configured")
}
/// Return the number of concurrent tasks to run. This is provided via the
/// `--number-concurrent-tasks` argument, and otherwise defaults to --number-of-nodes * 20.
pub fn number_of_concurrent_tasks(&self) -> usize {
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes)
}
/// Try to parse `self.account` into a [PrivateKeySigner],
/// panicking on error. }
pub fn wallet(&self) -> EthereumWallet {
let signer = self
.account
.parse::<PrivateKeySigner>()
.unwrap_or_else(|error| {
panic!("private key '{}' parsing error: {error}", self.account);
});
EthereumWallet::new(signer)
} }
} }
impl Default for Arguments { impl Deref for WorkingDirectoryConfiguration {
type Target = Path;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl AsRef<Path> for WorkingDirectoryConfiguration {
fn as_ref(&self) -> &Path {
match self {
WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
WorkingDirectoryConfiguration::Path(path) => path.as_path(),
}
}
}
impl Default for WorkingDirectoryConfiguration {
fn default() -> Self { fn default() -> Self {
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap()); TempDir::new()
.map(Arc::new)
.map(Self::TemporaryDirectory)
.expect("Failed to create the temporary directory")
}
}
let default = Arguments::parse_from(["retester"]); impl FromStr for WorkingDirectoryConfiguration {
type Err = anyhow::Error;
Arguments { fn from_str(s: &str) -> Result<Self, Self::Err> {
temp_dir: Some(&TEMP_DIR), match s {
..default "" => Ok(Default::default()),
_ => Ok(Self::Path(PathBuf::from(s))),
} }
} }
} }
impl Display for WorkingDirectoryConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.as_path().display(), f)
}
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_path().serialize(serializer)
}
}
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s)
.map(Duration::from_millis)
.map_err(Into::into)
}
/// The Solidity compatible node implementation. /// The Solidity compatible node implementation.
/// ///
/// This describes the solutions to be tested against on a high level. /// This describes the solutions to be tested against on a high level.
#[derive( #[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize, Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)] )]
#[clap(rename_all = "lower")] #[strum(serialize_all = "kebab-case")]
pub enum TestingPlatform { pub enum TestingPlatform {
/// The go-ethereum reference full node EVM implementation. /// The go-ethereum reference full node EVM implementation.
Geth, Geth,
/// The kitchensink runtime provides the PolkaVM (PVM) based node implentation. /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink, Kitchensink,
} }
impl Display for TestingPlatform {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Geth => f.write_str("geth"),
Self::Kitchensink => f.write_str("revive"),
}
}
}
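One piece of the new `WalletConfiguration` worth pulling out is how the additional signers are derived: the integers `1..=N` are big-endian encoded into 32 bytes and used directly as secp256k1 private keys. A standalone sketch of that derivation using the same alloy calls as the diff; the free function is hypothetical:

```rust
use alloy::{
    network::EthereumWallet,
    primitives::{FixedBytes, U256},
    signers::local::PrivateKeySigner,
};

fn build_wallet(default_key: PrivateKeySigner, additional_keys: usize) -> EthereumWallet {
    let mut wallet = EthereumWallet::new(default_key);
    for signer in (1..=additional_keys)
        .map(|id| U256::from(id))
        // Big-endian encode the counter into a 32-byte private key. Small
        // non-zero integers are always below the secp256k1 group order, so
        // the unwrap cannot fail here.
        .map(|id| id.to_be_bytes::<32>())
        .map(|bytes| PrivateKeySigner::from_bytes(&FixedBytes(bytes)).unwrap())
    {
        wallet.register_signer(signer);
    }
    wallet
}
```

Because the keys are derived from a plain counter, every run produces the same funded signer set on both the leader and the follower node.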
+1 -1
@@ -32,10 +32,10 @@ tokio = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
tracing-appender = { workspace = true } tracing-appender = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
temp-dir = { workspace = true }
[lints] [lints]
workspace = true workspace = true
+1 -1
@@ -18,7 +18,7 @@ use alloy::{
primitives::Address, primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode}, rpc::types::{TransactionRequest, trace::geth::DiffMode},
}; };
use anyhow::Context; use anyhow::Context as _;
use futures::TryStreamExt; use futures::TryStreamExt;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use revive_dt_format::traits::{ResolutionContext, ResolverApi};
+69 -98
@@ -5,7 +5,7 @@ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
io::{BufWriter, Write, stderr}, io::{BufWriter, Write, stderr},
path::Path, path::Path,
sync::{Arc, LazyLock}, sync::Arc,
time::Instant, time::Instant,
}; };
@@ -13,7 +13,7 @@ use alloy::{
network::{Ethereum, TransactionBuilder}, network::{Ethereum, TransactionBuilder},
rpc::types::TransactionRequest, rpc::types::TransactionRequest,
}; };
use anyhow::Context; use anyhow::Context as _;
use clap::Parser; use clap::Parser;
use futures::stream; use futures::stream;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
@@ -23,16 +23,15 @@ use revive_dt_report::{
NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus, NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier, TestSpecificReporter, TestSpecifier,
}; };
use schemars::schema_for;
use serde_json::{Value, json}; use serde_json::{Value, json};
use temp_dir::TempDir;
use tokio::try_join; use tokio::try_join;
use tracing::{debug, error, info, info_span, instrument}; use tracing::{debug, error, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{iterators::EitherIter, types::Mode}; use revive_dt_common::{iterators::EitherIter, types::Mode};
use revive_dt_compiler::{CompilerOutput, SolidityCompiler}; use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_config::*; use revive_dt_config::{Context, *};
use revive_dt_core::{ use revive_dt_core::{
Geth, Kitchensink, Platform, Geth, Kitchensink, Platform,
driver::{CaseDriver, CaseState}, driver::{CaseDriver, CaseState},
@@ -41,65 +40,15 @@ use revive_dt_format::{
case::{Case, CaseIdx}, case::{Case, CaseIdx},
corpus::Corpus, corpus::Corpus,
input::{Input, Step}, input::{Input, Step},
metadata::{ContractPathAndIdent, MetadataFile}, metadata::{ContractPathAndIdent, Metadata, MetadataFile},
mode::ParsedMode, mode::ParsedMode,
}; };
use revive_dt_node::{Node, pool::NodePool}; use revive_dt_node::{Node, pool::NodePool};
use crate::cached_compiler::CachedCompiler; use crate::cached_compiler::CachedCompiler;
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?; let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
info!(
leader = args.leader.to_string(),
follower = args.follower.to_string(),
working_directory = %args.directory().display(),
number_of_nodes = args.number_of_nodes,
invalidate_compilation_cache = args.invalidate_compilation_cache,
"Differential testing tool has been initialized"
);
let (reporter, report_aggregator_task) = ReportAggregator::new(args.clone()).into_task();
let number_of_threads = args.number_of_threads;
let body = async move {
let tests = collect_corpora(&args)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
execute_corpus(&args, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")?;
Ok(())
};
tokio::runtime::Builder::new_multi_thread()
.worker_threads(number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(body)
}
fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
.lossy(false) .lossy(false)
// Assuming that each line contains 255 characters and that each character is one byte, then // Assuming that each line contains 255 characters and that each character is one byte, then
// this means that our buffer is about 4GBs large. // this means that our buffer is about 4GBs large.
@@ -118,31 +67,56 @@ fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
tracing::subscriber::set_global_default(subscriber)?; tracing::subscriber::set_global_default(subscriber)?;
info!("Differential testing tool is starting"); info!("Differential testing tool is starting");
let mut args = Arguments::parse(); let context = Context::try_parse()?;
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
if args.corpus.is_empty() { match context {
anyhow::bail!("no test corpus specified"); Context::ExecuteTests(context) => {
} let tests = collect_corpora(&context)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
match args.working_directory.as_ref() { tokio::runtime::Builder::new_multi_thread()
Some(dir) => { .worker_threads(context.concurrency_configuration.number_of_threads)
if !dir.exists() { .enable_all()
anyhow::bail!("workdir {} does not exist", dir.display()); .build()
} .expect("Failed building the Runtime")
.block_on(async move {
execute_corpus(*context, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")
})
} }
None => { Context::ExportJsonSchema => {
args.temp_dir = Some(&TEMP_DIR); let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
Ok(())
} }
} }
Ok((args, guard))
} }
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] #[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> { fn collect_corpora(
context: &ExecutionContext,
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
let mut corpora = HashMap::new(); let mut corpora = HashMap::new();
for path in &args.corpus { for path in &context.corpus {
let span = info_span!("Processing corpus file", path = %path.display()); let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter(); let _guard = span.enter();
@@ -160,7 +134,7 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<Metad
} }
async fn run_driver<L, F>( async fn run_driver<L, F>(
args: &Arguments, context: ExecutionContext,
metadata_files: &[MetadataFile], metadata_files: &[MetadataFile],
reporter: Reporter, reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>, report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
@@ -171,20 +145,20 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let leader_nodes = let leader_nodes = NodePool::<L::Blockchain>::new(context.clone())
NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?; .context("Failed to initialize leader node pool")?;
let follower_nodes = let follower_nodes = NodePool::<F::Blockchain>::new(context.clone())
NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?; .context("Failed to initialize follower node pool")?;
let tests_stream = tests_stream( let tests_stream = tests_stream(
args, &context,
metadata_files.iter(), metadata_files.iter(),
&leader_nodes, &leader_nodes,
&follower_nodes, &follower_nodes,
reporter.clone(), reporter.clone(),
) )
.await; .await;
let driver_task = start_driver_task::<L, F>(args, tests_stream) let driver_task = start_driver_task::<L, F>(&context, tests_stream)
.await .await
.context("Failed to start driver task")?; .context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter); let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -196,7 +170,7 @@ where
} }
async fn tests_stream<'a, L, F>( async fn tests_stream<'a, L, F>(
args: &Arguments, args: &ExecutionContext,
metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone, metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
leader_node_pool: &'a NodePool<L::Blockchain>, leader_node_pool: &'a NodePool<L::Blockchain>,
follower_node_pool: &'a NodePool<F::Blockchain>, follower_node_pool: &'a NodePool<F::Blockchain>,
@@ -320,7 +294,7 @@ where
} }
async fn start_driver_task<'a, L, F>( async fn start_driver_task<'a, L, F>(
args: &Arguments, context: &ExecutionContext,
tests: impl Stream<Item = Test<'a, L, F>>, tests: impl Stream<Item = Test<'a, L, F>>,
) -> anyhow::Result<impl Future<Output = ()>> ) -> anyhow::Result<impl Future<Output = ()>>
where where
@@ -333,25 +307,22 @@ where
{ {
info!("Starting driver task"); info!("Starting driver task");
let number_concurrent_tasks = args.number_of_concurrent_tasks();
let cached_compiler = Arc::new( let cached_compiler = Arc::new(
CachedCompiler::new( CachedCompiler::new(
args.directory().join("compilation_cache"), context
args.invalidate_compilation_cache, .working_directory
.as_path()
.join("compilation_cache"),
context
.compilation_configuration
.invalidate_compilation_cache,
) )
.await .await
.context("Failed to initialize cached compiler")?, .context("Failed to initialize cached compiler")?,
); );
Ok(tests.for_each_concurrent( Ok(tests.for_each_concurrent(
// We want to limit the concurrent tasks here because: context.concurrency_configuration.concurrency_limit(),
//
// 1. We don't want to overwhelm the nodes with too many requests, leading to responses timing out.
// 2. We don't want to open too many files at once, leading to the OS running out of file descriptors.
//
// By default, we allow maximum of 10 ongoing requests per node in order to limit (1), and assume that
// this number will automatically be low enough to address (2). The user can override this.
Some(number_concurrent_tasks),
move |test| { move |test| {
let cached_compiler = cached_compiler.clone(); let cached_compiler = cached_compiler.clone();
@@ -387,8 +358,7 @@ where
)) ))
} }
#[allow(clippy::uninlined_format_args)] #[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
#[allow(irrefutable_let_patterns)]
async fn start_cli_reporting_task(reporter: Reporter) { async fn start_cli_reporting_task(reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter); drop(reporter);
@@ -710,17 +680,18 @@ where
} }
async fn execute_corpus( async fn execute_corpus(
args: &Arguments, context: ExecutionContext,
tests: &[MetadataFile], tests: &[MetadataFile],
reporter: Reporter, reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>, report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
match (&args.leader, &args.follower) { match (&context.leader, &context.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => { (TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(args, tests, reporter, report_aggregator_task).await? run_driver::<Geth, Kitchensink>(context, tests, reporter, report_aggregator_task)
.await?
} }
(TestingPlatform::Geth, TestingPlatform::Geth) => { (TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(args, tests, reporter, report_aggregator_task).await? run_driver::<Geth, Geth>(context, tests, reporter, report_aggregator_task).await?
} }
_ => unimplemented!(), _ => unimplemented!(),
} }
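The driver loop above bounds parallelism with `for_each_concurrent`, whose limit parameter is effectively an `Option<usize>`: the new `concurrency_limit()` helper returns `None` to lift the cap when `--concurrency.ignore-concurrency-limit` is set, and `Some(n)` otherwise. A minimal sketch of the mechanism, assuming the `futures` and `tokio` crates:

```rust
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    // `None` would remove the bound entirely; `Some(n)` allows at most n
    // in-flight futures, mirroring `concurrency_limit()` in the config diff.
    let limit: Option<usize> = Some(4);
    stream::iter(0..16)
        .for_each_concurrent(limit, |test_id| async move {
            // Stand-in for driving one test case against the node pools.
            println!("running test {test_id}");
        })
        .await;
}
```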
+1
@@ -20,6 +20,7 @@ anyhow = { workspace = true }
futures = { workspace = true } futures = { workspace = true }
regex = { workspace = true } regex = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
serde = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true } serde_json = { workspace = true }
+24 -1
@@ -1,3 +1,4 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode}; use revive_dt_common::{macros::define_wrapper_type, types::Mode};
@@ -7,26 +8,48 @@ use crate::{
mode::ParsedMode, mode::ParsedMode,
}; };
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq)] #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case { pub struct Case {
/// An optional name of the test case.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>, pub name: Option<String>,
/// An optional comment on the case which has no impact on the execution in any way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// If this is provided then it takes higher priority than the modes specified in the metadata
/// file.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub modes: Option<Vec<ParsedMode>>, pub modes: Option<Vec<ParsedMode>>,
/// The set of steps to run as part of this test case.
#[serde(rename = "inputs")] #[serde(rename = "inputs")]
pub steps: Vec<Step>, pub steps: Vec<Step>,
/// An optional name of the group of tests that this test belongs to.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub group: Option<String>, pub group: Option<String>,
/// An optional set of expectations and assertions to make about the transaction after it ran.
///
/// If this is not specified then the only assertion that will be run is that the transaction
/// was successful.
///
/// This expectation that's on the case itself will be attached to the final step of the case.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>, pub expected: Option<Expected>,
/// An optional boolean which defines if the case as a whole should be ignored. If null then the
/// case will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
} }
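The `JsonSchema` derive added throughout this file is what makes the new schema export possible: schemars walks the same structs serde does, turns doc comments into `description` fields, and lets awkward field types be overridden. A minimal sketch with a hypothetical `Record` type, assuming the `schemars`, `serde`, and `serde_json` crates:

```rust
use schemars::{schema_for, JsonSchema};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, JsonSchema)]
struct Record {
    /// Doc comments like this one become `description` fields in the schema.
    name: Option<String>,
    /// Advertised as a plain string in the schema, the same trick the diff
    /// applies to `Address` and `U256` fields via `#[schemars(with = "String")]`.
    #[schemars(with = "String")]
    balance: u128,
}

fn main() {
    let schema = schema_for!(Record);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}
```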
+61 -16
@@ -8,8 +8,9 @@ use alloy::{
rpc::types::TransactionRequest, rpc::types::TransactionRequest,
}; };
use alloy_primitives::{FixedBytes, utils::parse_units}; use alloy_primitives::{FixedBytes, utils::parse_units};
use anyhow::Context; use anyhow::Context as _;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream}; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
use schemars::JsonSchema;
use semver::VersionReq; use semver::VersionReq;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -23,7 +24,7 @@ use crate::{metadata::ContractInstance, traits::ResolutionContext};
/// ///
/// A test step can be anything. It could be an invocation to a function, an assertion, or any other /// A test step can be anything. It could be an invocation to a function, an assertion, or any other
/// action that needs to be run or executed on the nodes used in the tests. /// action that needs to be run or executed on the nodes used in the tests.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
#[serde(untagged)] #[serde(untagged)]
pub enum Step { pub enum Step {
/// A function call or an invocation to some function on some smart contract. /// A function call or an invocation to some function on some smart contract.
@@ -39,36 +40,51 @@ define_wrapper_type!(
pub struct StepIdx(usize) impl Display; pub struct StepIdx(usize) impl Display;
); );
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] /// This is an input step which is a transaction description that the framework translates into a
/// transaction and executes on the nodes.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct Input { pub struct Input {
/// The address of the account performing the call and paying the fees for it.
#[serde(default = "Input::default_caller")] #[serde(default = "Input::default_caller")]
#[schemars(with = "String")]
pub caller: Address, pub caller: Address,
/// An optional comment on the step which has no impact on the execution in any way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// The contract instance that's being called in this transaction step.
#[serde(default = "Input::default_instance")] #[serde(default = "Input::default_instance")]
pub instance: ContractInstance, pub instance: ContractInstance,
/// The method that's being called in this step.
pub method: Method, pub method: Method,
/// The calldata that the function should be invoked with.
#[serde(default)] #[serde(default)]
pub calldata: Calldata, pub calldata: Calldata,
/// A set of assertions and expectations to have for the transaction.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>, pub expected: Option<Expected>,
/// An optional value to provide as part of the transaction.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub value: Option<EtherValue>, pub value: Option<EtherValue>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
#[schemars(skip)]
pub storage: Option<HashMap<String, Calldata>>, pub storage: Option<HashMap<String, Calldata>>,
/// Variable assignments to perform in the framework, allowing us to reference them again later
/// on during the execution.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub variable_assignments: Option<VariableAssignments>, pub variable_assignments: Option<VariableAssignments>,
} }
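For reference, an input step like the struct above is written in a metadata file as a JSON object whose only required field is `method` (per the generated schema at the end of this diff). A minimal sketch with simplified stand-in types — the real `Method`, `Calldata`, and address types are richer:

```rust
// Sketch: what an input step looks like on the wire. The types here are
// simplified stand-ins; only `method` is required, everything else defaults.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct InputSketch {
    comment: Option<String>,
    method: String,
    #[serde(default)]
    calldata: Vec<String>,
}

fn main() {
    let step = r#"{
        "comment": "call the getter",
        "method": "get",
        "calldata": ["0x2a"]
    }"#;
    let parsed: InputSketch = serde_json::from_str(step).unwrap();
    println!("{parsed:?}");
}
```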
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] /// This represents a balance assertion step where the framework needs to query the balance of some
/// account or contract and assert that it's some amount.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertion { pub struct BalanceAssertion {
/// An optional comment on the balance assertion. /// An optional comment on the balance assertion.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -82,11 +98,13 @@ pub struct BalanceAssertion {
/// followed in the calldata. /// followed in the calldata.
pub address: String, pub address: String,
/// The amount of balance to assert that the account or contract has. /// The amount of balance to assert that the account or contract has. This is a 256-bit integer
/// that's serialized to and deserialized from a decimal string.
#[schemars(with = "String")]
pub expected_balance: U256, pub expected_balance: U256,
} }
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertion { pub struct StorageEmptyAssertion {
/// An optional comment on the storage empty assertion. /// An optional comment on the storage empty assertion.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -104,31 +122,52 @@ pub struct StorageEmptyAssertion {
pub is_storage_empty: bool, pub is_storage_empty: bool,
} }
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] /// A set of expectations and assertions to make about the transaction after it ran.
///
/// If this is not specified then the only assertion that will be run is that the transaction
/// was successful.
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
#[serde(untagged)] #[serde(untagged)]
pub enum Expected { pub enum Expected {
/// An assertion that the transaction succeeded and returned the provided set of data.
Calldata(Calldata), Calldata(Calldata),
/// A more complex assertion.
Expected(ExpectedOutput), Expected(ExpectedOutput),
/// A set of assertions.
ExpectedMany(Vec<ExpectedOutput>), ExpectedMany(Vec<ExpectedOutput>),
} }
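Since `Expected` is `#[serde(untagged)]`, a bare calldata value, a single assertion object, and an array of assertion objects all deserialize into the same field. A sketch of the mechanism with simplified stand-in variants:

```rust
// Sketch: untagged deserialization as used by `Expected` above. serde tries
// each variant in order until one matches; the variants are stand-ins.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum ExpectedSketch {
    // A bare calldata array: assert success and this return data.
    Calldata(Vec<String>),
    // A richer assertion object, akin to `ExpectedOutput`.
    Output {
        #[serde(default)]
        exception: bool,
    },
}

fn main() {
    let simple: ExpectedSketch = serde_json::from_str(r#"["0x2a"]"#).unwrap();
    let complex: ExpectedSketch = serde_json::from_str(r#"{"exception": true}"#).unwrap();
    println!("{simple:?} / {complex:?}");
}
```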
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] /// A set of assertions to run on the transaction.
#[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
pub struct ExpectedOutput { pub struct ExpectedOutput {
/// An optional compiler version that's required in order for this assertion to run.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
#[schemars(with = "Option<String>")]
pub compiler_version: Option<VersionReq>, pub compiler_version: Option<VersionReq>,
/// The optional return data that's expected from the invocation.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub return_data: Option<Calldata>, pub return_data: Option<Calldata>,
/// An optional set of assertions to run on the emitted events from the transaction.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub events: Option<Vec<Event>>, pub events: Option<Vec<Event>>,
/// A boolean which defines whether we expect the transaction to succeed or fail.
#[serde(default)] #[serde(default)]
pub exception: bool, pub exception: bool,
} }
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
pub struct Event { pub struct Event {
/// An optional address of the emitter of the event.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub address: Option<String>, pub address: Option<String>,
/// The set of topics to expect the event to have.
pub topics: Vec<String>, pub topics: Vec<String>,
/// The set of values to expect the event to have.
pub values: Calldata, pub values: Calldata,
} }
@@ -183,16 +222,17 @@ pub struct Event {
/// [`Single`]: Calldata::Single /// [`Single`]: Calldata::Single
/// [`Compound`]: Calldata::Compound /// [`Compound`]: Calldata::Compound
/// [reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation /// [reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)]
#[serde(untagged)] #[serde(untagged)]
pub enum Calldata { pub enum Calldata {
Single(Bytes), Single(#[schemars(with = "String")] Bytes),
Compound(Vec<CalldataItem>), Compound(Vec<CalldataItem>),
} }
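The doc comment above describes compound calldata items as space-separated tokens evaluated in reverse polish notation. A stack-based sketch of that evaluation, using `u64` instead of `U256` and only the `+` operator, with `$BLOCK_NUMBER` resolved to a caller-supplied dummy value:

```rust
// Sketch: the stack-based reverse-polish evaluation described above, using
// u64 instead of U256 and only the `+` operator. `$BLOCK_NUMBER` stands in
// for the resolution step.
fn eval_rpn(item: &str, block_number: u64) -> Option<u64> {
    let mut stack: Vec<u64> = Vec::new();
    for token in item.split(' ') {
        match token {
            "$BLOCK_NUMBER" => stack.push(block_number),
            "+" => {
                let (b, a) = (stack.pop()?, stack.pop()?);
                stack.push(a + b);
            }
            n => stack.push(n.parse().ok()?),
        }
    }
    (stack.len() == 1).then(|| stack[0])
}

fn main() {
    // `2 4 +` is reverse polish for `2 + 4`.
    assert_eq!(eval_rpn("2 4 +", 0), Some(6));
    assert_eq!(eval_rpn("$BLOCK_NUMBER $BLOCK_NUMBER +", 7), Some(14));
}
```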
define_wrapper_type! { define_wrapper_type! {
/// This represents an item in the [`Calldata::Compound`] variant. /// This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] /// according to the resolution rules of the tool.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema)]
#[serde(transparent)] #[serde(transparent)]
pub struct CalldataItem(String) impl Display; pub struct CalldataItem(String) impl Display;
} }
@@ -217,7 +257,7 @@ enum Operation {
} }
/// Specify how the contract is called. /// Specify how the contract is called.
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq)] #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub enum Method { pub enum Method {
/// Initiate a deploy transaction, calling the contract's constructor. /// Initiate a deploy transaction, calling the contract's constructor.
/// ///
@@ -238,11 +278,16 @@ pub enum Method {
} }
define_wrapper_type!( define_wrapper_type!(
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] /// Defines an Ether value.
///
/// This is an unsigned 256-bit integer followed by a denomination, which can be eth, ether,
/// gwei, or wei.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema)]
#[schemars(with = "String")]
pub struct EtherValue(U256) impl Display; pub struct EtherValue(U256) impl Display;
); );
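A sketch of the `amount denomination` parsing that the doc comment above implies, using `u128` in place of `U256`; the crate's actual parsing (via the imported `parse_units`) is not shown in this hunk:

```rust
// Sketch: parsing an ether value such as "5 gwei" into wei, using u128 in
// place of U256. The denominations follow the doc comment above.
fn parse_ether_value(s: &str) -> Option<u128> {
    let (amount, denomination) = s.trim().split_once(' ')?;
    let amount: u128 = amount.parse().ok()?;
    let multiplier: u128 = match denomination {
        "wei" => 1,
        "gwei" => 1_000_000_000,
        "eth" | "ether" => 1_000_000_000_000_000_000,
        _ => return None,
    };
    amount.checked_mul(multiplier)
}

fn main() {
    assert_eq!(parse_ether_value("5 gwei"), Some(5_000_000_000));
    assert_eq!(parse_ether_value("1 ether"), Some(1_000_000_000_000_000_000));
}
```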
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct VariableAssignments { pub struct VariableAssignments {
/// A vector of the variable names to assign to the return data. /// A vector of the variable names to assign to the return data.
/// ///
+80 -13
View File
@@ -8,6 +8,7 @@ use std::{
str::FromStr, str::FromStr,
}; };
use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_common::EVMVersion; use revive_common::EVMVersion;
@@ -56,30 +57,62 @@ impl Deref for MetadataFile {
} }
} }
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq)] /// A MatterLabs metadata file.
///
/// This defines the structure that the MatterLabs metadata files follow for defining the tests or
/// the workloads.
///
/// Each metadata file is composed of multiple test cases where each test case is isolated from the
/// others and runs in a completely different address space. Each test case is composed of a number
/// of steps and assertions that should be performed as part of the test case.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub struct Metadata { pub struct Metadata {
/// A comment on the test case that's added for human-readability. /// This is an optional comment on the metadata file which has no impact on the execution in any
/// way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// An optional boolean which defines if the metadata file as a whole should be ignored. If null
/// then the metadata file will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
/// An optional vector of targets that this Metadata file's cases can be executed on. As an
/// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
/// specify a target of "PolkaVM" in here.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub targets: Option<Vec<String>>, pub targets: Option<Vec<String>>,
/// A vector of the test cases and workloads contained within the metadata file. This is their
/// primary description.
pub cases: Vec<Case>, pub cases: Vec<Case>,
/// A map of all of the contracts that the test requires to run.
///
/// This is a map where the key is the name of the contract instance and the value is the
/// contract's path and ident in the file.
///
/// If any contract is to be used by the test then it must be included here first so that the
/// framework is aware of its path and can compile and prepare it.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>, pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
/// The set of libraries that this metadata file requires.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>, pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub modes: Option<Vec<ParsedMode>>, pub modes: Option<Vec<ParsedMode>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
#[schemars(skip)]
pub file_path: Option<PathBuf>, pub file_path: Option<PathBuf>,
/// This field specifies an EVM version requirement that the test case has where the test might /// This field specifies an EVM version requirement that the test case has where the test might
@@ -87,9 +120,9 @@ pub struct Metadata {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub required_evm_version: Option<EvmVersionRequirement>, pub required_evm_version: Option<EvmVersionRequirement>,
/// A set of compilation directives that will be passed to the compiler whenever the contracts for /// A set of compilation directives that will be passed to the compiler whenever the contracts
/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]
/// just a filter for when a test can run whereas this is an instruction to the compiler. /// is just a filter for when a test can run whereas this is an instruction to the compiler.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub compiler_directives: Option<CompilationDirectives>, pub compiler_directives: Option<CompilationDirectives>,
} }
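Putting the fields above together, a minimal metadata file only needs `cases` (per the generated schema at the end of this diff, `cases` is the sole required top-level property), with contracts keyed by instance name and valued in the `${path}:${contract_ident}` form. A sketch, parsed as plain JSON here rather than through the crate's own types:

```rust
// Sketch: a minimal metadata file, validated only as JSON. The shape follows
// the `Metadata` struct above and the generated schema in this diff.
fn main() {
    let metadata = r#"{
        "contracts": { "Test": "contracts/Test.sol:Test" },
        "cases": [ { "name": "smoke", "inputs": [ { "method": "#deployer" } ] } ]
    }"#;
    let value: serde_json::Value = serde_json::from_str(metadata).unwrap();
    assert!(value["cases"].is_array());
}
```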
@@ -262,7 +295,7 @@ define_wrapper_type!(
/// ///
/// Typically, this is used as the key to the "contracts" field of metadata files. /// Typically, this is used as the key to the "contracts" field of metadata files.
#[derive( #[derive(
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
)] )]
#[serde(transparent)] #[serde(transparent)]
pub struct ContractInstance(String) impl Display; pub struct ContractInstance(String) impl Display;
@@ -273,7 +306,7 @@ define_wrapper_type!(
/// ///
/// A contract identifier is the name of the contract in the source code. /// A contract identifier is the name of the contract in the source code.
#[derive( #[derive(
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
)] )]
#[serde(transparent)] #[serde(transparent)]
pub struct ContractIdent(String) impl Display; pub struct ContractIdent(String) impl Display;
@@ -286,7 +319,9 @@ define_wrapper_type!(
/// ```text /// ```text
/// ${path}:${contract_ident} /// ${path}:${contract_ident}
/// ``` /// ```
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(
Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
)]
#[serde(try_from = "String", into = "String")] #[serde(try_from = "String", into = "String")]
pub struct ContractPathAndIdent { pub struct ContractPathAndIdent {
/// The path of the contract source code relative to the directory containing the metadata file. /// The path of the contract source code relative to the directory containing the metadata file.
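A sketch of one plausible way to split the `${path}:${contract_ident}` form documented above; whether the crate splits on the first or last `:` is not shown in this hunk:

```rust
// Sketch: splitting "contracts/Test.sol:Test" into path and contract ident.
// `rsplit_once` splits on the last ':', leaving any earlier ':' in the path.
fn split_path_and_ident(s: &str) -> Option<(&str, &str)> {
    s.rsplit_once(':')
}

fn main() {
    assert_eq!(
        split_path_and_ident("contracts/Test.sol:Test"),
        Some(("contracts/Test.sol", "Test"))
    );
}
```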
@@ -363,9 +398,15 @@ impl From<ContractPathAndIdent> for String {
} }
} }
/// An EVM version requirement that the test case has. This gets serialized and /// An EVM version requirement that the test case has. This gets serialized and deserialized from
/// deserialized from and into [`String`]. /// and into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] /// EVM version.
///
/// When specified, the framework will only run the test if the node's EVM version matches that
/// required by the metadata file.
#[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
)]
#[serde(try_from = "String", into = "String")] #[serde(try_from = "String", into = "String")]
pub struct EvmVersionRequirement { pub struct EvmVersionRequirement {
ordering: Ordering, ordering: Ordering,
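A sketch of peeling the ordering prefix off a requirement string such as `>=shanghai`, following the `(>=|<=|=|>|<)` format described above; note that two-character prefixes must be tried before their one-character counterparts:

```rust
// Sketch: splitting an EVM version requirement into its ordering prefix and
// version name. ">=" must be tried before ">", otherwise ">=shanghai" would
// parse as ">" followed by "=shanghai".
fn split_requirement(s: &str) -> Option<(&str, &str)> {
    for prefix in [">=", "<=", "=", ">", "<"] {
        if let Some(version) = s.strip_prefix(prefix) {
            return Some((prefix, version));
        }
    }
    None
}

fn main() {
    assert_eq!(split_requirement(">=shanghai"), Some((">=", "shanghai")));
    assert_eq!(split_requirement("<london"), Some(("<", "london")));
}
```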
@@ -493,7 +534,18 @@ impl From<EvmVersionRequirement> for String {
/// just a filter for when a test can run whereas this is an instruction to the compiler. /// just a filter for when a test can run whereas this is an instruction to the compiler.
/// Defines how the compiler should handle revert strings. /// Defines how the compiler should handle revert strings.
#[derive( #[derive(
Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, Clone,
Debug,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Default,
Serialize,
Deserialize,
JsonSchema,
)] )]
pub struct CompilationDirectives { pub struct CompilationDirectives {
/// Defines how the revert strings should be handled. /// Defines how the revert strings should be handled.
@@ -502,14 +554,29 @@ pub struct CompilationDirectives {
/// Defines how the compiler should handle revert strings. /// Defines how the compiler should handle revert strings.
#[derive( #[derive(
Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, Clone,
Debug,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Default,
Serialize,
Deserialize,
JsonSchema,
)] )]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub enum RevertString { pub enum RevertString {
/// The default handling of the revert strings.
#[default] #[default]
Default, Default,
/// The debug handling of the revert strings.
Debug, Debug,
/// Strip the revert strings.
Strip, Strip,
/// Provide verbose debug strings for the revert string.
VerboseDebug, VerboseDebug,
} }
+3 -2
View File
@@ -1,7 +1,8 @@
use anyhow::Context; use anyhow::Context as _;
use regex::Regex; use regex::Regex;
use revive_dt_common::iterators::EitherIter; use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt::Display; use std::fmt::Display;
@@ -17,7 +18,7 @@ use std::sync::LazyLock;
/// ``` /// ```
/// ///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`]. /// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")] #[serde(try_from = "String", into = "String")]
pub struct ParsedMode { pub struct ParsedMode {
pub pipeline: Option<ModePipeline>, pub pipeline: Option<ModePipeline>,
+46 -65
View File
@@ -17,9 +17,7 @@ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount}, genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet}, network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{ primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
},
providers::{ providers::{
Provider, ProviderBuilder, Provider, ProviderBuilder,
ext::DebugApi, ext::DebugApi,
@@ -29,9 +27,8 @@ use alloy::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
}, },
signers::local::PrivateKeySigner,
}; };
use anyhow::Context; use anyhow::Context as _;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use tracing::{Instrument, instrument}; use tracing::{Instrument, instrument};
@@ -39,7 +36,7 @@ use revive_dt_common::{
fs::clear_directory, fs::clear_directory,
futures::{PollingWaitBehavior, poll}, futures::{PollingWaitBehavior, poll},
}; };
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
@@ -64,7 +61,7 @@ pub struct GethNode {
geth: PathBuf, geth: PathBuf,
id: u32, id: u32,
handle: Option<Child>, handle: Option<Child>,
start_timeout: u64, start_timeout: Duration,
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller, chain_id_filler: ChainIdFiller,
@@ -97,7 +94,7 @@ impl GethNode {
/// Create the node directory and call `geth init` to configure the genesis. /// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
@@ -106,8 +103,6 @@ impl GethNode {
create_dir_all(&self.logs_directory) create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?; .context("Failed to create logs directory for geth node")?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)
.context("Failed to deserialize geth genesis JSON")?;
for signer_address in for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet) <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{ {
@@ -240,7 +235,7 @@ impl GethNode {
.open(self.geth_stderr_log_file_path()) .open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?; .context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = Duration::from_millis(self.start_timeout); let maximum_wait_time = self.start_timeout;
let mut stderr = BufReader::new(logs_file).lines(); let mut stderr = BufReader::new(logs_file).lines();
let mut lines = vec![]; let mut lines = vec![];
loop { loop {
@@ -256,7 +251,7 @@ impl GethNode {
if Instant::now().duration_since(start_time) > maximum_wait_time { if Instant::now().duration_since(start_time) > maximum_wait_time {
anyhow::bail!( anyhow::bail!(
"Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n", "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
self.start_timeout, self.start_timeout.as_millis(),
lines.join("\n") lines.join("\n")
); );
} }
@@ -556,30 +551,40 @@ impl ResolverApi for GethNode {
} }
impl Node for GethNode { impl Node for GethNode {
fn new(config: &Arguments) -> Self { fn new(
let geth_directory = config.directory().join(Self::BASE_DIRECTORY); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string()); let base_directory = geth_directory.join(id.to_string());
let mut wallet = config.wallet(); let wallet = wallet_configuration.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY), data_directory: base_directory.join(Self::DATA_DIRECTORY),
logs_directory: base_directory.join(Self::LOGS_DIRECTORY), logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
base_directory, base_directory,
geth: config.geth.clone(), geth: geth_configuration.path.clone(),
id, id,
handle: None, handle: None,
start_timeout: config.geth_start_timeout, start_timeout: geth_configuration.start_timeout_ms,
wallet: Arc::new(wallet), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
// We know that we only need to be storing 2 files so we can specify that when creating // We know that we only need to be storing 2 files so we can specify that when creating
@@ -621,7 +626,7 @@ impl Node for GethNode {
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?; self.init(genesis)?.spawn_process()?;
Ok(()) Ok(())
} }
@@ -662,49 +667,25 @@ impl Drop for GethNode {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use revive_dt_config::Arguments;
use temp_dir::TempDir;
use crate::{GENESIS_JSON, Node};
use super::*; use super::*;
fn test_config() -> (Arguments, TempDir) { fn test_config() -> ExecutionContext {
let mut config = Arguments::default(); ExecutionContext::default()
let temp_dir = TempDir::new().unwrap();
config.working_directory = temp_dir.path().to_path_buf().into();
(config, temp_dir)
} }
fn new_node() -> (GethNode, TempDir) { fn new_node() -> (ExecutionContext, GethNode) {
let (args, temp_dir) = test_config(); let context = test_config();
let mut node = GethNode::new(&args); let mut node = GethNode::new(&context);
node.init(GENESIS_JSON.to_owned()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(node, temp_dir) (context, node)
}
#[test]
fn init_works() {
GethNode::new(&test_config().0)
.init(GENESIS_JSON.to_string())
.unwrap();
}
#[test]
fn spawn_works() {
GethNode::new(&test_config().0)
.spawn(GENESIS_JSON.to_string())
.unwrap();
} }
#[test] #[test]
fn version_works() { fn version_works() {
let version = GethNode::new(&test_config().0).version().unwrap(); let version = GethNode::new(&test_config()).version().unwrap();
assert!( assert!(
version.starts_with("geth version"), version.starts_with("geth version"),
"expected version string, got: '{version}'" "expected version string, got: '{version}'"
@@ -714,7 +695,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_chain_id_from_node() { async fn can_get_chain_id_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let chain_id = node.chain_id().await; let chain_id = node.chain_id().await;
@@ -727,7 +708,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_gas_limit_from_node() { async fn can_get_gas_limit_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -740,7 +721,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_coinbase_from_node() { async fn can_get_coinbase_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -753,7 +734,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_difficulty_from_node() { async fn can_get_block_difficulty_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -766,7 +747,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_hash_from_node() { async fn can_get_block_hash_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -778,7 +759,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_timestamp_from_node() { async fn can_get_block_timestamp_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -790,7 +771,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_number_from_node() { async fn can_get_block_number_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_number = node.last_block_number().await; let block_number = node.last_block_number().await;
+59 -60
View File
@@ -19,8 +19,8 @@ use alloy::{
TransactionBuilderError, UnbuiltTransactionError, TransactionBuilderError, UnbuiltTransactionError,
}, },
primitives::{ primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes, Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
StorageKey, TxHash, U256, TxHash, U256,
}, },
providers::{ providers::{
Provider, ProviderBuilder, Provider, ProviderBuilder,
@@ -32,9 +32,8 @@ use alloy::{
eth::{Block, Header, Transaction}, eth::{Block, Header, Transaction},
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
}, },
signers::local::PrivateKeySigner,
}; };
use anyhow::Context; use anyhow::Context as _;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory; use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
@@ -43,7 +42,7 @@ use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec; use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32; use sp_runtime::AccountId32;
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -92,7 +91,7 @@ impl KitchensinkNode {
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
@@ -153,8 +152,6 @@ impl KitchensinkNode {
}) })
.collect(); .collect();
let mut eth_balances = { let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)
.context("Failed to deserialize EVM genesis JSON for kitchensink")?;
for signer_address in for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet) <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{ {
@@ -586,35 +583,47 @@ impl ResolverApi for KitchensinkNode {
} }
impl Node for KitchensinkNode { impl Node for KitchensinkNode {
fn new(config: &Arguments) -> Self { fn new(
let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let kitchensink_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = kitchensink_directory.join(id.to_string()); let base_directory = kitchensink_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let mut wallet = config.wallet(); let wallet = wallet_configuration.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
id, id,
substrate_binary: config.kitchensink.clone(), substrate_binary: kitchensink_configuration.path.clone(),
dev_node_binary: config.revive_dev_node.clone(), dev_node_binary: dev_node_configuration.path.clone(),
eth_proxy_binary: config.eth_proxy.clone(), eth_proxy_binary: eth_rpc_configuration.path.clone(),
rpc_url: String::new(), rpc_url: String::new(),
base_directory, base_directory,
logs_directory, logs_directory,
process_substrate: None, process_substrate: None,
process_proxy: None, process_proxy: None,
wallet: Arc::new(wallet), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node, use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink,
// We know that we only need to be storing 4 files so we can specify that when creating // We know that we only need to be storing 4 files so we can specify that when creating
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4), logs_file_to_flush: Vec::with_capacity(4),
@@ -655,8 +664,8 @@ impl Node for KitchensinkNode {
Ok(()) Ok(())
} }
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(&genesis)?.spawn_process() self.init(genesis)?.spawn_process()
} }
fn version(&self) -> anyhow::Result<String> { fn version(&self) -> anyhow::Result<String> {
@@ -1121,25 +1130,20 @@ impl BlockHeader for KitchenSinkHeader {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use alloy::rpc::types::TransactionRequest; use alloy::rpc::types::TransactionRequest;
use revive_dt_config::Arguments;
use std::path::PathBuf;
use std::sync::{LazyLock, Mutex}; use std::sync::{LazyLock, Mutex};
use std::fs; use std::fs;
use super::*; use super::*;
use crate::{GENESIS_JSON, Node}; use crate::Node;
fn test_config() -> Arguments { fn test_config() -> ExecutionContext {
Arguments { let mut context = ExecutionContext::default();
kitchensink: PathBuf::from("substrate-node"), context.kitchensink_configuration.use_kitchensink = true;
eth_proxy: PathBuf::from("eth-rpc"), context
use_kitchensink_not_dev_node: true,
..Default::default()
}
} }
fn new_node() -> (KitchensinkNode, Arguments) { fn new_node() -> (ExecutionContext, KitchensinkNode) {
// Note: When we run the tests in the CI we found that if they're all // Note: When we run the tests in the CI we found that if they're all
// run in parallel then the CI is unable to start all of the nodes in // run in parallel then the CI is unable to start all of the nodes in
// time and their start-up times out. Therefore, we want all of the // time and their start-up times out. Therefore, we want all of the
@@ -1158,32 +1162,36 @@ mod tests {
static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
let _guard = NODE_START_MUTEX.lock().unwrap(); let _guard = NODE_START_MUTEX.lock().unwrap();
let args = test_config(); let context = test_config();
let mut node = KitchensinkNode::new(&args); let mut node = KitchensinkNode::new(&context);
node.init(GENESIS_JSON) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(node, args) (context, node)
} }
/// A shared node that multiple tests can use. It starts up once. /// A shared node that multiple tests can use. It starts up once.
fn shared_node() -> &'static KitchensinkNode { fn shared_node() -> &'static KitchensinkNode {
static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| { static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| {
let (node, args) = new_node(); let (context, node) = new_node();
(node, args) (context, node)
}); });
&NODE.0 &NODE.1
} }
#[tokio::test] #[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() { async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange // Arrange
let (node, args) = new_node(); let (context, node) = new_node();
let provider = node.provider().await.expect("Failed to create provider"); let provider = node.provider().await.expect("Failed to create provider");
let account_address = args.wallet().default_signer().address(); let account_address = context
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -1217,7 +1225,9 @@ mod tests {
let mut dummy_node = KitchensinkNode::new(&test_config()); let mut dummy_node = KitchensinkNode::new(&test_config());
// Call `init()` // Call `init()`
dummy_node.init(genesis_content).expect("init failed"); dummy_node
.init(serde_json::from_str(genesis_content).unwrap())
.expect("init failed");
// Check that the patched chainspec file was generated // Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node let final_chainspec_path = dummy_node
@@ -1327,20 +1337,10 @@ mod tests {
} }
} }
#[test]
fn spawn_works() {
let config = test_config();
let mut node = KitchensinkNode::new(&config);
node.spawn(GENESIS_JSON.to_string()).unwrap();
}
#[test] #[test]
fn version_works() { fn version_works() {
let config = test_config(); let node = shared_node();
let node = KitchensinkNode::new(&config);
let version = node.version().unwrap(); let version = node.version().unwrap();
assert!( assert!(
@@ -1351,9 +1351,8 @@ mod tests {
#[test] #[test]
fn eth_rpc_version_works() { fn eth_rpc_version_works() {
let config = test_config(); let node = shared_node();
let node = KitchensinkNode::new(&config);
let version = node.eth_rpc_version().unwrap(); let version = node.eth_rpc_version().unwrap();
assert!( assert!(
+14 -6
View File
@@ -1,7 +1,8 @@
//! This crate implements the testing nodes. //! This crate implements the testing nodes.
use alloy::genesis::Genesis;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
pub mod common; pub mod common;
@@ -10,13 +11,20 @@ pub mod geth;
pub mod kitchensink; pub mod kitchensink;
pub mod pool; pub mod pool;
/// The default genesis configuration.
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
/// An abstract interface for testing nodes. /// An abstract interface for testing nodes.
pub trait Node: EthereumNode { pub trait Node: EthereumNode {
/// Create a new uninitialized instance. /// Create a new uninitialized instance.
fn new(config: &Arguments) -> Self; fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self;
/// Returns the identifier of the node. /// Returns the identifier of the node.
fn id(&self) -> usize; fn id(&self) -> usize;
@@ -24,7 +32,7 @@ pub trait Node: EthereumNode {
/// Spawns a node configured according to the genesis json. /// Spawns a node configured according to the genesis json.
/// ///
/// Blocking until it's ready to accept transactions. /// Blocking until it's ready to accept transactions.
fn spawn(&mut self, genesis: String) -> anyhow::Result<()>; fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
/// Prune the node instance and related data. /// Prune the node instance and related data.
/// ///
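The `new` signature above replaces the old monolithic `Arguments` with a context bounded by one `AsRef` per configuration section, so any type that can lend out references to the needed sections is accepted. A self-contained sketch of the pattern with made-up configuration types:

```rust
// Sketch: the AsRef-bound context pattern used by `Node::new` above.
// The configuration types and fields here are illustrative, not the crate's.
struct GethConfiguration { path: &'static str }
struct WalletConfiguration { mnemonic: &'static str }

struct ExecutionContext {
    geth: GethConfiguration,
    wallet: WalletConfiguration,
}

impl AsRef<GethConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &GethConfiguration { &self.geth }
}
impl AsRef<WalletConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &WalletConfiguration { &self.wallet }
}

fn new_node(context: impl AsRef<GethConfiguration> + AsRef<WalletConfiguration>) {
    // Disambiguate which `as_ref` is meant, exactly as the diff does.
    let geth = AsRef::<GethConfiguration>::as_ref(&context);
    let wallet = AsRef::<WalletConfiguration>::as_ref(&context);
    println!("geth at {}, wallet: {}", geth.path, wallet.mnemonic);
}

fn main() {
    new_node(ExecutionContext {
        geth: GethConfiguration { path: "/usr/bin/geth" },
        wallet: WalletConfiguration { mnemonic: "test test junk" },
    });
}
```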
+42 -14
View File
@@ -5,10 +5,13 @@ use std::{
thread, thread,
}; };
use revive_dt_common::cached_fs::read_to_string; use alloy::genesis::Genesis;
use anyhow::Context as _;
use anyhow::Context; use revive_dt_config::{
use revive_dt_config::Arguments; ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
WorkingDirectoryConfiguration,
};
use tracing::info; use tracing::info;
use crate::Node; use crate::Node;
@@ -25,18 +28,31 @@ where
T: Node + Send + 'static, T: Node + Send + 'static,
{ {
/// Create a new Pool. This will start as many nodes as there are workers in `config`. /// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new(config: &Arguments) -> anyhow::Result<Self> { pub fn new(
let nodes = config.number_of_nodes; context: impl AsRef<WorkingDirectoryConfiguration>
let genesis = read_to_string(&config.genesis_file).context(format!( + AsRef<ConcurrencyConfiguration>
"can not read genesis file: {}", + AsRef<GenesisConfiguration>
config.genesis_file.display() + AsRef<WalletConfiguration>
))?; + AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Send
+ Sync
+ Clone
+ 'static,
) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;
let genesis = genesis_configuration.genesis()?;
let mut handles = Vec::with_capacity(nodes); let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes { for _ in 0..nodes {
let config = config.clone(); let context = context.clone();
let genesis = genesis.clone(); let genesis = genesis.clone();
handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis))); handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
} }
let mut nodes = Vec::with_capacity(nodes); let mut nodes = Vec::with_capacity(nodes);
@@ -64,8 +80,20 @@ where
} }
} }
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> { fn spawn_node<T: Node + Send>(
let mut node = T::new(args); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone
+ 'static,
genesis: Genesis,
) -> anyhow::Result<T> {
let mut node = T::new(context);
info!( info!(
id = node.id(), id = node.id(),
connection_string = node.connection_string(), connection_string = node.connection_string(),
+38 -23
View File
@@ -12,7 +12,7 @@ use alloy_primitives::Address;
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Arguments, TestingPlatform}; use revive_dt_config::{Context, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance}; use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version; use semver::Version;
use serde::Serialize; use serde::Serialize;
@@ -36,11 +36,11 @@ pub struct ReportAggregator {
} }
impl ReportAggregator { impl ReportAggregator {
pub fn new(config: Arguments) -> Self { pub fn new(context: Context) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>(); let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024); let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self { Self {
report: Report::new(config), report: Report::new(context),
remaining_cases: Default::default(), remaining_cases: Default::default(),
runner_tx: Some(runner_tx), runner_tx: Some(runner_tx),
runner_rx, runner_rx,
@@ -121,7 +121,12 @@ impl ReportAggregator {
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_path = self.report.config.directory().join(file_name); let file_path = self
.report
.context
.working_directory_configuration()
.as_path()
.join(file_name);
let file = OpenOptions::new() let file = OpenOptions::new()
.create(true) .create(true)
.write(true) .write(true)
@@ -282,8 +287,16 @@ impl ReportAggregator {
&mut self, &mut self,
event: PreLinkContractsCompilationSucceededEvent, event: PreLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input; let include_input = self
let include_output = self.report.config.report_include_compiler_output; .report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -311,8 +324,16 @@ impl ReportAggregator {
&mut self, &mut self,
event: PostLinkContractsCompilationSucceededEvent, event: PostLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input; let include_input = self
let include_output = self.report.config.report_include_compiler_output; .report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -406,12 +427,8 @@ impl ReportAggregator {
#[serde_as] #[serde_as]
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct Report { pub struct Report {
/// The configuration that the tool was started up with. /// The context that the tool was started up with.
pub config: Arguments, pub context: Context,
/// The platform of the leader chain.
pub leader_platform: TestingPlatform,
/// The platform of the follower chain.
pub follower_platform: TestingPlatform,
/// The list of corpus files that the tool found. /// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>, pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool. /// The list of metadata files that were found by the tool.
@@ -423,11 +440,9 @@ pub struct Report {
} }
impl Report { impl Report {
pub fn new(config: Arguments) -> Self { pub fn new(context: Context) -> Self {
Self { Self {
leader_platform: config.leader, context,
follower_platform: config.follower,
config,
corpora: Default::default(), corpora: Default::default(),
metadata_files: Default::default(), metadata_files: Default::default(),
test_case_information: Default::default(), test_case_information: Default::default(),
@@ -517,12 +532,12 @@ pub enum CompilationStatus {
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// cached and the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the /// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI configurations. /// CLI context.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>, compiler_output: Option<CompilerOutput>,
}, },
@@ -537,8 +552,8 @@ pub enum CompilationStatus {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// cached and the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
}, },
+1 -1
View File
@@ -13,7 +13,7 @@ use semver::Version;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use crate::download::SolcDownloader; use crate::download::SolcDownloader;
use anyhow::Context; use anyhow::Context as _;
pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default); pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
+1 -1
View File
@@ -11,7 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use crate::list::List; use crate::list::List;
use anyhow::Context; use anyhow::Context as _;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> = pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default); LazyLock::new(Default::default);
+1 -1
View File
@@ -5,7 +5,7 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use anyhow::Context; use anyhow::Context as _;
use cache::get_or_download; use cache::get_or_download;
use download::SolcDownloader; use download::SolcDownloader;
+6 -6
View File
@@ -89,13 +89,13 @@ echo "This may take a while..."
echo "" echo ""
# Run the tool # Run the tool
RUST_LOG="error" cargo run --release -- \ RUST_LOG="error" cargo run --release -- execute-tests \
--corpus "$CORPUS_FILE" \ --corpus "$CORPUS_FILE" \
--workdir "$WORKDIR" \ --working-directory "$WORKDIR" \
--number-of-nodes 5 \ --concurrency.number-of-nodes 5 \
--kitchensink "$SUBSTRATE_NODE_BIN" \ --kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node "$REVIVE_DEV_NODE_BIN" \ --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth_proxy "$ETH_RPC_BIN" \ --eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \ > logs.log \
2> output.log 2> output.log
+497
View File
@@ -0,0 +1,497 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "Metadata",
"description": "A MatterLabs metadata file.\n\nThis defines the structure that the MatterLabs metadata files follow for defining the tests or\nthe workloads.\n\nEach metadata file is composed of multiple test cases where each test case is isolated from the\nothers and runs in a completely different address space. Each test case is composed of a number\nof steps and assertions that should be performed as part of the test case.",
"type": "object",
"properties": {
"comment": {
"description": "This is an optional comment on the metadata file which has no impact on the execution in any\nway.",
"type": [
"string",
"null"
]
},
"ignore": {
"description": "An optional boolean which defines if the metadata file as a whole should be ignored. If null\nthen the metadata file will not be ignored.",
"type": [
"boolean",
"null"
]
},
"targets": {
"description": "An optional vector of targets that this Metadata file's cases can be executed on. As an\nexample, if we wish for the metadata file's cases to only be run on PolkaVM then we'd\nspecify a target of \"PolkaVM\" in here.",
"type": [
"array",
"null"
],
"items": {
"type": "string"
}
},
"cases": {
"description": "A vector of the test cases and workloads contained within the metadata file. This is their\nprimary description.",
"type": "array",
"items": {
"$ref": "#/$defs/Case"
}
},
"contracts": {
"description": "A map of all of the contracts that the test requires to run.\n\nThis is a map where the key is the name of the contract instance and the value is the\ncontract's path and ident in the file.\n\nIf any contract is to be used by the test then it must be included in here first so that the\nframework is aware of its path, compiles it, and prepares it.",
"type": [
"object",
"null"
],
"additionalProperties": {
"$ref": "#/$defs/ContractPathAndIdent"
}
},
"libraries": {
"description": "The set of libraries that this metadata file requires.",
"type": [
"object",
"null"
],
"additionalProperties": {
"type": "object",
"additionalProperties": {
"$ref": "#/$defs/ContractInstance"
}
}
},
"modes": {
"description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/$defs/ParsedMode"
}
},
"required_evm_version": {
"description": "This field specifies an EVM version requirement that the test case has where the test might\nbe run of the evm version of the nodes match the evm version specified here.",
"anyOf": [
{
"$ref": "#/$defs/EvmVersionRequirement"
},
{
"type": "null"
}
]
},
"compiler_directives": {
"description": "A set of compilation directives that will be passed to the compiler whenever the contracts\nfor the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]\nis just a filter for when a test can run whereas this is an instruction to the compiler.",
"anyOf": [
{
"$ref": "#/$defs/CompilationDirectives"
},
{
"type": "null"
}
]
}
},
"required": [
"cases"
],
"$defs": {
"Case": {
"type": "object",
"properties": {
"name": {
"description": "An optional name of the test case.",
"type": [
"string",
"null"
]
},
"comment": {
"description": "An optional comment on the case which has no impact on the execution in any way.",
"type": [
"string",
"null"
]
},
"modes": {
"description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nIf this is provided then it takes higher priority than the modes specified in the metadata\nfile.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/$defs/ParsedMode"
}
},
"inputs": {
"description": "The set of steps to run as part of this test case.",
"type": "array",
"items": {
"$ref": "#/$defs/Step"
}
},
"group": {
"description": "An optional name of the group of tests that this test belongs to.",
"type": [
"string",
"null"
]
},
"expected": {
"description": "An optional set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be ran is that the transaction\nwas successful.\n\nThis expectation that's on the case itself will be attached to the final step of the case.",
"anyOf": [
{
"$ref": "#/$defs/Expected"
},
{
"type": "null"
}
]
},
"ignore": {
"description": "An optional boolean which defines if the case as a whole should be ignored. If null then the\ncase will not be ignored.",
"type": [
"boolean",
"null"
]
}
},
"required": [
"inputs"
]
},
"ParsedMode": {
"description": "This represents a mode that has been parsed from test metadata.\n\nMode strings can take the following form (in pseudo-regex):\n\n```text\n[YEILV][+-]? (M[0123sz])? <semver>?\n```\n\nWe can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].",
"type": "string"
},
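For illustration, here are a few strings that match the mode grammar above. These are hypothetical values assembled from the pseudo-regex, not taken from any actual test corpus:

```json
["Y", "E+", "I M3", "Y- Mz 0.8.25"]
```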
"Step": {
"description": "A test step.\n\nA test step can be anything. It could be an invocation to a function, an assertion, or any other\naction that needs to be run or executed on the nodes used in the tests.",
"anyOf": [
{
"description": "A function call or an invocation to some function on some smart contract.",
"$ref": "#/$defs/Input"
},
{
"description": "A step for performing a balance assertion on some account or contract.",
"$ref": "#/$defs/BalanceAssertion"
},
{
"description": "A step for asserting that the storage of some contract or account is empty.",
"$ref": "#/$defs/StorageEmptyAssertion"
}
]
},
"Input": {
"description": "This is an input step which is a transaction description that the framework translates into a\ntransaction and executes on the nodes.",
"type": "object",
"properties": {
"caller": {
"description": "The address of the account performing the call and paying the fees for it.",
"type": "string",
"default": "0x90f8bf6a479f320ead074411a4b0e7944ea8c9c1"
},
"comment": {
"description": "An optional comment on the step which has no impact on the execution in any way.",
"type": [
"string",
"null"
]
},
"instance": {
"description": "The contract instance that's being called in this transaction step.",
"$ref": "#/$defs/ContractInstance",
"default": "Test"
},
"method": {
"description": "The method that's being called in this step.",
"$ref": "#/$defs/Method"
},
"calldata": {
"description": "The calldata that the function should be invoked with.",
"$ref": "#/$defs/Calldata",
"default": []
},
"expected": {
"description": "A set of assertions and expectations to have for the transaction.",
"anyOf": [
{
"$ref": "#/$defs/Expected"
},
{
"type": "null"
}
]
},
"value": {
"description": "An optional value to provide as part of the transaction.",
"anyOf": [
{
"$ref": "#/$defs/EtherValue"
},
{
"type": "null"
}
]
},
"variable_assignments": {
"description": "Variable assignment to perform in the framework allowing us to reference them again later on\nduring the execution.",
"anyOf": [
{
"$ref": "#/$defs/VariableAssignments"
},
{
"type": "null"
}
]
}
},
"required": [
"method"
]
},
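As a sketch of what an `Input` step could look like in a metadata file: the `transfer` method, the `Receiver` instance, and the concrete values are hypothetical; only `method` is required, and `caller`, `instance`, and `calldata` fall back to the defaults shown above.

```json
{
  "comment": "Send 100 tokens to the receiver",
  "instance": "Test",
  "method": "transfer",
  "calldata": ["Receiver.address", "0x64"],
  "expected": ["0x01"]
}
```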
"ContractInstance": {
"description": "Represents a contract instance found a metadata file.\n\nTypically, this is used as the key to the \"contracts\" field of metadata files.",
"type": "string"
},
"Method": {
"description": "Specify how the contract is called.",
"anyOf": [
{
"description": "Initiate a deploy transaction, calling contracts constructor.\n\nIndicated by `#deployer`.",
"type": "string",
"const": "#deployer"
},
{
"description": "Does not calculate and insert a function selector.\n\nIndicated by `#fallback`.",
"type": "string",
"const": "#fallback"
},
{
"description": "Call the public function with the given name.",
"type": "string"
}
]
},
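Concretely, all three forms are plain strings in the metadata file; `balanceOf` below stands in for any public function name and is purely illustrative:

```json
["#deployer", "#fallback", "balanceOf"]
```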
"Calldata": {
"description": "A type definition for the calldata supported by the testing framework.\n\nWe choose to document all of the types used in [`Calldata`] in this one doc comment to elaborate\non why they exist and consolidate all of the documentation for calldata in a single place where\nit can be viewed and understood.\n\nThe [`Single`] variant of this enum is quite simple and straightforward: it's a hex-encoded byte\narray of the calldata.\n\nThe [`Compound`] type is more intricate and allows for capabilities such as resolution and some\nsimple arithmetic operations. It houses a vector of [`CalldataItem`]s which is just a wrapper\naround an owned string.\n\nA [`CalldataItem`] could be a simple hex string of a single calldata argument, but it could also\nbe something that requires resolution such as `MyContract.address` which is a variable that is\nunderstood by the resolution logic to mean \"Lookup the address of this particular contract\ninstance\".\n\nIn addition to the above, the format supports some simple arithmetic operations like add, sub,\ndivide, multiply, bitwise AND, bitwise OR, and bitwise XOR. Our parser understands the [reverse\npolish notation] simply because it's easy to write a calculator for that notation and since we\ndo not have plans to use arithmetic too often in tests. In reverse polish notation a typical\n`2 + 4` would be written as `2 4 +` which makes this notation very simple to implement through\na stack.\n\nCombining the above, a single [`CalldataItem`] could employ both resolution and arithmetic at\nthe same time. For example, a [`CalldataItem`] of `$BLOCK_NUMBER $BLOCK_NUMBER +` means that\nthe block number should be retrieved and then it should be added to itself.\n\nInternally, we split the [`CalldataItem`] by spaces. Therefore, `$BLOCK_NUMBER $BLOCK_NUMBER+`\nis invalid but `$BLOCK_NUMBER $BLOCK_NUMBER +` is valid and can be understood by the parser and\ncalculator. After the split is done, each token is parsed into a [`CalldataToken<&str>`] forming\nan [`Iterator`] over [`CalldataToken<&str>`]. A [`CalldataToken<&str>`] can then be resolved\ninto a [`CalldataToken<U256>`] through the resolution logic. Finally, after resolution is done,\nthis iterator of [`CalldataToken<U256>`] is collapsed into the final result by applying the\narithmetic operations requested.\n\nFor example, supplying a [`Compound`] calldata of `0xdeadbeef` produces an iterator of a single\n[`CalldataToken<&str>`] items of the value [`CalldataToken::Item`] of the string value 12 which\nwe can then resolve into the appropriate [`U256`] value and convert into calldata.\n\nIn summary, the various types used in [`Calldata`] represent the following:\n- [`CalldataItem`]: A calldata string from the metadata files.\n- [`CalldataToken<&str>`]: Typically used in an iterator of items from the space splitted\n [`CalldataItem`] and represents a token that has not yet been resolved into its value.\n- [`CalldataToken<U256>`]: Represents a token that's been resolved from being a string and into\n the word-size calldata argument on which we can perform arithmetic.\n\n[`Single`]: Calldata::Single\n[`Compound`]: Calldata::Compound\n[reverse polish notation]: https://en.wikipedia.org/wiki/Reverse_Polish_notation",
"anyOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"$ref": "#/$defs/CalldataItem"
}
}
]
},
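The `Single` variant is just a hex string such as `"0xdeadbeef"`. Below is a hypothetical `Compound` calldata combining the capabilities described above: a plain hex argument, contract-address resolution, and reverse-polish arithmetic. The `$BLOCK_NUMBER` and `Test.address` tokens follow the examples in the description; their availability depends on the framework's resolution rules.

```json
[
  "0x0000000000000000000000000000000000000000000000000000000000000001",
  "Test.address",
  "$BLOCK_NUMBER $BLOCK_NUMBER +"
]
```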
"CalldataItem": {
"description": "This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved\naccording to the resolution rules of the tool.",
"type": "string"
},
"Expected": {
"description": "A set of expectations and assertions to make about the transaction after it ran.\n\nIf this is not specified then the only assertion that will be ran is that the transaction\nwas successful.",
"anyOf": [
{
"description": "An assertion that the transaction succeeded and returned the provided set of data.",
"$ref": "#/$defs/Calldata"
},
{
"description": "A more complex assertion.",
"$ref": "#/$defs/ExpectedOutput"
},
{
"description": "A set of assertions.",
"type": "array",
"items": {
"$ref": "#/$defs/ExpectedOutput"
}
}
]
},
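Since `Expected` is an untagged union, the same assertion can be written in several shapes. A sketch of the most explicit form, a list of `ExpectedOutput` objects, follows; the compiler-version string format is an assumption, as the schema only constrains it to be a string:

```json
[
  { "return_data": ["0x01"] },
  { "compiler_version": ">=0.8.0", "exception": true }
]
```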
"ExpectedOutput": {
"description": "A set of assertions to run on the transaction.",
"type": "object",
"properties": {
"compiler_version": {
"description": "An optional compiler version that's required in order for this assertion to run.",
"type": [
"string",
"null"
]
},
"return_data": {
"description": "An optional field of the expected returns from the invocation.",
"anyOf": [
{
"$ref": "#/$defs/Calldata"
},
{
"type": "null"
}
]
},
"events": {
"description": "An optional set of assertions to run on the emitted events from the transaction.",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/$defs/Event"
}
},
"exception": {
"description": "A boolean which defines whether we expect the transaction to succeed or fail.",
"type": "boolean",
"default": false
}
}
},
"Event": {
"type": "object",
"properties": {
"address": {
"description": "An optional field of the address of the emitter of the event.",
"type": [
"string",
"null"
]
},
"topics": {
"description": "The set of topics to expect the event to have.",
"type": "array",
"items": {
"type": "string"
}
},
"values": {
"description": "The set of values to expect the event to have.",
"$ref": "#/$defs/Calldata"
}
},
"required": [
"topics",
"values"
]
},
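A sketch of an event assertion: the topic below is the keccak-256 hash of the standard ERC-20 `Transfer(address,address,uint256)` signature, and using `Test.address` for the emitter assumes the address field goes through the same resolution rules as calldata:

```json
{
  "address": "Test.address",
  "topics": ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"],
  "values": ["0x64"]
}
```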
"EtherValue": {
"description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
"type": "string"
},
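Plausible `EtherValue` strings under the stated format of an unsigned integer followed by a denomination; the exact whitespace handling is an assumption:

```json
["100 wei", "5 gwei", "1 ether"]
```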
"VariableAssignments": {
"type": "object",
"properties": {
"return_data": {
"description": "A vector of the variable names to assign to the return data.\n\nExample: `UniswapV3PoolAddress`",
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [
"return_data"
]
},
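Using the `UniswapV3PoolAddress` example from the description, a `variable_assignments` object binds the step's return data to that name so later steps can reference it:

```json
{ "return_data": ["UniswapV3PoolAddress"] }
```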
"BalanceAssertion": {
"description": "This represents a balance assertion step where the framework needs to query the balance of some\naccount or contract and assert that it's some amount.",
"type": "object",
"properties": {
"comment": {
"description": "An optional comment on the balance assertion.",
"type": [
"string",
"null"
]
},
"address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"type": "string"
},
"expected_balance": {
"description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
"type": "string"
}
},
"required": [
"address",
"expected_balance"
]
},
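A sketch of a balance-assertion step; the `Receiver` instance is hypothetical, and the balance is a decimal string, here one ether in wei:

```json
{
  "comment": "The receiver was credited exactly one ether",
  "address": "Receiver.address",
  "expected_balance": "1000000000000000000"
}
```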
"StorageEmptyAssertion": {
"type": "object",
"properties": {
"comment": {
"description": "An optional comment on the storage empty assertion.",
"type": [
"string",
"null"
]
},
"address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"type": "string"
},
"is_storage_empty": {
"description": "A boolean of whether the storage of the address is empty or not.",
"type": "boolean"
}
},
"required": [
"address",
"is_storage_empty"
]
},
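And a minimal storage-empty assertion against the default `Test` instance:

```json
{
  "address": "Test.address",
  "is_storage_empty": true
}
```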
"ContractPathAndIdent": {
"description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
"type": "string"
},
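Following the `${path}:${contract_ident}` format, a hypothetical entry would look like:

```json
"contracts/Counter.sol:Counter"
```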
"EvmVersionRequirement": {
"description": "An EVM version requirement that the test case has. This gets serialized and deserialized from\nand into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the\nEVM version.\n\nWhen specified, the framework will only run the test if the node's EVM version matches that\nrequired by the metadata file.",
"type": "string"
},
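Given the `(>=|<=|=|>|<)` prefix format, a requirement string might look like the following; the version name itself (`shanghai`) is an assumed example, since the schema does not enumerate the accepted EVM version strings:

```json
">=shanghai"
```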
"CompilationDirectives": {
"description": "A set of compilation directives that will be passed to the compiler whenever the contracts for\nthe test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is\njust a filter for when a test can run whereas this is an instruction to the compiler.\nDefines how the compiler should handle revert strings.",
"type": "object",
"properties": {
"revert_string_handling": {
"description": "Defines how the revert strings should be handled.",
"anyOf": [
{
"$ref": "#/$defs/RevertString"
},
{
"type": "null"
}
]
}
}
},
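A compilation-directives object currently carries only the revert-string handling, using one of the four constants defined under `RevertString` below:

```json
{ "revert_string_handling": "strip" }
```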
"RevertString": {
"description": "Defines how the compiler should handle revert strings.",
"oneOf": [
{
"description": "The default handling of the revert strings.",
"type": "string",
"const": "default"
},
{
"description": "The debug handling of the revert strings.",
"type": "string",
"const": "debug"
},
{
"description": "Strip the revert strings.",
"type": "string",
"const": "strip"
},
{
"description": "Provide verbose debug strings for the revert string.",
"type": "string",
"const": "verboseDebug"
}
]
}
}
}
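Putting the pieces together, a minimal metadata file that validates against this schema could look like the sketch below. The contract path, `Flipper` identifier, and `flip` method are hypothetical; `cases`, each case's `inputs`, and each input's `method` are the only required fields:

```json
{
  "contracts": {
    "Test": "contracts/Flipper.sol:Flipper"
  },
  "targets": ["PolkaVM"],
  "cases": [
    {
      "name": "flip_once",
      "inputs": [
        { "method": "#deployer" },
        { "method": "flip", "expected": { "exception": false } }
      ]
    }
  ]
}
```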