Compare commits

...

6 Commits

Author SHA1 Message Date
Omar Abdulla 77a875ed6f Use shared node more often in tests 2025-09-04 17:10:46 +03:00
Omar Abdulla e06dd491b9 Use kitchensink in tests 2025-09-04 17:09:01 +03:00
Omar Abdulla a30d4f9b9e Fix tests 2025-09-04 14:40:09 +03:00
Omar Abdulla b4118faa3d Update the run script 2025-09-02 19:39:26 +03:00
Omar Abdulla 762b45ffd1 Update usage guides 2025-09-02 18:55:31 +03:00
Omar Abdulla 1ec1778e32 Cleanup the config 2025-09-02 18:03:42 +03:00
23 changed files with 805 additions and 441 deletions
Generated
+5 -3
@@ -4501,9 +4501,12 @@ name = "revive-dt-config"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"alloy", "alloy",
"anyhow",
"clap", "clap",
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"serde_json",
"strum",
"temp-dir", "temp-dir",
] ]
@@ -4528,7 +4531,6 @@ dependencies = [
"semver 1.0.26", "semver 1.0.26",
"serde", "serde",
"serde_json", "serde_json",
"temp-dir",
"tokio", "tokio",
"tracing", "tracing",
"tracing-appender", "tracing-appender",
@@ -5690,9 +5692,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]] [[package]]
name = "strum" name = "strum"
version = "0.27.1" version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
dependencies = [ dependencies = [
"strum_macros", "strum_macros",
] ]
+1
@@ -48,6 +48,7 @@ serde_with = { version = "3.14.0" }
sha2 = { version = "0.10.9" } sha2 = { version = "0.10.9" }
sp-core = "36.1.0" sp-core = "36.1.0"
sp-runtime = "41.1.0" sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
temp-dir = { version = "0.1.16" } temp-dir = { version = "0.1.16" }
tempfile = "3.3" tempfile = "3.3"
thiserror = "2" thiserror = "2"
+4 -3
@@ -187,10 +187,11 @@ The above corpus file instructs the tool to look for all of the test cases conta
The simplest command to run this tool is the following: The simplest command to run this tool is the following:
```bash ```bash
RUST_LOG="info" cargo run --release -- \ RUST_LOG="info" cargo run --release -- execute-tests \
--follower geth \
--corpus path_to_your_corpus_file.json \ --corpus path_to_your_corpus_file.json \
--workdir path_to_a_temporary_directory_to_cache_things_in \ --working-directory path_to_a_temporary_directory_to_cache_things_in \
--number-of-nodes 5 \ --concurrency.number-of-nodes 5 \
> logs.log \ > logs.log \
2> output.log 2> output.log
``` ```
+6 -4
View File
@@ -11,14 +11,14 @@ use std::{
use alloy::json_abi::JsonAbi; use alloy::json_abi::JsonAbi;
use alloy_primitives::Address; use alloy_primitives::Address;
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::cached_fs::read_to_string; use revive_dt_common::cached_fs::read_to_string;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
// Re-export this as it's a part of the compiler interface. // Re-export this as it's a part of the compiler interface.
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -31,11 +31,13 @@ pub mod solc;
pub trait SolidityCompiler: Sized { pub trait SolidityCompiler: Sized {
/// Instantiates a new compiler object. /// Instantiates a new compiler object.
/// ///
/// Based on the given [`Arguments`] and [`VersionOrRequirement`] this function instantiates a /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
/// new compiler object. Certain implementations of this trait might choose to cache the /// compiler objects and return the same ones over and over again.
/// compiler objects and return the same ones over and over again. /// compiler objects and return the same ones over and over again.
fn new( fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> impl Future<Output = Result<Self>>; ) -> impl Future<Output = Result<Self>>;
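The new `new` signature swaps the monolithic `&Arguments` for an `impl AsRef<…> + AsRef<…>` bound, so each compiler names only the configuration slices it actually reads. A minimal sketch of the pattern, with illustrative types and fields rather than the crate's real ones:

```rust
use std::path::PathBuf;

struct SolcConfiguration {
    version: String,
}

struct WorkingDirectoryConfiguration {
    path: PathBuf,
}

// The context owns every configuration slice and hands out references.
struct ExecutionContext {
    solc: SolcConfiguration,
    working_directory: WorkingDirectoryConfiguration,
}

impl AsRef<SolcConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc
    }
}

impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.working_directory
    }
}

// A consumer declares only what it needs. With several `AsRef` bounds in
// scope, a bare `context.as_ref()` is ambiguous, which is why the diff uses
// the explicit `AsRef::<T>::as_ref(&context)` turbofish form.
fn describe(context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>) {
    let solc = AsRef::<SolcConfiguration>::as_ref(&context);
    let workdir = AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
    println!("solc {} in {}", solc.version, workdir.path.display());
}

fn main() {
    describe(ExecutionContext {
        solc: SolcConfiguration { version: "0.8.29".into() },
        working_directory: WorkingDirectoryConfiguration { path: PathBuf::from("/tmp/work") },
    });
}
```

One upside of this shape over a concrete `&ExecutionContext`: tests or other tools can pass any type that implements the needed `AsRef`s, not just the full CLI context.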
+9 -5
@@ -9,7 +9,7 @@ use std::{
use dashmap::DashMap; use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_solc_json_interface::{ use revive_solc_json_interface::{
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings, SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection, SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
@@ -21,7 +21,7 @@ use crate::{
}; };
use alloy::json_abi::JsonAbi; use alloy::json_abi::JsonAbi;
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use semver::Version; use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -39,7 +39,9 @@ struct ResolcInner {
impl SolidityCompiler for Resolc { impl SolidityCompiler for Resolc {
async fn new( async fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> { ) -> Result<Self> {
/// This is a cache of all of the resolc compiler objects. Since we do not currently support /// This is a cache of all of the resolc compiler objects. Since we do not currently support
@@ -47,7 +49,9 @@ impl SolidityCompiler for Resolc {
/// its version to the resolc compiler. /// its version to the resolc compiler.
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default); static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
let solc = Solc::new(config, version) let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
let solc = Solc::new(&context, version)
.await .await
.context("Failed to create the solc compiler frontend for resolc")?; .context("Failed to create the solc compiler frontend for resolc")?;
@@ -56,7 +60,7 @@ impl SolidityCompiler for Resolc {
.or_insert_with(|| { .or_insert_with(|| {
Self(Arc::new(ResolcInner { Self(Arc::new(ResolcInner {
solc, solc,
resolc_path: config.resolc.clone(), resolc_path: resolc_configuration.path.clone(),
})) }))
}) })
.clone()) .clone())
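Both frontends cache compiler handles the same way: a process-wide `static LazyLock<DashMap<K, V>>` combined with `entry().or_insert_with().clone()`, so concurrent requests for the same key share one cheaply cloneable handle. A stripped-down, standalone sketch of that pattern (toy `Compiler` type; `dashmap` and `semver` assumed as dependencies):

```rust
use std::sync::{Arc, LazyLock};

use dashmap::DashMap;
use semver::Version;

// Cheaply cloneable handle, mirroring how `Solc`/`Resolc` wrap an inner `Arc`.
#[derive(Clone)]
struct Compiler(Arc<Version>);

fn get_compiler(version: Version) -> Compiler {
    // One map per process; `LazyLock` defers construction until first use.
    static COMPILERS_CACHE: LazyLock<DashMap<Version, Compiler>> =
        LazyLock::new(Default::default);

    COMPILERS_CACHE
        .entry(version.clone())
        // Runs only the first time this version is requested.
        .or_insert_with(|| Compiler(Arc::new(version)))
        .clone()
}

fn main() {
    let a = get_compiler(Version::new(0, 8, 30));
    let b = get_compiler(Version::new(0, 8, 30));
    // Both calls observe the same cached instance.
    assert!(Arc::ptr_eq(&a.0, &b.0));
}
```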
+17 -7
@@ -9,12 +9,12 @@ use std::{
use dashmap::DashMap; use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::Arguments; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_dt_solc_binaries::download_solc; use revive_dt_solc_binaries::download_solc;
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler}; use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
use anyhow::{Context, Result}; use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{ use foundry_compilers_artifacts::{
output_selection::{ output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -38,25 +38,35 @@ struct SolcInner {
impl SolidityCompiler for Solc { impl SolidityCompiler for Solc {
async fn new( async fn new(
config: &Arguments, context: impl AsRef<SolcConfiguration>
+ AsRef<ResolcConfiguration>
+ AsRef<WorkingDirectoryConfiguration>,
version: impl Into<Option<VersionOrRequirement>>, version: impl Into<Option<VersionOrRequirement>>,
) -> Result<Self> { ) -> Result<Self> {
// This is a cache for the compiler objects so that whenever the same compiler version is // This is a cache for the compiler objects so that whenever the same compiler version is
// requested the same object is returned. We do this as we do not want to keep cloning the // requested the same object is returned. We do this as we do not want to keep cloning the
// compiler around. // compiler around.
static COMPILERS_CACHE: LazyLock<DashMap<Version, Solc>> = LazyLock::new(Default::default); static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
LazyLock::new(Default::default);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
// We attempt to download the solc binary. Note the following: this call does the version // We attempt to download the solc binary. Note the following: this call does the version
// resolution for us. Therefore, even if the download didn't proceed, this function will // resolution for us. Therefore, even if the download didn't proceed, this function will
// resolve the version requirement into a canonical version of the compiler. It's then up // resolve the version requirement into a canonical version of the compiler. It's then up
// to us to either use the provided path or not. // to us to either use the provided path or not.
let version = version.into().unwrap_or_else(|| config.solc.clone().into()); let version = version
let (version, path) = download_solc(config.directory(), version, false) .into()
.unwrap_or_else(|| solc_configuration.version.clone().into());
let (version, path) =
download_solc(working_directory_configuration.as_path(), version, false)
.await .await
.context("Failed to download/get path to solc binary")?; .context("Failed to download/get path to solc binary")?;
Ok(COMPILERS_CACHE Ok(COMPILERS_CACHE
.entry(version.clone()) .entry((path.clone(), version.clone()))
.or_insert_with(|| { .or_insert_with(|| {
Self(Arc::new(SolcInner { Self(Arc::new(SolcInner {
solc_path: path, solc_path: path,
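Note the widened cache key above: `DashMap<Version, Solc>` becomes `DashMap<(PathBuf, Version), Solc>`. Keying on the version alone would make two different binaries that report the same version collide, with whichever was cached first shadowing the other. A small illustration of the difference (paths and versions are made up):

```rust
use std::collections::HashMap;
use std::path::PathBuf;

fn main() {
    // Hypothetical: two solc binaries that report the same version.
    let entries = [
        (PathBuf::from("/usr/bin/solc"), "0.8.30"),
        (PathBuf::from("/opt/custom/solc"), "0.8.30"),
    ];

    // Keyed by version only: the second insert silently replaces the first.
    let mut by_version: HashMap<&str, PathBuf> = HashMap::new();
    for (path, version) in &entries {
        by_version.insert(*version, path.clone());
    }
    assert_eq!(by_version.len(), 1);

    // Keyed by (path, version): each binary keeps its own cache slot.
    let mut by_path_and_version: HashMap<(PathBuf, &str), ()> = HashMap::new();
    for (path, version) in &entries {
        by_path_and_version.insert((path.clone(), *version), ());
    }
    assert_eq!(by_path_and_version.len(), 2);
}
```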
+3 -3
@@ -2,13 +2,13 @@ use std::path::PathBuf;
use revive_dt_common::types::VersionOrRequirement; use revive_dt_common::types::VersionOrRequirement;
use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::Arguments; use revive_dt_config::ExecutionContext;
use semver::Version; use semver::Version;
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_solc() { async fn contracts_can_be_compiled_with_solc() {
// Arrange // Arrange
let args = Arguments::default(); let args = ExecutionContext::default();
let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
@@ -49,7 +49,7 @@ async fn contracts_can_be_compiled_with_solc() {
#[tokio::test] #[tokio::test]
async fn contracts_can_be_compiled_with_resolc() { async fn contracts_can_be_compiled_with_resolc() {
// Arrange // Arrange
let args = Arguments::default(); let args = ExecutionContext::default();
let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
.await .await
.unwrap(); .unwrap();
+3
@@ -10,10 +10,13 @@ rust-version.workspace = true
[dependencies] [dependencies]
alloy = { workspace = true } alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true } clap = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
temp-dir = { workspace = true } temp-dir = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true }
strum = { workspace = true }
[lints] [lints]
workspace = true workspace = true
+482 -136
@@ -2,215 +2,561 @@
use std::{ use std::{
fmt::Display, fmt::Display,
fs::read_to_string,
ops::Deref,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::LazyLock, str::FromStr,
sync::{Arc, LazyLock, OnceLock},
time::Duration,
}; };
use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner}; use alloy::{
use clap::{Parser, ValueEnum}; genesis::Genesis,
hex::ToHexExt,
network::EthereumWallet,
primitives::{FixedBytes, U256},
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir; use temp_dir::TempDir;
#[derive(Debug, Parser, Clone, Serialize, Deserialize)] #[derive(Clone, Debug, Parser, Serialize)]
#[command(name = "retester")] #[command(name = "retester")]
pub struct Arguments { pub enum Context {
/// The `solc` version to use if the test didn't specify it explicitly. /// Executes tests in the MatterLabs format differentially against a leader and a follower.
#[arg(long = "solc", short, default_value = "0.8.29")] ExecuteTests(ExecutionContext),
pub solc: Version, }
/// Use the Wasm compiler versions. impl Context {
#[arg(long = "wasm")] pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
pub wasm: bool, self.as_ref()
}
/// The path to the `resolc` executable to be tested. pub fn report_configuration(&self) -> &ReportConfiguration {
self.as_ref()
}
}
impl AsRef<WorkingDirectoryConfiguration> for Context {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.working_directory,
}
}
}
impl AsRef<ReportConfiguration> for Context {
fn as_ref(&self) -> &ReportConfiguration {
match self {
Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
}
}
}
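Deriving `Parser` on an enum is what turns the old flat flag list into a subcommand interface: each variant becomes a kebab-cased subcommand (`ExecuteTests` → `execute-tests`, matching the README change above), and the variant's payload struct carries that subcommand's flags. A minimal sketch, assuming `clap` with the `derive` feature:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
#[command(name = "retester")]
enum Context {
    /// Executes tests differentially against a leader and a follower.
    ExecuteTests(ExecutionContext),
}

#[derive(Debug, Parser)]
struct ExecutionContext {
    #[clap(long, default_value_t = 5)]
    number_of_nodes: usize,
}

fn main() {
    // The variant name surfaces as the `execute-tests` subcommand; the `let`
    // pattern is irrefutable while the enum has a single variant.
    let Context::ExecuteTests(context) =
        Context::parse_from(["retester", "execute-tests", "--number-of-nodes", "3"]);
    assert_eq!(context.number_of_nodes, 3);
}
```

Adding another variant later (say, a report-only mode) would extend the CLI without touching the existing flags.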
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
/// ///
/// By default it uses the `resolc` binary found in `$PATH`. /// If not specified, then a temporary directory will be created and used by the program for all
/// /// temporary artifacts.
/// If `--wasm` is set, this should point to the resolc Wasm file. #[clap(
#[arg(long = "resolc", short, default_value = "resolc")] short,
pub resolc: PathBuf, long,
default_value = "",
value_hint = ValueHint::DirPath,
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The differential testing leader node implementation.
#[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
pub leader: TestingPlatform,
/// The differential testing follower node implementation.
#[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
pub follower: TestingPlatform,
/// A list of test corpus JSON files to be tested. /// A list of test corpus JSON files to be tested.
#[arg(long = "corpus", short)] #[arg(long = "corpus", short)]
pub corpus: Vec<PathBuf>, pub corpus: Vec<PathBuf>,
/// A place to store temporary artifacts during test execution. /// Configuration parameters for the solc compiler.
/// #[clap(flatten, next_help_heading = "Solc Configuration")]
/// Creates a temporary dir if not specified. pub solc_configuration: SolcConfiguration,
#[arg(long = "workdir", short)]
pub working_directory: Option<PathBuf>,
/// Add a tempdir manually if `working_directory` was not given. /// Configuration parameters for the resolc compiler.
#[clap(flatten, next_help_heading = "Resolc Configuration")]
pub resolc_configuration: ResolcConfiguration,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
/// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the Eth Rpc.
#[clap(flatten, next_help_heading = "Eth RPC Configuration")]
pub eth_rpc_configuration: EthRpcConfiguration,
/// Configuration parameters for the genesis.
#[clap(flatten, next_help_heading = "Genesis Configuration")]
pub genesis_configuration: GenesisConfiguration,
/// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration,
/// Configuration parameters for concurrency.
#[clap(flatten, next_help_heading = "Concurrency Configuration")]
pub concurrency_configuration: ConcurrencyConfiguration,
/// Configuration parameters for the compilers and compilation.
#[clap(flatten, next_help_heading = "Compilation Configuration")]
pub compilation_configuration: CompilationConfiguration,
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
}
impl Default for ExecutionContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
}
}
impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WorkingDirectoryConfiguration {
&self.working_directory
}
}
impl AsRef<SolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &SolcConfiguration {
&self.solc_configuration
}
}
impl AsRef<ResolcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ResolcConfiguration {
&self.resolc_configuration
}
}
impl AsRef<GethConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GethConfiguration {
&self.geth_configuration
}
}
impl AsRef<KitchensinkConfiguration> for ExecutionContext {
fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration
}
}
impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
&self.revive_dev_node_configuration
}
}
impl AsRef<EthRpcConfiguration> for ExecutionContext {
fn as_ref(&self) -> &EthRpcConfiguration {
&self.eth_rpc_configuration
}
}
impl AsRef<GenesisConfiguration> for ExecutionContext {
fn as_ref(&self) -> &GenesisConfiguration {
&self.genesis_configuration
}
}
impl AsRef<WalletConfiguration> for ExecutionContext {
fn as_ref(&self) -> &WalletConfiguration {
&self.wallet_configuration
}
}
impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ConcurrencyConfiguration {
&self.concurrency_configuration
}
}
impl AsRef<CompilationConfiguration> for ExecutionContext {
fn as_ref(&self) -> &CompilationConfiguration {
&self.compilation_configuration
}
}
impl AsRef<ReportConfiguration> for ExecutionContext {
fn as_ref(&self) -> &ReportConfiguration {
&self.report_configuration
}
}
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct SolcConfiguration {
/// Specifies the default version of the Solc compiler that should be used if there is no
/// override specified by one of the test cases.
#[clap(long = "solc.version", default_value = "0.8.29")]
pub version: Version,
}
/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ResolcConfiguration {
/// Specifies the path of the resolc compiler to be used by the tool.
/// ///
/// We attach it here because [TempDir] prunes itself on drop. /// If this is not specified, then the tool assumes that it should use the resolc binary that's
/// provided in the user's $PATH.
#[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
pub path: PathBuf,
}
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GethConfiguration {
/// Specifies the path of the geth node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the geth binary that's
/// provided in the user's $PATH.
#[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "geth.start-timeout-ms",
long = "geth.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KitchensinkConfiguration {
/// Specifies the path of the kitchensink node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the kitchensink binary
/// that's provided in the user's $PATH.
#[clap(
id = "kitchensink.path",
long = "kitchensink.path",
default_value = "substrate-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "kitchensink.start-timeout-ms",
long = "kitchensink.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// This configures the tool to use Kitchensink instead of using the revive-dev-node.
#[clap(long = "kitchensink.dont-use-dev-node")]
pub use_kitchensink: bool,
}
/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the revive dev node binary
/// that's provided in the user's $PATH.
#[clap(
id = "revive-dev-node.path",
long = "revive-dev-node.path",
default_value = "revive-dev-node"
)]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "revive-dev-node.start-timeout-ms",
long = "revive-dev-node.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct EthRpcConfiguration {
/// Specifies the path of the ETH RPC to be used by the tool.
///
/// If this is not specified, then the tool assumes that it should use the ETH RPC binary
/// that's provided in the user's $PATH.
#[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out.
#[clap(
id = "eth-rpc.start-timeout-ms",
long = "eth-rpc.start-timeout-ms",
default_value = "5000",
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
/// This is expected to be the path of a JSON geth genesis file.
#[clap(id = "genesis.path", long = "genesis.path")]
path: Option<PathBuf>,
/// The genesis object found at the provided path.
#[clap(skip)] #[clap(skip)]
#[serde(skip)] #[serde(skip)]
pub temp_dir: Option<&'static TempDir>, genesis: OnceLock<Genesis>,
}
/// The path to the `geth` executable. impl GenesisConfiguration {
/// pub fn genesis(&self) -> anyhow::Result<&Genesis> {
/// By default it uses `geth` binary found in `$PATH`. static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
#[arg(short, long = "geth", default_value = "geth")] let genesis = include_str!("../../../genesis.json");
pub geth: PathBuf, serde_json::from_str(genesis).unwrap()
});
/// The maximum time in milliseconds to wait for geth to start. match self.genesis.get() {
#[arg(long = "geth-start-timeout", default_value = "5000")] Some(genesis) => Ok(genesis),
pub geth_start_timeout: u64, None => {
let genesis = match self.path.as_ref() {
Some(genesis_path) => {
let genesis_content = read_to_string(genesis_path)?;
serde_json::from_str(genesis_content.as_str())?
}
None => DEFAULT_GENESIS.clone(),
};
Ok(self.genesis.get_or_init(|| genesis))
}
}
}
}
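`GenesisConfiguration::genesis` layers two lazies: a `LazyLock` for the compiled-in default and a per-instance `OnceLock` so the genesis file is read and parsed at most once, with parse errors returned to the caller instead of panicking. A reduced sketch of that fallible `OnceLock` pattern (a `u32` stands in for the parsed `Genesis`; `anyhow` assumed):

```rust
use std::sync::OnceLock;

#[derive(Default)]
struct Config {
    raw: Option<String>,   // stands in for the optional genesis path
    parsed: OnceLock<u32>, // stands in for the cached `Genesis`
}

impl Config {
    fn value(&self) -> anyhow::Result<&u32> {
        // Fast path: already parsed on an earlier call.
        if let Some(value) = self.parsed.get() {
            return Ok(value);
        }
        let parsed = match &self.raw {
            // May fail; on error nothing is cached and the caller sees it.
            Some(raw) => raw.trim().parse()?,
            // Compiled-in default, like the bundled genesis.json.
            None => 42,
        };
        Ok(self.parsed.get_or_init(|| parsed))
    }
}

fn main() -> anyhow::Result<()> {
    let config = Config {
        raw: Some("7".into()),
        ..Default::default()
    };
    assert_eq!(*config.value()?, 7);
    assert_eq!(*config.value()?, 7); // second call hits the cache
    Ok(())
}
```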
/// Configure nodes according to this genesis.json file. /// A set of configuration parameters for the wallet.
#[arg(long = "genesis", default_value = "genesis.json")] #[derive(Clone, Debug, Parser, Serialize)]
pub genesis_file: PathBuf, pub struct WalletConfiguration {
/// The private key of the default signer.
/// The signing account private key. #[clap(
#[arg( long = "wallet.default-private-key",
short,
long = "account",
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d" default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
)] )]
pub account: String, #[serde(serialize_with = "serialize_private_key")]
default_key: PrivateKeySigner,
/// This argument controls which private keys the nodes should have access to and be added to /// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
/// of the node. /// of the node.
#[arg(long = "private-keys-count", default_value_t = 100_000)] #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
pub private_keys_to_add: usize, additional_keys: usize,
/// The differential testing leader node implementation. /// The wallet object that will be used.
#[arg(short, long = "leader", default_value = "geth")] #[clap(skip)]
pub leader: TestingPlatform, #[serde(skip)]
wallet: OnceLock<Arc<EthereumWallet>>,
}
/// The differential testing follower node implementation. impl WalletConfiguration {
#[arg(short, long = "follower", default_value = "kitchensink")] pub fn wallet(&self) -> Arc<EthereumWallet> {
pub follower: TestingPlatform, self.wallet
.get_or_init(|| {
let mut wallet = EthereumWallet::new(self.default_key.clone());
for signer in (1..=self.additional_keys)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Arc::new(wallet)
})
.clone()
}
}
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
value.to_bytes().encode_hex().serialize(serializer)
}
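The wallet above derives its `additional_keys` signers deterministically: key `N` is just the integer `N` encoded as 32 big-endian bytes, so every node pool builds an identical signer set with no coordination. The snippet below replays that derivation with the same `alloy` calls as the diff (`alloy` assumed as a dependency; printing the address is extra):

```rust
use alloy::primitives::{FixedBytes, U256};
use alloy::signers::local::PrivateKeySigner;

fn main() {
    // id -> U256 -> 32 big-endian bytes -> secp256k1 private key.
    for id in 1..=3u64 {
        let key_bytes = U256::from(id).to_be_bytes::<32>();
        let signer = PrivateKeySigner::from_bytes(&FixedBytes(key_bytes))
            .expect("a small nonzero integer is a valid private key");
        println!("key {id} -> address {}", signer.address());
    }
}
```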
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ConcurrencyConfiguration {
/// Determines the amount of nodes that will be spawned for each chain. /// Determines the amount of nodes that will be spawned for each chain.
#[arg(long, default_value = "1")] #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
pub number_of_nodes: usize, pub number_of_nodes: usize,
/// Determines the amount of tokio worker threads that will be used. /// Determines the amount of tokio worker threads that will be used.
#[arg( #[arg(
long, long = "concurrency.number-of-threads",
default_value_t = std::thread::available_parallelism() default_value_t = std::thread::available_parallelism()
.map(|n| n.get()) .map(|n| n.get())
.unwrap_or(1) .unwrap_or(1)
)] )]
pub number_of_threads: usize, pub number_of_threads: usize,
/// Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes. /// Determines the amount of concurrent tasks that will be spawned to run tests.
#[arg(long)]
pub number_concurrent_tasks: Option<usize>,
/// Extract problems back to the test corpus.
#[arg(short, long = "extract-problems")]
pub extract_problems: bool,
/// The path to the `kitchensink` executable.
/// ///
/// By default it uses `substrate-node` binary found in `$PATH`. /// Defaults to 20 x the number of nodes.
#[arg(short, long = "kitchensink", default_value = "substrate-node")] #[arg(long = "concurrency.number-of-concurrent-tasks")]
pub kitchensink: PathBuf, number_concurrent_tasks: Option<usize>,
/// The path to the `revive-dev-node` executable. /// Determines if the concurrency limit should be ignored or not.
/// #[arg(long = "concurrency.ignore-concurrency-limit")]
/// By default it uses `revive-dev-node` binary found in `$PATH`. ignore_concurrency_limit: bool,
#[arg(long = "revive-dev-node", default_value = "revive-dev-node")] }
pub revive_dev_node: PathBuf,
/// By default the tool uses the revive-dev-node when it's running differential tests against impl ConcurrencyConfiguration {
/// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to pub fn concurrency_limit(&self) -> Option<usize> {
/// configure the tool to use kitchensink rather than the dev-node. match self.ignore_concurrency_limit {
#[arg(long)] true => None,
pub use_kitchensink_not_dev_node: bool, false => Some(
self.number_concurrent_tasks
/// The path to the `eth_proxy` executable. .unwrap_or(20 * self.number_of_nodes),
/// ),
/// By default it uses `eth-rpc` binary found in `$PATH`. }
#[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")] }
pub eth_proxy: PathBuf, }
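`concurrency_limit` folds the two flags into exactly the `Option<usize>` shape that `StreamExt::for_each_concurrent` expects, where `None` means unbounded and the default remains 20 in-flight tasks per node. A hedged sketch of how that value feeds the stream combinator (`futures` and `tokio` assumed as dependencies):

```rust
use futures::{stream, StreamExt};

fn concurrency_limit(
    ignore_limit: bool,
    explicit_tasks: Option<usize>,
    number_of_nodes: usize,
) -> Option<usize> {
    match ignore_limit {
        // `None` disables the cap entirely.
        true => None,
        false => Some(explicit_tasks.unwrap_or(20 * number_of_nodes)),
    }
}

#[tokio::main]
async fn main() {
    // With 2 nodes and no explicit override: at most 40 tests in flight.
    let limit = concurrency_limit(false, None, 2);
    assert_eq!(limit, Some(40));

    stream::iter(0..100)
        .for_each_concurrent(limit, |test_id| async move {
            // Stand-in for driving one test case against the node pools.
            let _ = test_id;
        })
        .await;
}
```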
#[derive(Clone, Debug, Parser, Serialize)]
pub struct CompilationConfiguration {
/// Controls if the compilation cache should be invalidated or not. /// Controls if the compilation cache should be invalidated or not.
#[arg(short, long)] #[arg(long = "compilation.invalidate-cache")]
pub invalidate_compilation_cache: bool, pub invalidate_compilation_cache: bool,
}
#[derive(Clone, Debug, Parser, Serialize)]
pub struct ReportConfiguration {
/// Controls if the compiler input is included in the final report. /// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")] #[clap(long = "report.include-compiler-input")]
pub report_include_compiler_input: bool, pub include_compiler_input: bool,
/// Controls if the compiler output is included in the final report. /// Controls if the compiler output is included in the final report.
#[clap(long = "report.include-compiler-output")] #[clap(long = "report.include-compiler-output")]
pub report_include_compiler_output: bool, pub include_compiler_output: bool,
} }
impl Arguments { /// Represents the working directory that the program uses.
/// Return the configured working directory with the following precedence: #[derive(Debug, Clone)]
/// 1. `self.working_directory` if it was provided. pub enum WorkingDirectoryConfiguration {
/// 2. `self.temp_dir` if it was provided /// A temporary directory is used as the working directory. This will be removed when dropped.
/// 3. Panic. TemporaryDirectory(Arc<TempDir>),
pub fn directory(&self) -> &Path { /// A directory with a path is used as the working directory.
if let Some(path) = &self.working_directory { Path(PathBuf),
return path.as_path();
} }
if let Some(temp_dir) = &self.temp_dir { impl WorkingDirectoryConfiguration {
return temp_dir.path(); pub fn as_path(&self) -> &Path {
} self.as_ref()
panic!("should have a workdir configured")
}
/// Return the number of concurrent tasks to run. This is provided via the
/// `--number-concurrent-tasks` argument, and otherwise defaults to --number-of-nodes * 20.
pub fn number_of_concurrent_tasks(&self) -> usize {
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes)
}
/// Try to parse `self.account` into a [PrivateKeySigner],
/// panicking on error.
pub fn wallet(&self) -> EthereumWallet {
let signer = self
.account
.parse::<PrivateKeySigner>()
.unwrap_or_else(|error| {
panic!("private key '{}' parsing error: {error}", self.account);
});
EthereumWallet::new(signer)
} }
} }
impl Default for Arguments { impl Deref for WorkingDirectoryConfiguration {
type Target = Path;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl AsRef<Path> for WorkingDirectoryConfiguration {
fn as_ref(&self) -> &Path {
match self {
WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
WorkingDirectoryConfiguration::Path(path) => path.as_path(),
}
}
}
impl Default for WorkingDirectoryConfiguration {
fn default() -> Self { fn default() -> Self {
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap()); TempDir::new()
.map(Arc::new)
let default = Arguments::parse_from(["retester"]); .map(Self::TemporaryDirectory)
.expect("Failed to create the temporary directory")
Arguments {
temp_dir: Some(&TEMP_DIR),
..default
} }
} }
impl FromStr for WorkingDirectoryConfiguration {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"" => Ok(Default::default()),
_ => Ok(Self::Path(PathBuf::from(s))),
}
}
}
impl Display for WorkingDirectoryConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.as_path().display(), f)
}
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_path().serialize(serializer)
}
}
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s)
.map(Duration::from_millis)
.map_err(Into::into)
} }
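These two helpers keep the clap definitions declarative: `WorkingDirectoryConfiguration` parses via `FromStr` (the empty-string `default_value` selects the temporary-directory behaviour), and `parse_duration` lets a millisecond flag land directly in a `Duration` field through `value_parser`. A reduced sketch wiring both into a derive-style parser (flag names are illustrative; `clap` with the `derive` feature and `anyhow` assumed):

```rust
use std::str::FromStr;
use std::time::Duration;

use clap::Parser;

#[derive(Clone, Debug)]
enum Workdir {
    Temporary, // stands in for the TempDir-backed variant
    Path(String),
}

impl FromStr for Workdir {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // The empty default routes to the temporary-directory case.
        match s {
            "" => Ok(Self::Temporary),
            _ => Ok(Self::Path(s.to_owned())),
        }
    }
}

fn parse_duration(s: &str) -> anyhow::Result<Duration> {
    u64::from_str(s).map(Duration::from_millis).map_err(Into::into)
}

#[derive(Debug, Parser)]
struct Args {
    #[clap(long, default_value = "")]
    working_directory: Workdir,

    #[clap(long = "start-timeout-ms", default_value = "5000", value_parser = parse_duration)]
    start_timeout: Duration,
}

fn main() {
    let args = Args::parse_from(["demo", "--start-timeout-ms", "2500"]);
    assert_eq!(args.start_timeout, Duration::from_millis(2500));
    assert!(matches!(args.working_directory, Workdir::Temporary));
}
```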
/// The Solidity compatible node implementation. /// The Solidity compatible node implementation.
/// ///
/// This describes the solutions to be tested against on a high level. /// This describes the solutions to be tested against on a high level.
#[derive( #[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize, Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)] )]
#[clap(rename_all = "lower")] #[strum(serialize_all = "kebab-case")]
pub enum TestingPlatform { pub enum TestingPlatform {
/// The go-ethereum reference full node EVM implementation. /// The go-ethereum reference full node EVM implementation.
Geth, Geth,
/// The kitchensink runtime provides the PolkaVM (PVM) based node implentation. /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
Kitchensink, Kitchensink,
} }
impl Display for TestingPlatform {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Geth => f.write_str("geth"),
Self::Kitchensink => f.write_str("revive"),
}
}
}
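One behavioural note on the strum migration: the deleted manual `Display` rendered `Kitchensink` as `revive`, whereas `#[strum(serialize_all = "kebab-case")]` now derives each string from the variant name, so the same value round-trips through `Display`/`FromStr`. A quick sketch of what the derives provide (`strum` with the `derive` feature assumed):

```rust
use std::str::FromStr;

use strum::{Display, EnumString};

#[derive(Clone, Copy, Debug, PartialEq, Eq, Display, EnumString)]
#[strum(serialize_all = "kebab-case")]
enum TestingPlatform {
    Geth,
    Kitchensink,
}

fn main() {
    // Display: the kebab-cased variant name ("kitchensink", not "revive").
    assert_eq!(TestingPlatform::Kitchensink.to_string(), "kitchensink");
    // EnumString: the inverse parse, e.g. for `--leader`/`--follower` values.
    assert_eq!(
        TestingPlatform::from_str("geth").unwrap(),
        TestingPlatform::Geth
    );
}
```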
-1
@@ -35,7 +35,6 @@ tracing-subscriber = { workspace = true }
semver = { workspace = true } semver = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
temp-dir = { workspace = true }
[lints] [lints]
workspace = true workspace = true
+1 -1
@@ -18,7 +18,7 @@ use alloy::{
primitives::Address, primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode}, rpc::types::{TransactionRequest, trace::geth::DiffMode},
}; };
use anyhow::Context; use anyhow::Context as _;
use futures::TryStreamExt; use futures::TryStreamExt;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use revive_dt_format::traits::{ResolutionContext, ResolverApi};
+63 -98
@@ -5,7 +5,7 @@ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
io::{BufWriter, Write, stderr}, io::{BufWriter, Write, stderr},
path::Path, path::Path,
sync::{Arc, LazyLock}, sync::Arc,
time::Instant, time::Instant,
}; };
@@ -13,7 +13,7 @@ use alloy::{
network::{Ethereum, TransactionBuilder}, network::{Ethereum, TransactionBuilder},
rpc::types::TransactionRequest, rpc::types::TransactionRequest,
}; };
use anyhow::Context; use anyhow::Context as _;
use clap::Parser; use clap::Parser;
use futures::stream; use futures::stream;
use futures::{Stream, StreamExt}; use futures::{Stream, StreamExt};
@@ -24,15 +24,13 @@ use revive_dt_report::{
TestSpecificReporter, TestSpecifier, TestSpecificReporter, TestSpecifier,
}; };
use serde_json::{Value, json}; use serde_json::{Value, json};
use temp_dir::TempDir;
use tokio::try_join; use tokio::try_join;
use tracing::{debug, error, info, info_span, instrument}; use tracing::{debug, error, info, info_span, instrument};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{iterators::EitherIter, types::Mode}; use revive_dt_common::{iterators::EitherIter, types::Mode};
use revive_dt_compiler::{CompilerOutput, SolidityCompiler}; use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_config::*; use revive_dt_config::{Context, *};
use revive_dt_core::{ use revive_dt_core::{
Geth, Kitchensink, Platform, Geth, Kitchensink, Platform,
driver::{CaseDriver, CaseState}, driver::{CaseDriver, CaseState},
@@ -48,58 +46,8 @@ use revive_dt_node::{Node, pool::NodePool};
use crate::cached_compiler::CachedCompiler; use crate::cached_compiler::CachedCompiler;
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?; let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
info!(
leader = args.leader.to_string(),
follower = args.follower.to_string(),
working_directory = %args.directory().display(),
number_of_nodes = args.number_of_nodes,
invalidate_compilation_cache = args.invalidate_compilation_cache,
"Differential testing tool has been initialized"
);
let (reporter, report_aggregator_task) = ReportAggregator::new(args.clone()).into_task();
let number_of_threads = args.number_of_threads;
let body = async move {
let tests = collect_corpora(&args)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
execute_corpus(&args, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")?;
Ok(())
};
tokio::runtime::Builder::new_multi_thread()
.worker_threads(number_of_threads)
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(body)
}
fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
.lossy(false) .lossy(false)
// Assuming that each line contains 255 characters and that each character is one byte, then // Assuming that each line contains 255 characters and that each character is one byte, then
// this means that our buffer is about 4GBs large. // this means that our buffer is about 4GBs large.
@@ -118,31 +66,51 @@ fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
tracing::subscriber::set_global_default(subscriber)?; tracing::subscriber::set_global_default(subscriber)?;
info!("Differential testing tool is starting"); info!("Differential testing tool is starting");
let mut args = Arguments::parse(); let context = Context::try_parse()?;
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
if args.corpus.is_empty() { match context {
anyhow::bail!("no test corpus specified"); Context::ExecuteTests(context) => {
} let tests = collect_corpora(&context)
.context("Failed to collect corpus files from provided arguments")?
.into_iter()
.inspect(|(corpus, _)| {
reporter
.report_corpus_file_discovery_event(corpus.clone())
.expect("Can't fail")
})
.flat_map(|(_, files)| files.into_iter())
.inspect(|metadata_file| {
reporter
.report_metadata_file_discovery_event(
metadata_file.metadata_file_path.clone(),
metadata_file.content.clone(),
)
.expect("Can't fail")
})
.collect::<Vec<_>>();
match args.working_directory.as_ref() { tokio::runtime::Builder::new_multi_thread()
Some(dir) => { .worker_threads(context.concurrency_configuration.number_of_threads)
if !dir.exists() { .enable_all()
anyhow::bail!("workdir {} does not exist", dir.display()); .build()
.expect("Failed building the Runtime")
.block_on(async move {
execute_corpus(context, &tests, reporter, report_aggregator_task)
.await
.context("Failed to execute corpus")
})
} }
} }
None => {
args.temp_dir = Some(&TEMP_DIR);
}
}
Ok((args, guard))
} }
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] #[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> { fn collect_corpora(
context: &ExecutionContext,
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
let mut corpora = HashMap::new(); let mut corpora = HashMap::new();
for path in &args.corpus { for path in &context.corpus {
let span = info_span!("Processing corpus file", path = %path.display()); let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter(); let _guard = span.enter();
@@ -160,7 +128,7 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<Metad
} }
async fn run_driver<L, F>( async fn run_driver<L, F>(
args: &Arguments, context: ExecutionContext,
metadata_files: &[MetadataFile], metadata_files: &[MetadataFile],
reporter: Reporter, reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>, report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
@@ -171,20 +139,20 @@ where
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{ {
let leader_nodes = let leader_nodes = NodePool::<L::Blockchain>::new(context.clone())
NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?; .context("Failed to initialize leader node pool")?;
let follower_nodes = let follower_nodes = NodePool::<F::Blockchain>::new(context.clone())
NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?; .context("Failed to initialize follower node pool")?;
let tests_stream = tests_stream( let tests_stream = tests_stream(
args, &context,
metadata_files.iter(), metadata_files.iter(),
&leader_nodes, &leader_nodes,
&follower_nodes, &follower_nodes,
reporter.clone(), reporter.clone(),
) )
.await; .await;
let driver_task = start_driver_task::<L, F>(args, tests_stream) let driver_task = start_driver_task::<L, F>(&context, tests_stream)
.await .await
.context("Failed to start driver task")?; .context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter); let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -196,7 +164,7 @@ where
} }
async fn tests_stream<'a, L, F>( async fn tests_stream<'a, L, F>(
args: &Arguments, args: &ExecutionContext,
metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone, metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
leader_node_pool: &'a NodePool<L::Blockchain>, leader_node_pool: &'a NodePool<L::Blockchain>,
follower_node_pool: &'a NodePool<F::Blockchain>, follower_node_pool: &'a NodePool<F::Blockchain>,
@@ -320,7 +288,7 @@ where
} }
async fn start_driver_task<'a, L, F>( async fn start_driver_task<'a, L, F>(
args: &Arguments, context: &ExecutionContext,
tests: impl Stream<Item = Test<'a, L, F>>, tests: impl Stream<Item = Test<'a, L, F>>,
) -> anyhow::Result<impl Future<Output = ()>> ) -> anyhow::Result<impl Future<Output = ()>>
where where
@@ -333,25 +301,22 @@ where
{ {
info!("Starting driver task"); info!("Starting driver task");
let number_concurrent_tasks = args.number_of_concurrent_tasks();
let cached_compiler = Arc::new( let cached_compiler = Arc::new(
CachedCompiler::new( CachedCompiler::new(
args.directory().join("compilation_cache"), context
args.invalidate_compilation_cache, .working_directory
.as_path()
.join("compilation_cache"),
context
.compilation_configuration
.invalidate_compilation_cache,
) )
.await .await
.context("Failed to initialize cached compiler")?, .context("Failed to initialize cached compiler")?,
); );
Ok(tests.for_each_concurrent( Ok(tests.for_each_concurrent(
// We want to limit the concurrent tasks here because: context.concurrency_configuration.concurrency_limit(),
//
// 1. We don't want to overwhelm the nodes with too many requests, leading to responses timing out.
// 2. We don't want to open too many files at once, leading to the OS running out of file descriptors.
//
// By default, we allow maximum of 10 ongoing requests per node in order to limit (1), and assume that
// this number will automatically be low enough to address (2). The user can override this.
Some(number_concurrent_tasks),
move |test| { move |test| {
let cached_compiler = cached_compiler.clone(); let cached_compiler = cached_compiler.clone();
@@ -387,8 +352,7 @@ where
)) ))
} }
#[allow(clippy::uninlined_format_args)] #[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
#[allow(irrefutable_let_patterns)]
async fn start_cli_reporting_task(reporter: Reporter) { async fn start_cli_reporting_task(reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter); drop(reporter);
@@ -710,17 +674,18 @@ where
} }
async fn execute_corpus( async fn execute_corpus(
args: &Arguments, context: ExecutionContext,
tests: &[MetadataFile], tests: &[MetadataFile],
reporter: Reporter, reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>, report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
match (&args.leader, &args.follower) { match (&context.leader, &context.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => { (TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(args, tests, reporter, report_aggregator_task).await? run_driver::<Geth, Kitchensink>(context, tests, reporter, report_aggregator_task)
.await?
} }
(TestingPlatform::Geth, TestingPlatform::Geth) => { (TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(args, tests, reporter, report_aggregator_task).await? run_driver::<Geth, Geth>(context, tests, reporter, report_aggregator_task).await?
} }
_ => unimplemented!(), _ => unimplemented!(),
} }
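`main` stays synchronous and assembles the runtime by hand so the `--concurrency.number-of-threads` flag can size the worker pool before any async work begins, something a fixed `#[tokio::main]` attribute could not do. A minimal sketch of that shape (`tokio` and `anyhow` assumed; the thread count is hard-coded here where the real tool reads it from the context):

```rust
fn main() -> anyhow::Result<()> {
    // In the real tool this comes from `context.concurrency_configuration`.
    let number_of_threads = 4;

    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(number_of_threads)
        .enable_all()
        .build()
        .expect("Failed building the Runtime")
        .block_on(async move {
            // Stand-in for `execute_corpus(context, &tests, ...)`.
            Ok::<(), anyhow::Error>(())
        })
}
```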
+1 -1
@@ -8,7 +8,7 @@ use alloy::{
rpc::types::TransactionRequest, rpc::types::TransactionRequest,
}; };
use alloy_primitives::{FixedBytes, utils::parse_units}; use alloy_primitives::{FixedBytes, utils::parse_units};
use anyhow::Context; use anyhow::Context as _;
use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream}; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
use semver::VersionReq; use semver::VersionReq;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
+1 -1
@@ -1,4 +1,4 @@
use anyhow::Context; use anyhow::Context as _;
use regex::Regex; use regex::Regex;
use revive_dt_common::iterators::EitherIter; use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
+46 -65
@@ -17,9 +17,7 @@ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount}, genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet}, network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{ primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
},
providers::{ providers::{
Provider, ProviderBuilder, Provider, ProviderBuilder,
ext::DebugApi, ext::DebugApi,
@@ -29,9 +27,8 @@ use alloy::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
}, },
signers::local::PrivateKeySigner,
}; };
use anyhow::Context; use anyhow::Context as _;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use tracing::{Instrument, instrument}; use tracing::{Instrument, instrument};
@@ -39,7 +36,7 @@ use revive_dt_common::{
fs::clear_directory, fs::clear_directory,
futures::{PollingWaitBehavior, poll}, futures::{PollingWaitBehavior, poll},
}; };
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
@@ -64,7 +61,7 @@ pub struct GethNode {
geth: PathBuf, geth: PathBuf,
id: u32, id: u32,
handle: Option<Child>, handle: Option<Child>,
start_timeout: u64, start_timeout: Duration,
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller, chain_id_filler: ChainIdFiller,
@@ -97,7 +94,7 @@ impl GethNode {
/// Create the node directory and call `geth init` to configure the genesis. /// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
@@ -106,8 +103,6 @@ impl GethNode {
create_dir_all(&self.logs_directory) create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?; .context("Failed to create logs directory for geth node")?;
let mut genesis = serde_json::from_str::<Genesis>(&genesis)
.context("Failed to deserialize geth genesis JSON")?;
for signer_address in for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet) <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{ {
@@ -240,7 +235,7 @@ impl GethNode {
.open(self.geth_stderr_log_file_path()) .open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?; .context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = Duration::from_millis(self.start_timeout); let maximum_wait_time = self.start_timeout;
let mut stderr = BufReader::new(logs_file).lines(); let mut stderr = BufReader::new(logs_file).lines();
let mut lines = vec![]; let mut lines = vec![];
loop { loop {
@@ -256,7 +251,7 @@ impl GethNode {
if Instant::now().duration_since(start_time) > maximum_wait_time { if Instant::now().duration_since(start_time) > maximum_wait_time {
anyhow::bail!( anyhow::bail!(
"Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n", "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
self.start_timeout, self.start_timeout.as_millis(),
lines.join("\n") lines.join("\n")
); );
} }
@@ -556,30 +551,40 @@ impl ResolverApi for GethNode {
} }
impl Node for GethNode { impl Node for GethNode {
fn new(config: &Arguments) -> Self { fn new(
let geth_directory = config.directory().join(Self::BASE_DIRECTORY); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string()); let base_directory = geth_directory.join(id.to_string());
let mut wallet = config.wallet(); let wallet = wallet_configuration.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
data_directory: base_directory.join(Self::DATA_DIRECTORY), data_directory: base_directory.join(Self::DATA_DIRECTORY),
logs_directory: base_directory.join(Self::LOGS_DIRECTORY), logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
base_directory, base_directory,
geth: config.geth.clone(), geth: geth_configuration.path.clone(),
id, id,
handle: None, handle: None,
start_timeout: config.geth_start_timeout, start_timeout: geth_configuration.start_timeout_ms,
wallet: Arc::new(wallet), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
// We know that we only need to be storing 2 files so we can specify that when creating // We know that we only need to be storing 2 files so we can specify that when creating
@@ -621,7 +626,7 @@ impl Node for GethNode {
} }
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(genesis)?.spawn_process()?; self.init(genesis)?.spawn_process()?;
Ok(()) Ok(())
} }
@@ -662,49 +667,25 @@ impl Drop for GethNode {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use revive_dt_config::Arguments;
use temp_dir::TempDir;
use crate::{GENESIS_JSON, Node};
use super::*; use super::*;
fn test_config() -> (Arguments, TempDir) { fn test_config() -> ExecutionContext {
let mut config = Arguments::default(); ExecutionContext::default()
let temp_dir = TempDir::new().unwrap();
config.working_directory = temp_dir.path().to_path_buf().into();
(config, temp_dir)
} }
fn new_node() -> (GethNode, TempDir) { fn new_node() -> (ExecutionContext, GethNode) {
let (args, temp_dir) = test_config(); let context = test_config();
let mut node = GethNode::new(&args); let mut node = GethNode::new(&context);
node.init(GENESIS_JSON.to_owned()) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(node, temp_dir) (context, node)
}
#[test]
fn init_works() {
GethNode::new(&test_config().0)
.init(GENESIS_JSON.to_string())
.unwrap();
}
#[test]
fn spawn_works() {
GethNode::new(&test_config().0)
.spawn(GENESIS_JSON.to_string())
.unwrap();
} }
#[test] #[test]
fn version_works() { fn version_works() {
let version = GethNode::new(&test_config().0).version().unwrap(); let version = GethNode::new(&test_config()).version().unwrap();
assert!( assert!(
version.starts_with("geth version"), version.starts_with("geth version"),
"expected version string, got: '{version}'" "expected version string, got: '{version}'"
@@ -714,7 +695,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_chain_id_from_node() { async fn can_get_chain_id_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let chain_id = node.chain_id().await; let chain_id = node.chain_id().await;
@@ -727,7 +708,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_gas_limit_from_node() { async fn can_get_gas_limit_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -740,7 +721,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_coinbase_from_node() { async fn can_get_coinbase_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -753,7 +734,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_difficulty_from_node() { async fn can_get_block_difficulty_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -766,7 +747,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_hash_from_node() { async fn can_get_block_hash_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -778,7 +759,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_timestamp_from_node() { async fn can_get_block_timestamp_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -790,7 +771,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_number_from_node() { async fn can_get_block_number_from_node() {
// Arrange // Arrange
let (node, _temp_dir) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_number = node.last_block_number().await; let block_number = node.last_block_number().await;
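The `GethNode` constructor no longer registers extra signers itself; the loop deleted at the top of this file's diff derived them from the integers 1..=N used as big-endian private keys. That derivation presumably now lives behind `WalletConfiguration::wallet()`; a standalone sketch of it, assuming alloy's local-signer API:

```rust
use alloy::network::EthereumWallet;
use alloy::primitives::{FixedBytes, U256};
use alloy::signers::local::PrivateKeySigner;

// Turn a small integer into a 32-byte big-endian private key and then into
// a local signer, exactly as the deleted loop did.
fn signer_for(id: u64) -> PrivateKeySigner {
    let key = U256::from(id).to_be_bytes::<32>();
    PrivateKeySigner::from_bytes(&FixedBytes(key))
        .expect("a small non-zero integer is a valid secp256k1 key")
}

// Register signers for ids 1..=count on one wallet; the first becomes the
// default signer. Whether WalletConfiguration::wallet() does exactly this
// internally is an assumption.
fn wallet_with_deterministic_signers(count: u64) -> EthereumWallet {
    let mut wallet = EthereumWallet::new(signer_for(1));
    for id in 2..=count {
        wallet.register_signer(signer_for(id));
    }
    wallet
}
```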
+59 -60
View File
@@ -19,8 +19,8 @@ use alloy::{
TransactionBuilderError, UnbuiltTransactionError, TransactionBuilderError, UnbuiltTransactionError,
}, },
primitives::{ primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes, Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
StorageKey, TxHash, U256, TxHash, U256,
}, },
providers::{ providers::{
Provider, ProviderBuilder, Provider, ProviderBuilder,
@@ -32,9 +32,8 @@ use alloy::{
eth::{Block, Header, Transaction}, eth::{Block, Header, Transaction},
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
}, },
signers::local::PrivateKeySigner,
}; };
use anyhow::Context; use anyhow::Context as _;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory; use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
@@ -43,7 +42,7 @@ use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec; use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32; use sp_runtime::AccountId32;
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -92,7 +91,7 @@ impl KitchensinkNode {
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
@@ -153,8 +152,6 @@ impl KitchensinkNode {
}) })
.collect(); .collect();
let mut eth_balances = { let mut eth_balances = {
let mut genesis = serde_json::from_str::<Genesis>(genesis)
.context("Failed to deserialize EVM genesis JSON for kitchensink")?;
for signer_address in for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet) <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{ {
@@ -586,35 +583,47 @@ impl ResolverApi for KitchensinkNode {
} }
impl Node for KitchensinkNode { impl Node for KitchensinkNode {
fn new(config: &Arguments) -> Self { fn new(
let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self {
let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let kitchensink_directory = working_directory_configuration
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = kitchensink_directory.join(id.to_string()); let base_directory = kitchensink_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let mut wallet = config.wallet(); let wallet = wallet_configuration.wallet();
for signer in (1..=config.private_keys_to_add)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
.map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
{
wallet.register_signer(signer);
}
Self { Self {
id, id,
substrate_binary: config.kitchensink.clone(), substrate_binary: kitchensink_configuration.path.clone(),
dev_node_binary: config.revive_dev_node.clone(), dev_node_binary: dev_node_configuration.path.clone(),
eth_proxy_binary: config.eth_proxy.clone(), eth_proxy_binary: eth_rpc_configuration.path.clone(),
rpc_url: String::new(), rpc_url: String::new(),
base_directory, base_directory,
logs_directory, logs_directory,
process_substrate: None, process_substrate: None,
process_proxy: None, process_proxy: None,
wallet: Arc::new(wallet), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node, use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink,
// We know that we only need to store 4 files so we can specify that when creating // We know that we only need to store 4 files so we can specify that when creating
// the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
logs_file_to_flush: Vec::with_capacity(4), logs_file_to_flush: Vec::with_capacity(4),
@@ -655,8 +664,8 @@ impl Node for KitchensinkNode {
Ok(()) Ok(())
} }
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
self.init(&genesis)?.spawn_process() self.init(genesis)?.spawn_process()
} }
fn version(&self) -> anyhow::Result<String> { fn version(&self) -> anyhow::Result<String> {
@@ -1121,25 +1130,20 @@ impl BlockHeader for KitchenSinkHeader {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use alloy::rpc::types::TransactionRequest; use alloy::rpc::types::TransactionRequest;
use revive_dt_config::Arguments;
use std::path::PathBuf;
use std::sync::{LazyLock, Mutex}; use std::sync::{LazyLock, Mutex};
use std::fs; use std::fs;
use super::*; use super::*;
use crate::{GENESIS_JSON, Node}; use crate::Node;
fn test_config() -> Arguments { fn test_config() -> ExecutionContext {
Arguments { let mut context = ExecutionContext::default();
kitchensink: PathBuf::from("substrate-node"), context.kitchensink_configuration.use_kitchensink = true;
eth_proxy: PathBuf::from("eth-rpc"), context
use_kitchensink_not_dev_node: true,
..Default::default()
}
} }
fn new_node() -> (KitchensinkNode, Arguments) { fn new_node() -> (ExecutionContext, KitchensinkNode) {
// Note: When we run the tests in the CI we found that if they're all // Note: When we run the tests in the CI we found that if they're all
// run in parallel then the CI is unable to start all of the nodes in // run in parallel then the CI is unable to start all of the nodes in
// time and their start-up times out. Therefore, we want all of the // time and their start-up times out. Therefore, we want all of the
@@ -1158,32 +1162,36 @@ mod tests {
static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
let _guard = NODE_START_MUTEX.lock().unwrap(); let _guard = NODE_START_MUTEX.lock().unwrap();
let args = test_config(); let context = test_config();
let mut node = KitchensinkNode::new(&args); let mut node = KitchensinkNode::new(&context);
node.init(GENESIS_JSON) node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node") .expect("Failed to initialize the node")
.spawn_process() .spawn_process()
.expect("Failed to spawn the node process"); .expect("Failed to spawn the node process");
(node, args) (context, node)
} }
/// A shared node that multiple tests can use. It starts up once. /// A shared node that multiple tests can use. It starts up once.
fn shared_node() -> &'static KitchensinkNode { fn shared_node() -> &'static KitchensinkNode {
static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| { static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| {
let (node, args) = new_node(); let (context, node) = new_node();
(node, args) (context, node)
}); });
&NODE.0 &NODE.1
} }
#[tokio::test] #[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() { async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange // Arrange
let (node, args) = new_node(); let (context, node) = new_node();
let provider = node.provider().await.expect("Failed to create provider"); let provider = node.provider().await.expect("Failed to create provider");
let account_address = args.wallet().default_signer().address(); let account_address = context
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -1217,7 +1225,9 @@ mod tests {
let mut dummy_node = KitchensinkNode::new(&test_config()); let mut dummy_node = KitchensinkNode::new(&test_config());
// Call `init()` // Call `init()`
dummy_node.init(genesis_content).expect("init failed"); dummy_node
.init(serde_json::from_str(genesis_content).unwrap())
.expect("init failed");
// Check that the patched chainspec file was generated // Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node let final_chainspec_path = dummy_node
@@ -1327,20 +1337,10 @@ mod tests {
} }
} }
#[test]
fn spawn_works() {
let config = test_config();
let mut node = KitchensinkNode::new(&config);
node.spawn(GENESIS_JSON.to_string()).unwrap();
}
#[test] #[test]
fn version_works() { fn version_works() {
let config = test_config(); let node = shared_node();
let node = KitchensinkNode::new(&config);
let version = node.version().unwrap(); let version = node.version().unwrap();
assert!( assert!(
@@ -1351,9 +1351,8 @@ mod tests {
#[test] #[test]
fn eth_rpc_version_works() { fn eth_rpc_version_works() {
let config = test_config(); let node = shared_node();
let node = KitchensinkNode::new(&config);
let version = node.eth_rpc_version().unwrap(); let version = node.eth_rpc_version().unwrap();
assert!( assert!(
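The `shared_node` helper above keeps the `ExecutionContext` alive next to the node so the backing directories are not dropped while tests still run against it. The same pattern in isolation, a minimal sketch with a hypothetical `Resource` type:

```rust
use std::sync::LazyLock;

struct Resource {
    name: String,
}

// Stand-in for spawning a node; the first tuple element models the
// context/guard that must stay alive as long as the resource does.
fn expensive_setup() -> (String, Resource) {
    ("guard".to_owned(), Resource { name: "shared".to_owned() })
}

// Initialized on first access, shared by every caller afterwards. Keeping
// the guard in the tuple ties its lifetime to the program, not to one test.
fn shared_resource() -> &'static Resource {
    static RESOURCE: LazyLock<(String, Resource)> = LazyLock::new(expensive_setup);
    &RESOURCE.1
}

fn main() {
    assert_eq!(shared_resource().name, "shared");
}
```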
+14 -6
View File
@@ -1,7 +1,8 @@
//! This crate implements the testing nodes. //! This crate implements the testing nodes.
use alloy::genesis::Genesis;
use revive_common::EVMVersion; use revive_common::EVMVersion;
use revive_dt_config::Arguments; use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
pub mod common; pub mod common;
@@ -10,13 +11,20 @@ pub mod geth;
pub mod kitchensink; pub mod kitchensink;
pub mod pool; pub mod pool;
/// The default genesis configuration.
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
/// An abstract interface for testing nodes. /// An abstract interface for testing nodes.
pub trait Node: EthereumNode { pub trait Node: EthereumNode {
/// Create a new uninitialized instance. /// Create a new uninitialized instance.
fn new(config: &Arguments) -> Self; fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone,
) -> Self;
/// Returns the identifier of the node. /// Returns the identifier of the node.
fn id(&self) -> usize; fn id(&self) -> usize;
@@ -24,7 +32,7 @@ pub trait Node: EthereumNode {
/// Spawns a node configured according to the genesis JSON. /// Spawns a node configured according to the genesis JSON.
/// ///
/// Blocks until it's ready to accept transactions. /// Blocks until it's ready to accept transactions.
fn spawn(&mut self, genesis: String) -> anyhow::Result<()>; fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
/// Prune the node instance and related data. /// Prune the node instance and related data.
/// ///
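The reworked `new` replaces the monolithic `Arguments` parameter with a bundle of `AsRef` bounds, so any context type that can lend out each configuration section qualifies. A self-contained sketch of the pattern with two hypothetical sections (the real configuration types and their impls live in `revive_dt_config`):

```rust
use std::path::PathBuf;

// Stand-ins for the real configuration sections (hypothetical shapes).
#[derive(Clone, Default)]
struct WorkingDirectoryConfig(PathBuf);
#[derive(Clone, Default)]
struct GethConfig {
    path: PathBuf,
}

#[derive(Clone, Default)]
struct MyContext {
    working_directory: WorkingDirectoryConfig,
    geth: GethConfig,
}

impl AsRef<WorkingDirectoryConfig> for MyContext {
    fn as_ref(&self) -> &WorkingDirectoryConfig {
        &self.working_directory
    }
}

impl AsRef<GethConfig> for MyContext {
    fn as_ref(&self) -> &GethConfig {
        &self.geth
    }
}

// A function bounded the same way `Node::new` now is: it names only the
// sections it needs, and any context that can lend them out qualifies.
fn data_dir(context: impl AsRef<WorkingDirectoryConfig> + AsRef<GethConfig>) -> PathBuf {
    AsRef::<WorkingDirectoryConfig>::as_ref(&context).0.join("geth")
}

fn main() {
    let context = MyContext::default();
    println!("{}", data_dir(&context).display());
}
```

Because the context satisfies several `AsRef` impls at once, a bare `.as_ref()` would be ambiguous; call sites disambiguate with the fully qualified `AsRef::<Section>::as_ref(&context)` form, as the diffs above do.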
+42 -14
View File
@@ -5,10 +5,13 @@ use std::{
thread, thread,
}; };
use revive_dt_common::cached_fs::read_to_string; use alloy::genesis::Genesis;
use anyhow::Context as _;
use anyhow::Context; use revive_dt_config::{
use revive_dt_config::Arguments; ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
WorkingDirectoryConfiguration,
};
use tracing::info; use tracing::info;
use crate::Node; use crate::Node;
@@ -25,18 +28,31 @@ where
T: Node + Send + 'static, T: Node + Send + 'static,
{ {
/// Create a new Pool. This will start as many nodes as there are workers in `config`. /// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new(config: &Arguments) -> anyhow::Result<Self> { pub fn new(
let nodes = config.number_of_nodes; context: impl AsRef<WorkingDirectoryConfiguration>
let genesis = read_to_string(&config.genesis_file).context(format!( + AsRef<ConcurrencyConfiguration>
"can not read genesis file: {}", + AsRef<GenesisConfiguration>
config.genesis_file.display() + AsRef<WalletConfiguration>
))?; + AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Send
+ Sync
+ Clone
+ 'static,
) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;
let genesis = genesis_configuration.genesis()?;
let mut handles = Vec::with_capacity(nodes); let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes { for _ in 0..nodes {
let config = config.clone(); let context = context.clone();
let genesis = genesis.clone(); let genesis = genesis.clone();
handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis))); handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
} }
let mut nodes = Vec::with_capacity(nodes); let mut nodes = Vec::with_capacity(nodes);
@@ -64,8 +80,20 @@ where
} }
} }
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> { fn spawn_node<T: Node + Send>(
let mut node = T::new(args); context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<ConcurrencyConfiguration>
+ AsRef<GenesisConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ AsRef<KitchensinkConfiguration>
+ AsRef<ReviveDevNodeConfiguration>
+ AsRef<EthRpcConfiguration>
+ Clone
+ 'static,
genesis: Genesis,
) -> anyhow::Result<T> {
let mut node = T::new(context);
info!( info!(
id = node.id(), id = node.id(),
connection_string = node.connection_string(), connection_string = node.connection_string(),
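`Pool::new` above fans node start-up out across OS threads and then joins them all, failing if any node fails to come up. The skeleton of that pattern, with a hypothetical `make_resource` standing in for `spawn_node::<T>`:

```rust
use std::thread;

// Stand-in for spawning and initializing a node.
fn make_resource(id: usize) -> anyhow::Result<String> {
    Ok(format!("node-{id}"))
}

fn start_all(count: usize) -> anyhow::Result<Vec<String>> {
    // Start every worker first so the initializations overlap...
    let handles: Vec<_> = (0..count)
        .map(|id| thread::spawn(move || make_resource(id)))
        .collect();
    // ...then join them, propagating the first start-up error.
    let mut resources = Vec::with_capacity(count);
    for handle in handles {
        resources.push(handle.join().expect("worker thread panicked")?);
    }
    Ok(resources)
}
```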
+38 -23
View File
@@ -12,7 +12,7 @@ use alloy_primitives::Address;
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Arguments, TestingPlatform}; use revive_dt_config::{Context, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance}; use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version; use semver::Version;
use serde::Serialize; use serde::Serialize;
@@ -36,11 +36,11 @@ pub struct ReportAggregator {
} }
impl ReportAggregator { impl ReportAggregator {
pub fn new(config: Arguments) -> Self { pub fn new(context: Context) -> Self {
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>(); let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
let (listener_tx, _) = channel::<ReporterEvent>(1024); let (listener_tx, _) = channel::<ReporterEvent>(1024);
Self { Self {
report: Report::new(config), report: Report::new(context),
remaining_cases: Default::default(), remaining_cases: Default::default(),
runner_tx: Some(runner_tx), runner_tx: Some(runner_tx),
runner_rx, runner_rx,
@@ -121,7 +121,12 @@ impl ReportAggregator {
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_path = self.report.config.directory().join(file_name); let file_path = self
.report
.context
.working_directory_configuration()
.as_path()
.join(file_name);
let file = OpenOptions::new() let file = OpenOptions::new()
.create(true) .create(true)
.write(true) .write(true)
@@ -282,8 +287,16 @@ impl ReportAggregator {
&mut self, &mut self,
event: PreLinkContractsCompilationSucceededEvent, event: PreLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input; let include_input = self
let include_output = self.report.config.report_include_compiler_output; .report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -311,8 +324,16 @@ impl ReportAggregator {
&mut self, &mut self,
event: PostLinkContractsCompilationSucceededEvent, event: PostLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self.report.config.report_include_compiler_input; let include_input = self
let include_output = self.report.config.report_include_compiler_output; .report
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
@@ -406,12 +427,8 @@ impl ReportAggregator {
#[serde_as] #[serde_as]
#[derive(Clone, Debug, Serialize)] #[derive(Clone, Debug, Serialize)]
pub struct Report { pub struct Report {
/// The configuration that the tool was started up with. /// The context that the tool was started up with.
pub config: Arguments, pub context: Context,
/// The platform of the leader chain.
pub leader_platform: TestingPlatform,
/// The platform of the follower chain.
pub follower_platform: TestingPlatform,
/// The list of corpus files that the tool found. /// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>, pub corpora: Vec<Corpus>,
/// The list of metadata files that were found by the tool. /// The list of metadata files that were found by the tool.
@@ -423,11 +440,9 @@ pub struct Report {
} }
impl Report { impl Report {
pub fn new(config: Arguments) -> Self { pub fn new(context: Context) -> Self {
Self { Self {
leader_platform: config.leader, context,
follower_platform: config.follower,
config,
corpora: Default::default(), corpora: Default::default(),
metadata_files: Default::default(), metadata_files: Default::default(),
test_case_information: Default::default(), test_case_information: Default::default(),
@@ -517,12 +532,12 @@ pub enum CompilationStatus {
/// The path of the compiler used to compile the contracts. /// The path of the compiler used to compile the contracts.
compiler_path: PathBuf, compiler_path: PathBuf,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// cached and the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
/// The output of the compiler. This is only included if the appropriate flag is set in the /// The output of the compiler. This is only included if the appropriate flag is set in the
/// CLI configurations. /// CLI context.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_output: Option<CompilerOutput>, compiler_output: Option<CompilerOutput>,
}, },
@@ -537,8 +552,8 @@ pub enum CompilationStatus {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_path: Option<PathBuf>, compiler_path: Option<PathBuf>,
/// The input provided to the compiler to compile the contracts. This is only included if /// The input provided to the compiler to compile the contracts. This is only included if
/// the appropriate flag is set in the CLI configuration and if the contracts were not /// the appropriate flag is set in the CLI context and if the contracts were not cached and
/// cached and the compiler was invoked. /// the compiler was invoked.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
compiler_input: Option<CompilerInput>, compiler_input: Option<CompilerInput>,
}, },
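The `skip_serializing_if` attributes on the optional compiler fields are what keep reports compact when the corresponding CLI flags are off; a small demonstration of that serde behavior, with hypothetical field names:

```rust
use serde::Serialize;
use std::path::PathBuf;

#[derive(Serialize)]
struct CompilationRecord {
    compiler_path: PathBuf,
    // Omitted from the JSON entirely whenever the flag left it as `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    compiler_input: Option<String>,
}

fn main() {
    let record = CompilationRecord {
        compiler_path: PathBuf::from("solc"),
        compiler_input: None,
    };
    // Prints: {"compiler_path":"solc"}
    println!("{}", serde_json::to_string(&record).unwrap());
}
```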
+1 -1
View File
@@ -13,7 +13,7 @@ use semver::Version;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use crate::download::SolcDownloader; use crate::download::SolcDownloader;
use anyhow::Context; use anyhow::Context as _;
pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default); pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
+1 -1
View File
@@ -11,7 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use crate::list::List; use crate::list::List;
use anyhow::Context; use anyhow::Context as _;
pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> = pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
LazyLock::new(Default::default); LazyLock::new(Default::default);
+1 -1
View File
@@ -5,7 +5,7 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use anyhow::Context; use anyhow::Context as _;
use cache::get_or_download; use cache::get_or_download;
use download::SolcDownloader; use download::SolcDownloader;
+6 -6
View File
@@ -89,13 +89,13 @@ echo "This may take a while..."
echo "" echo ""
# Run the tool # Run the tool
RUST_LOG="error" cargo run --release -- \ RUST_LOG="error" cargo run --release -- execute-tests \
--corpus "$CORPUS_FILE" \ --corpus "$CORPUS_FILE" \
--workdir "$WORKDIR" \ --working-directory "$WORKDIR" \
--number-of-nodes 5 \ --concurrency.number-of-nodes 5 \
--kitchensink "$SUBSTRATE_NODE_BIN" \ --kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node "$REVIVE_DEV_NODE_BIN" \ --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth_proxy "$ETH_RPC_BIN" \ --eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \ > logs.log \
2> output.log 2> output.log