Mirror of https://github.com/pezkuwichain/revive-differential-tests.git, synced 2026-04-22 10:17:56 +00:00

Compare commits · 6 commits

| Author | SHA1 | Date |
|---|---|---|
| | 77a875ed6f | |
| | e06dd491b9 | |
| | a30d4f9b9e | |
| | b4118faa3d | |
| | 762b45ffd1 | |
| | 1ec1778e32 | |
Generated · +5 −3
```diff
@@ -4501,9 +4501,12 @@ name = "revive-dt-config"
 version = "0.1.0"
 dependencies = [
  "alloy",
+ "anyhow",
  "clap",
  "semver 1.0.26",
  "serde",
+ "serde_json",
+ "strum",
  "temp-dir",
 ]

@@ -4528,7 +4531,6 @@ dependencies = [
 "semver 1.0.26",
 "serde",
 "serde_json",
 "temp-dir",
 "tokio",
 "tracing",
 "tracing-appender",
@@ -5690,9 +5692,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"

 [[package]]
 name = "strum"
-version = "0.27.1"
+version = "0.27.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32"
+checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf"
 dependencies = [
  "strum_macros",
 ]
```
```diff
@@ -48,6 +48,7 @@ serde_with = { version = "3.14.0" }
 sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
+strum = { version = "0.27.2", features = ["derive"] }
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
 thiserror = "2"
```
````diff
@@ -187,10 +187,11 @@ The above corpus file instructs the tool to look for all of the test cases conta
 The simplest command to run this tool is the following:

 ```bash
-RUST_LOG="info" cargo run --release -- \
+RUST_LOG="info" cargo run --release -- execute-tests \
     --follower geth \
     --corpus path_to_your_corpus_file.json \
-    --workdir path_to_a_temporary_directory_to_cache_things_in \
-    --number-of-nodes 5 \
+    --working-directory path_to_a_temporary_directory_to_cache_things_in \
+    --concurrency.number-of-nodes 5 \
     > logs.log \
     2> output.log
 ```
````
```diff
@@ -11,14 +11,14 @@ use std::{

 use alloy::json_abi::JsonAbi;
 use alloy_primitives::Address;
-use anyhow::{Context, Result};
+use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};

 use revive_common::EVMVersion;
 use revive_dt_common::cached_fs::read_to_string;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};

 // Re-export this as it's a part of the compiler interface.
 pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -31,11 +31,13 @@ pub mod solc;

 pub trait SolidityCompiler: Sized {
     /// Instantiates a new compiler object.
     ///
-    /// Based on the given [`Arguments`] and [`VersionOrRequirement`] this function instantiates a
+    /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
     /// new compiler object. Certain implementations of this trait might choose to cache the
     /// compiler objects and return the same ones over and over again.
     fn new(
-        config: &Arguments,
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
         version: impl Into<Option<VersionOrRequirement>>,
     ) -> impl Future<Output = Result<Self>>;
```
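The trait now accepts any context type that can lend out the individual configuration sections through `AsRef`, instead of the monolithic `Arguments` struct. A minimal sketch of what satisfying that bound looks like on the caller side; the `MyContext` type is hypothetical, and in this change it is `ExecutionContext` (shown further below) that provides these impls:

```rust
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};

// Hypothetical caller-side context: holding the three sections and lending
// each one out via `AsRef` is all that `SolidityCompiler::new` requires.
struct MyContext {
    solc: SolcConfiguration,
    resolc: ResolcConfiguration,
    workdir: WorkingDirectoryConfiguration,
}

impl AsRef<SolcConfiguration> for MyContext {
    fn as_ref(&self) -> &SolcConfiguration {
        &self.solc
    }
}

impl AsRef<ResolcConfiguration> for MyContext {
    fn as_ref(&self) -> &ResolcConfiguration {
        &self.resolc
    }
}

impl AsRef<WorkingDirectoryConfiguration> for MyContext {
    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
        &self.workdir
    }
}
```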
```diff
@@ -9,7 +9,7 @@ use std::{

 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_solc_json_interface::{
     SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
     SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
@@ -21,7 +21,7 @@ use crate::{
 };

 use alloy::json_abi::JsonAbi;
-use anyhow::{Context, Result};
+use anyhow::{Context as _, Result};
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

@@ -39,7 +39,9 @@ struct ResolcInner {

 impl SolidityCompiler for Resolc {
     async fn new(
-        config: &Arguments,
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
         version: impl Into<Option<VersionOrRequirement>>,
     ) -> Result<Self> {
         /// This is a cache of all of the resolc compiler objects. Since we do not currently support
@@ -47,7 +49,9 @@ impl SolidityCompiler for Resolc {
         /// its version to the resolc compiler.
         static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);

-        let solc = Solc::new(config, version)
+        let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
+
+        let solc = Solc::new(&context, version)
             .await
             .context("Failed to create the solc compiler frontend for resolc")?;

@@ -56,7 +60,7 @@ impl SolidityCompiler for Resolc {
             .or_insert_with(|| {
                 Self(Arc::new(ResolcInner {
                     solc,
-                    resolc_path: config.resolc.clone(),
+                    resolc_path: resolc_configuration.path.clone(),
                 }))
             })
             .clone())
```
```diff
@@ -9,12 +9,12 @@ use std::{

 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::Arguments;
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_dt_solc_binaries::download_solc;

 use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};

-use anyhow::{Context, Result};
+use anyhow::{Context as _, Result};
 use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
@@ -38,25 +38,35 @@ struct SolcInner {

 impl SolidityCompiler for Solc {
     async fn new(
-        config: &Arguments,
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
         version: impl Into<Option<VersionOrRequirement>>,
     ) -> Result<Self> {
         // This is a cache for the compiler objects so that whenever the same compiler version is
         // requested the same object is returned. We do this as we do not want to keep cloning the
         // compiler around.
-        static COMPILERS_CACHE: LazyLock<DashMap<Version, Solc>> = LazyLock::new(Default::default);
+        static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
+            LazyLock::new(Default::default);
+
+        let working_directory_configuration =
+            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
+        let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);

         // We attempt to download the solc binary. Note the following: this call does the version
         // resolution for us. Therefore, even if the download didn't proceed, this function will
         // resolve the version requirement into a canonical version of the compiler. It's then up
         // to us to either use the provided path or not.
-        let version = version.into().unwrap_or_else(|| config.solc.clone().into());
-        let (version, path) = download_solc(config.directory(), version, false)
-            .await
-            .context("Failed to download/get path to solc binary")?;
+        let version = version
+            .into()
+            .unwrap_or_else(|| solc_configuration.version.clone().into());
+        let (version, path) =
+            download_solc(working_directory_configuration.as_path(), version, false)
+                .await
+                .context("Failed to download/get path to solc binary")?;

         Ok(COMPILERS_CACHE
-            .entry(version.clone())
+            .entry((path.clone(), version.clone()))
             .or_insert_with(|| {
                 Self(Arc::new(SolcInner {
                     solc_path: path,
```
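The cache key grows from `Version` to `(PathBuf, Version)`, so two different binaries that happen to report the same version no longer collide in the cache. A minimal sketch of the `LazyLock<DashMap>` caching pattern, assuming only the `dashmap` crate and the standard library (names here are illustrative, not the crate's API):

```rust
use std::{path::PathBuf, sync::LazyLock};

use dashmap::DashMap;

#[derive(Clone)]
struct Compiler {
    path: PathBuf,
}

// One global, lazily-initialized, concurrent cache: the first caller for a
// given (path, version) pair constructs the value, later callers get clones.
static CACHE: LazyLock<DashMap<(PathBuf, String), Compiler>> =
    LazyLock::new(Default::default);

fn get_compiler(path: PathBuf, version: String) -> Compiler {
    CACHE
        .entry((path.clone(), version))
        .or_insert_with(|| Compiler { path })
        .clone()
}
```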
```diff
@@ -2,13 +2,13 @@ use std::path::PathBuf;

 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc};
-use revive_dt_config::Arguments;
+use revive_dt_config::ExecutionContext;
 use semver::Version;

 #[tokio::test]
 async fn contracts_can_be_compiled_with_solc() {
     // Arrange
-    let args = Arguments::default();
+    let args = ExecutionContext::default();
     let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
         .await
         .unwrap();
@@ -49,7 +49,7 @@ async fn contracts_can_be_compiled_with_solc() {
 #[tokio::test]
 async fn contracts_can_be_compiled_with_resolc() {
     // Arrange
-    let args = Arguments::default();
+    let args = ExecutionContext::default();
     let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
         .await
         .unwrap();
```
```diff
@@ -10,10 +10,13 @@ rust-version.workspace = true

 [dependencies]
 alloy = { workspace = true }
+anyhow = { workspace = true }
 clap = { workspace = true }
 semver = { workspace = true }
 temp-dir = { workspace = true }
 serde = { workspace = true }
+serde_json = { workspace = true }
+strum = { workspace = true }

 [lints]
 workspace = true
```
+481 −135
```diff
@@ -2,215 +2,561 @@

 use std::{
     fmt::Display,
     fs::read_to_string,
+    ops::Deref,
     path::{Path, PathBuf},
-    sync::LazyLock,
+    str::FromStr,
+    sync::{Arc, LazyLock, OnceLock},
+    time::Duration,
 };

-use alloy::{network::EthereumWallet, signers::local::PrivateKeySigner};
-use clap::{Parser, ValueEnum};
+use alloy::{
+    genesis::Genesis,
+    hex::ToHexExt,
+    network::EthereumWallet,
+    primitives::{FixedBytes, U256},
+    signers::local::PrivateKeySigner,
+};
+use clap::{Parser, ValueEnum, ValueHint};
 use semver::Version;
-use serde::{Deserialize, Serialize};
+use serde::{Serialize, Serializer};
+use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
 use temp_dir::TempDir;

-#[derive(Debug, Parser, Clone, Serialize, Deserialize)]
+#[derive(Clone, Debug, Parser, Serialize)]
 #[command(name = "retester")]
-pub struct Arguments {
-    /// The `solc` version to use if the test didn't specify it explicitly.
-    #[arg(long = "solc", short, default_value = "0.8.29")]
-    pub solc: Version,
+pub enum Context {
+    /// Executes tests in the MatterLabs format differentially against a leader and a follower.
+    ExecuteTests(ExecutionContext),
+}

-    /// Use the Wasm compiler versions.
-    #[arg(long = "wasm")]
-    pub wasm: bool,
+impl Context {
+    pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration {
+        self.as_ref()
+    }

-    /// The path to the `resolc` executable to be tested.
+    pub fn report_configuration(&self) -> &ReportConfiguration {
+        self.as_ref()
+    }
+}

+impl AsRef<WorkingDirectoryConfiguration> for Context {
+    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
+        match self {
+            Context::ExecuteTests(execution_context) => &execution_context.working_directory,
+        }
+    }
+}

+impl AsRef<ReportConfiguration> for Context {
+    fn as_ref(&self) -> &ReportConfiguration {
+        match self {
+            Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
+        }
+    }
+}

+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ExecutionContext {
+    /// The working directory that the program will use for all of the temporary artifacts needed at
+    /// runtime.
+    ///
-    /// By default it uses the `resolc` binary found in `$PATH`.
-    ///
-    /// If `--wasm` is set, this should point to the resolc Wasm ile.
-    #[arg(long = "resolc", short, default_value = "resolc")]
-    pub resolc: PathBuf,
+    /// If not specified, then a temporary directory will be created and used by the program for all
+    /// temporary artifacts.
+    #[clap(
+        short,
+        long,
+        default_value = "",
+        value_hint = ValueHint::DirPath,
+    )]
+    pub working_directory: WorkingDirectoryConfiguration,

+    /// The differential testing leader node implementation.
+    #[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
+    pub leader: TestingPlatform,

+    /// The differential testing follower node implementation.
+    #[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
+    pub follower: TestingPlatform,

     /// A list of test corpus JSON files to be tested.
     #[arg(long = "corpus", short)]
     pub corpus: Vec<PathBuf>,

-    /// A place to store temporary artifacts during test execution.
-    ///
-    /// Creates a temporary dir if not specified.
-    #[arg(long = "workdir", short)]
-    pub working_directory: Option<PathBuf>,
+    /// Configuration parameters for the solc compiler.
+    #[clap(flatten, next_help_heading = "Solc Configuration")]
+    pub solc_configuration: SolcConfiguration,

-    /// Add a tempdir manually if `working_directory` was not given.
+    /// Configuration parameters for the resolc compiler.
+    #[clap(flatten, next_help_heading = "Resolc Configuration")]
+    pub resolc_configuration: ResolcConfiguration,

+    /// Configuration parameters for the geth node.
+    #[clap(flatten, next_help_heading = "Geth Configuration")]
+    pub geth_configuration: GethConfiguration,

+    /// Configuration parameters for the Kitchensink.
+    #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
+    pub kitchensink_configuration: KitchensinkConfiguration,

+    /// Configuration parameters for the Revive Dev Node.
+    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
+    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

+    /// Configuration parameters for the Eth Rpc.
+    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
+    pub eth_rpc_configuration: EthRpcConfiguration,

+    /// Configuration parameters for the genesis.
+    #[clap(flatten, next_help_heading = "Genesis Configuration")]
+    pub genesis_configuration: GenesisConfiguration,

+    /// Configuration parameters for the wallet.
+    #[clap(flatten, next_help_heading = "Wallet Configuration")]
+    pub wallet_configuration: WalletConfiguration,

+    /// Configuration parameters for concurrency.
+    #[clap(flatten, next_help_heading = "Concurrency Configuration")]
+    pub concurrency_configuration: ConcurrencyConfiguration,

+    /// Configuration parameters for the compilers and compilation.
+    #[clap(flatten, next_help_heading = "Compilation Configuration")]
+    pub compilation_configuration: CompilationConfiguration,

+    /// Configuration parameters for the report.
+    #[clap(flatten, next_help_heading = "Report Configuration")]
+    pub report_configuration: ReportConfiguration,
 }

+impl Default for ExecutionContext {
+    fn default() -> Self {
+        Self::parse_from(["execution-context"])
+    }
+}

+impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &WorkingDirectoryConfiguration {
+        &self.working_directory
+    }
+}

+impl AsRef<SolcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &SolcConfiguration {
+        &self.solc_configuration
+    }
+}

+impl AsRef<ResolcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ResolcConfiguration {
+        &self.resolc_configuration
+    }
+}

+impl AsRef<GethConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &GethConfiguration {
+        &self.geth_configuration
+    }
+}

+impl AsRef<KitchensinkConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &KitchensinkConfiguration {
+        &self.kitchensink_configuration
+    }
+}

+impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
+        &self.revive_dev_node_configuration
+    }
+}

+impl AsRef<EthRpcConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &EthRpcConfiguration {
+        &self.eth_rpc_configuration
+    }
+}

+impl AsRef<GenesisConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &GenesisConfiguration {
+        &self.genesis_configuration
+    }
+}

+impl AsRef<WalletConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &WalletConfiguration {
+        &self.wallet_configuration
+    }
+}

+impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ConcurrencyConfiguration {
+        &self.concurrency_configuration
+    }
+}

+impl AsRef<CompilationConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &CompilationConfiguration {
+        &self.compilation_configuration
+    }
+}

+impl AsRef<ReportConfiguration> for ExecutionContext {
+    fn as_ref(&self) -> &ReportConfiguration {
+        &self.report_configuration
+    }
+}
```
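Each configuration section is a standalone `Parser` struct that gets `flatten`ed into `ExecutionContext` under its own help heading, which is what produces the namespaced `solc.version`-style flags. A trimmed-down, hypothetical sketch of the same pattern (the `SolcSection`/`Cli` names are illustrative):

```rust
use std::path::PathBuf;

use clap::Parser;

// One logical section of the CLI, reusable and testable on its own.
#[derive(Debug, Parser)]
struct SolcSection {
    /// Default solc version to use when a test does not override it.
    #[clap(long = "solc.version", default_value = "0.8.29")]
    version: String,
}

#[derive(Debug, Parser)]
struct Cli {
    /// Corpus files to test.
    #[clap(long = "corpus", short)]
    corpus: Vec<PathBuf>,

    // Flattened into the top-level command; its flags show up under a
    // dedicated heading in `--help`.
    #[clap(flatten, next_help_heading = "Solc Configuration")]
    solc: SolcSection,
}

fn main() {
    let cli = Cli::parse();
    println!("solc version: {}", cli.solc.version);
}
```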
```diff
+/// A set of configuration parameters for Solc.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct SolcConfiguration {
+    /// Specifies the default version of the Solc compiler that should be used if there is no
+    /// override specified by one of the test cases.
+    #[clap(long = "solc.version", default_value = "0.8.29")]
+    pub version: Version,
+}

+/// A set of configuration parameters for Resolc.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ResolcConfiguration {
+    /// Specifies the path of the resolc compiler to be used by the tool.
+    ///
-    /// We attach it here because [TempDir] prunes itself on drop.
+    /// If this is not specified, then the tool assumes that it should use the resolc binary that's
+    /// provided in the user's $PATH.
+    #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
+    pub path: PathBuf,
+}

+/// A set of configuration parameters for Geth.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct GethConfiguration {
+    /// Specifies the path of the geth node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the geth binary that's
+    /// provided in the user's $PATH.
+    #[clap(id = "geth.path", long = "geth.path", default_value = "geth")]
+    pub path: PathBuf,

+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "geth.start-timeout-ms",
+        long = "geth.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}

+/// A set of configuration parameters for Kitchensink.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct KitchensinkConfiguration {
+    /// Specifies the path of the kitchensink node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the kitchensink binary
+    /// that's provided in the user's $PATH.
+    #[clap(
+        id = "kitchensink.path",
+        long = "kitchensink.path",
+        default_value = "substrate-node"
+    )]
+    pub path: PathBuf,

+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "kitchensink.start-timeout-ms",
+        long = "kitchensink.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,

+    /// This configures the tool to use Kitchensink instead of using the revive-dev-node.
+    #[clap(long = "kitchensink.dont-use-dev-node")]
+    pub use_kitchensink: bool,
+}

+/// A set of configuration parameters for the revive dev node.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ReviveDevNodeConfiguration {
+    /// Specifies the path of the revive dev node to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the revive dev node binary
+    /// that's provided in the user's $PATH.
+    #[clap(
+        id = "revive-dev-node.path",
+        long = "revive-dev-node.path",
+        default_value = "revive-dev-node"
+    )]
+    pub path: PathBuf,

+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "revive-dev-node.start-timeout-ms",
+        long = "revive-dev-node.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}

+/// A set of configuration parameters for the ETH RPC.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct EthRpcConfiguration {
+    /// Specifies the path of the ETH RPC to be used by the tool.
+    ///
+    /// If this is not specified, then the tool assumes that it should use the ETH RPC binary
+    /// that's provided in the user's $PATH.
+    #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")]
+    pub path: PathBuf,

+    /// The amount of time to wait upon startup before considering that the node timed out.
+    #[clap(
+        id = "eth-rpc.start-timeout-ms",
+        long = "eth-rpc.start-timeout-ms",
+        default_value = "5000",
+        value_parser = parse_duration
+    )]
+    pub start_timeout_ms: Duration,
+}

+/// A set of configuration parameters for the genesis.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct GenesisConfiguration {
+    /// Specifies the path of the genesis file to use for the nodes that are started.
+    ///
+    /// This is expected to be the path of a JSON geth genesis file.
+    #[clap(id = "genesis.path", long = "genesis.path")]
+    path: Option<PathBuf>,

+    /// The genesis object found at the provided path.
+    #[clap(skip)]
+    #[serde(skip)]
-    pub temp_dir: Option<&'static TempDir>,
+    genesis: OnceLock<Genesis>,
 }

-    /// The path to the `geth` executable.
-    ///
-    /// By default it uses `geth` binary found in `$PATH`.
-    #[arg(short, long = "geth", default_value = "geth")]
-    pub geth: PathBuf,
+impl GenesisConfiguration {
+    pub fn genesis(&self) -> anyhow::Result<&Genesis> {
+        static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
+            let genesis = include_str!("../../../genesis.json");
+            serde_json::from_str(genesis).unwrap()
+        });

-    /// The maximum time in milliseconds to wait for geth to start.
-    #[arg(long = "geth-start-timeout", default_value = "5000")]
-    pub geth_start_timeout: u64,
+        match self.genesis.get() {
+            Some(genesis) => Ok(genesis),
+            None => {
+                let genesis = match self.path.as_ref() {
+                    Some(genesis_path) => {
+                        let genesis_content = read_to_string(genesis_path)?;
+                        serde_json::from_str(genesis_content.as_str())?
+                    }
+                    None => DEFAULT_GENESIS.clone(),
+                };
+                Ok(self.genesis.get_or_init(|| genesis))
+            }
+        }
+    }
+}

-    /// Configure nodes according to this genesis.json file.
-    #[arg(long = "genesis", default_value = "genesis.json")]
-    pub genesis_file: PathBuf,

-    /// The signing account private key.
-    #[arg(
-        short,
-        long = "account",
+/// A set of configuration parameters for the wallet.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct WalletConfiguration {
+    /// The private key of the default signer.
+    #[clap(
+        long = "wallet.default-private-key",
         default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
     )]
-    pub account: String,
+    #[serde(serialize_with = "serialize_private_key")]
+    default_key: PrivateKeySigner,

     /// This argument controls which private keys the nodes should have access to and be added to
     /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
     /// of the node.
-    #[arg(long = "private-keys-count", default_value_t = 100_000)]
-    pub private_keys_to_add: usize,
+    #[clap(long = "wallet.additional-keys", default_value_t = 100_000)]
+    additional_keys: usize,

-    /// The differential testing leader node implementation.
-    #[arg(short, long = "leader", default_value = "geth")]
-    pub leader: TestingPlatform,
+    /// The wallet object that will be used.
+    #[clap(skip)]
+    #[serde(skip)]
+    wallet: OnceLock<Arc<EthereumWallet>>,
 }

-    /// The differential testing follower node implementation.
-    #[arg(short, long = "follower", default_value = "kitchensink")]
-    pub follower: TestingPlatform,
+impl WalletConfiguration {
+    pub fn wallet(&self) -> Arc<EthereumWallet> {
+        self.wallet
+            .get_or_init(|| {
+                let mut wallet = EthereumWallet::new(self.default_key.clone());
+                for signer in (1..=self.additional_keys)
+                    .map(|id| U256::from(id))
+                    .map(|id| id.to_be_bytes::<32>())
+                    .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
+                {
+                    wallet.register_signer(signer);
+                }
+                Arc::new(wallet)
+            })
+            .clone()
+    }
+}

+fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    value.to_bytes().encode_hex().serialize(serializer)
+}

+/// A set of configuration for concurrency.
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ConcurrencyConfiguration {
     /// Determines the amount of nodes that will be spawned for each chain.
-    #[arg(long, default_value = "1")]
+    #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
     pub number_of_nodes: usize,

     /// Determines the amount of tokio worker threads that will be used.
     #[arg(
-        long,
+        long = "concurrency.number-of-threads",
         default_value_t = std::thread::available_parallelism()
             .map(|n| n.get())
             .unwrap_or(1)
     )]
     pub number_of_threads: usize,

-    /// Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes.
-    #[arg(long)]
-    pub number_concurrent_tasks: Option<usize>,

-    /// Extract problems back to the test corpus.
-    #[arg(short, long = "extract-problems")]
-    pub extract_problems: bool,

-    /// The path to the `kitchensink` executable.
+    /// Determines the amount of concurrent tasks that will be spawned to run tests.
     ///
-    /// By default it uses `substrate-node` binary found in `$PATH`.
-    #[arg(short, long = "kitchensink", default_value = "substrate-node")]
-    pub kitchensink: PathBuf,
+    /// Defaults to 10 x the number of nodes.
+    #[arg(long = "concurrency.number-of-concurrent-tasks")]
+    number_concurrent_tasks: Option<usize>,

-    /// The path to the `revive-dev-node` executable.
-    ///
-    /// By default it uses `revive-dev-node` binary found in `$PATH`.
-    #[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
-    pub revive_dev_node: PathBuf,
+    /// Determines if the concurrency limit should be ignored or not.
+    #[arg(long = "concurrency.ignore-concurrency-limit")]
+    ignore_concurrency_limit: bool,
 }

-    /// By default the tool uses the revive-dev-node when it's running differential tests against
-    /// PolkaVM since the dev-node is much faster than kitchensink. This flag allows the caller to
-    /// configure the tool to use kitchensink rather than the dev-node.
-    #[arg(long)]
-    pub use_kitchensink_not_dev_node: bool,

-    /// The path to the `eth_proxy` executable.
-    ///
-    /// By default it uses `eth-rpc` binary found in `$PATH`.
-    #[arg(short = 'p', long = "eth_proxy", default_value = "eth-rpc")]
-    pub eth_proxy: PathBuf,
+impl ConcurrencyConfiguration {
+    pub fn concurrency_limit(&self) -> Option<usize> {
+        match self.ignore_concurrency_limit {
+            true => None,
+            false => Some(
+                self.number_concurrent_tasks
+                    .unwrap_or(20 * self.number_of_nodes),
+            ),
+        }
+    }
+}
```
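`GenesisConfiguration::genesis` and `WalletConfiguration::wallet` follow the same lazy-initialization pattern: a `#[clap(skip)]`/`#[serde(skip)]` field holding a `OnceLock` that is populated at most once on first access (the genesis variant checks `get()` first because its initializer is fallible). A minimal, infallible sketch of the pattern with hypothetical names:

```rust
use std::sync::OnceLock;

struct Config {
    raw: String,
    // Skipped by the CLI parser and serializer in the real code; computed
    // at most once, then reused by every later caller.
    parsed: OnceLock<u64>,
}

impl Config {
    fn value(&self) -> u64 {
        *self
            .parsed
            .get_or_init(|| self.raw.parse().unwrap_or_default())
    }
}

fn main() {
    let config = Config {
        raw: "42".into(),
        parsed: OnceLock::new(),
    };
    assert_eq!(config.value(), 42); // parses on first use
    assert_eq!(config.value(), 42); // cached thereafter
}
```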
```diff
+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct CompilationConfiguration {
     /// Controls if the compilation cache should be invalidated or not.
-    #[arg(short, long)]
+    #[arg(long = "compilation.invalidate-cache")]
     pub invalidate_compilation_cache: bool,
 }

+#[derive(Clone, Debug, Parser, Serialize)]
+pub struct ReportConfiguration {
     /// Controls if the compiler input is included in the final report.
     #[clap(long = "report.include-compiler-input")]
-    pub report_include_compiler_input: bool,
+    pub include_compiler_input: bool,

     /// Controls if the compiler output is included in the final report.
     #[clap(long = "report.include-compiler-output")]
-    pub report_include_compiler_output: bool,
+    pub include_compiler_output: bool,
 }

-impl Arguments {
-    /// Return the configured working directory with the following precedence:
-    /// 1. `self.working_directory` if it was provided.
-    /// 2. `self.temp_dir` if it it was provided
-    /// 3. Panic.
-    pub fn directory(&self) -> &Path {
-        if let Some(path) = &self.working_directory {
-            return path.as_path();
-        }
+/// Represents the working directory that the program uses.
+#[derive(Debug, Clone)]
+pub enum WorkingDirectoryConfiguration {
+    /// A temporary directory is used as the working directory. This will be removed when dropped.
+    TemporaryDirectory(Arc<TempDir>),
+    /// A directory with a path is used as the working directory.
+    Path(PathBuf),
+}

-        if let Some(temp_dir) = &self.temp_dir {
-            return temp_dir.path();
-        }
-
-        panic!("should have a workdir configured")
-    }

-    /// Return the number of concurrent tasks to run. This is provided via the
-    /// `--number-concurrent-tasks` argument, and otherwise defaults to --number-of-nodes * 20.
-    pub fn number_of_concurrent_tasks(&self) -> usize {
-        self.number_concurrent_tasks
-            .unwrap_or(20 * self.number_of_nodes)
-    }

-    /// Try to parse `self.account` into a [PrivateKeySigner],
-    /// panicing on error.
-    pub fn wallet(&self) -> EthereumWallet {
-        let signer = self
-            .account
-            .parse::<PrivateKeySigner>()
-            .unwrap_or_else(|error| {
-                panic!("private key '{}' parsing error: {error}", self.account);
-            });
-        EthereumWallet::new(signer)
+impl WorkingDirectoryConfiguration {
+    pub fn as_path(&self) -> &Path {
+        self.as_ref()
     }
 }

-impl Default for Arguments {
+impl Deref for WorkingDirectoryConfiguration {
+    type Target = Path;
+
+    fn deref(&self) -> &Self::Target {
+        self.as_path()
+    }
+}

+impl AsRef<Path> for WorkingDirectoryConfiguration {
+    fn as_ref(&self) -> &Path {
+        match self {
+            WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(),
+            WorkingDirectoryConfiguration::Path(path) => path.as_path(),
+        }
+    }
+}

+impl Default for WorkingDirectoryConfiguration {
     fn default() -> Self {
-        static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
+        TempDir::new()
+            .map(Arc::new)
+            .map(Self::TemporaryDirectory)
+            .expect("Failed to create the temporary directory")
     }
 }

-        let default = Arguments::parse_from(["retester"]);
+impl FromStr for WorkingDirectoryConfiguration {
+    type Err = anyhow::Error;

-        Arguments {
-            temp_dir: Some(&TEMP_DIR),
-            ..default
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "" => Ok(Default::default()),
+            _ => Ok(Self::Path(PathBuf::from(s))),
         }
     }
 }

+impl Display for WorkingDirectoryConfiguration {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(&self.as_path().display(), f)
+    }
+}

+impl Serialize for WorkingDirectoryConfiguration {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        self.as_path().serialize(serializer)
+    }
+}

+fn parse_duration(s: &str) -> anyhow::Result<Duration> {
+    u64::from_str(s)
+        .map(Duration::from_millis)
+        .map_err(Into::into)
+}

 /// The Solidity compatible node implementation.
 ///
 /// This describes the solutions to be tested against on a high level.
 #[derive(
-    Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize,
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
 )]
 #[clap(rename_all = "lower")]
+#[strum(serialize_all = "kebab-case")]
 pub enum TestingPlatform {
     /// The go-ethereum reference full node EVM implementation.
     Geth,
-    /// The kitchensink runtime provides the PolkaVM (PVM) based node implentation.
+    /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
     Kitchensink,
 }

-impl Display for TestingPlatform {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            Self::Geth => f.write_str("geth"),
-            Self::Kitchensink => f.write_str("revive"),
-        }
-    }
-}
```
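The strum derives (`Display`, `EnumString`, `AsRefStr`, `IntoStaticStr`) combined with `serialize_all = "kebab-case"` replace the hand-written `Display` impl deleted above. One observable difference: the deleted manual impl printed `Kitchensink` as `revive`, while the derived form prints `kitchensink`. A small sketch of the derived behaviour:

```rust
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};

// Same derive set as in the diff; one serialization policy drives
// Display, FromStr (via EnumString), AsRef<str>, and &'static str.
#[derive(Debug, PartialEq, Display, EnumString, AsRefStr, IntoStaticStr)]
#[strum(serialize_all = "kebab-case")]
enum TestingPlatform {
    Geth,
    Kitchensink,
}

fn main() {
    assert_eq!(TestingPlatform::Geth.to_string(), "geth");
    assert_eq!(
        "kitchensink".parse::<TestingPlatform>().unwrap(),
        TestingPlatform::Kitchensink
    );
}
```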
```diff
@@ -35,7 +35,6 @@ tracing-subscriber = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
-temp-dir = { workspace = true }

 [lints]
 workspace = true
```
```diff
@@ -18,7 +18,7 @@ use alloy::{
     primitives::Address,
     rpc::types::{TransactionRequest, trace::geth::DiffMode},
 };
-use anyhow::Context;
+use anyhow::Context as _;
 use futures::TryStreamExt;
 use indexmap::IndexMap;
 use revive_dt_format::traits::{ResolutionContext, ResolverApi};
```
+63 −98
```diff
@@ -5,7 +5,7 @@ use std::{
     collections::{BTreeMap, HashMap},
     io::{BufWriter, Write, stderr},
     path::Path,
-    sync::{Arc, LazyLock},
+    sync::Arc,
     time::Instant,
 };

@@ -13,7 +13,7 @@ use alloy::{
     network::{Ethereum, TransactionBuilder},
     rpc::types::TransactionRequest,
 };
-use anyhow::Context;
+use anyhow::Context as _;
 use clap::Parser;
 use futures::stream;
 use futures::{Stream, StreamExt};
@@ -24,15 +24,13 @@ use revive_dt_report::{
     TestSpecificReporter, TestSpecifier,
 };
 use serde_json::{Value, json};
-use temp_dir::TempDir;
 use tokio::try_join;
 use tracing::{debug, error, info, info_span, instrument};
-use tracing_appender::non_blocking::WorkerGuard;
 use tracing_subscriber::{EnvFilter, FmtSubscriber};

 use revive_dt_common::{iterators::EitherIter, types::Mode};
 use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
-use revive_dt_config::*;
+use revive_dt_config::{Context, *};
 use revive_dt_core::{
     Geth, Kitchensink, Platform,
     driver::{CaseDriver, CaseState},
@@ -48,58 +46,8 @@ use revive_dt_node::{Node, pool::NodePool};

 use crate::cached_compiler::CachedCompiler;

-static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
-
 fn main() -> anyhow::Result<()> {
     let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?;
-    info!(
-        leader = args.leader.to_string(),
-        follower = args.follower.to_string(),
-        working_directory = %args.directory().display(),
-        number_of_nodes = args.number_of_nodes,
-        invalidate_compilation_cache = args.invalidate_compilation_cache,
-        "Differential testing tool has been initialized"
-    );
-
-    let (reporter, report_aggregator_task) = ReportAggregator::new(args.clone()).into_task();
-
-    let number_of_threads = args.number_of_threads;
-    let body = async move {
-        let tests = collect_corpora(&args)
-            .context("Failed to collect corpus files from provided arguments")?
-            .into_iter()
-            .inspect(|(corpus, _)| {
-                reporter
-                    .report_corpus_file_discovery_event(corpus.clone())
-                    .expect("Can't fail")
-            })
-            .flat_map(|(_, files)| files.into_iter())
-            .inspect(|metadata_file| {
-                reporter
-                    .report_metadata_file_discovery_event(
-                        metadata_file.metadata_file_path.clone(),
-                        metadata_file.content.clone(),
-                    )
-                    .expect("Can't fail")
-            })
-            .collect::<Vec<_>>();
-
-        execute_corpus(&args, &tests, reporter, report_aggregator_task)
-            .await
-            .context("Failed to execute corpus")?;
-        Ok(())
-    };
-
-    tokio::runtime::Builder::new_multi_thread()
-        .worker_threads(number_of_threads)
-        .enable_all()
-        .build()
-        .expect("Failed building the Runtime")
-        .block_on(body)
 }

 fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
-    let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
+    let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
         .lossy(false)
         // Assuming that each line contains 255 characters and that each character is one byte, then
         // this means that our buffer is about 4GBs large.
@@ -118,31 +66,51 @@ fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> {
     tracing::subscriber::set_global_default(subscriber)?;
     info!("Differential testing tool is starting");

-    let mut args = Arguments::parse();
+    let context = Context::try_parse()?;
+    let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();

-    if args.corpus.is_empty() {
-        anyhow::bail!("no test corpus specified");
-    }
+    match context {
+        Context::ExecuteTests(context) => {
+            let tests = collect_corpora(&context)
+                .context("Failed to collect corpus files from provided arguments")?
+                .into_iter()
+                .inspect(|(corpus, _)| {
+                    reporter
+                        .report_corpus_file_discovery_event(corpus.clone())
+                        .expect("Can't fail")
+                })
+                .flat_map(|(_, files)| files.into_iter())
+                .inspect(|metadata_file| {
+                    reporter
+                        .report_metadata_file_discovery_event(
+                            metadata_file.metadata_file_path.clone(),
+                            metadata_file.content.clone(),
+                        )
+                        .expect("Can't fail")
+                })
+                .collect::<Vec<_>>();

-    match args.working_directory.as_ref() {
-        Some(dir) => {
-            if !dir.exists() {
-                anyhow::bail!("workdir {} does not exist", dir.display());
-            }
-        }
-        None => {
-            args.temp_dir = Some(&TEMP_DIR);
+            tokio::runtime::Builder::new_multi_thread()
+                .worker_threads(context.concurrency_configuration.number_of_threads)
+                .enable_all()
+                .build()
+                .expect("Failed building the Runtime")
+                .block_on(async move {
+                    execute_corpus(context, &tests, reporter, report_aggregator_task)
+                        .await
+                        .context("Failed to execute corpus")
+                })
         }
     }
-
-    Ok((args, guard))
 }

 #[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
-fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
+fn collect_corpora(
+    context: &ExecutionContext,
+) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
     let mut corpora = HashMap::new();

-    for path in &args.corpus {
+    for path in &context.corpus {
         let span = info_span!("Processing corpus file", path = %path.display());
         let _guard = span.enter();

@@ -160,7 +128,7 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<Metad
 }

 async fn run_driver<L, F>(
-    args: &Arguments,
+    context: ExecutionContext,
     metadata_files: &[MetadataFile],
     reporter: Reporter,
     report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
@@ -171,20 +139,20 @@ where
     L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
     F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
 {
-    let leader_nodes =
-        NodePool::<L::Blockchain>::new(args).context("Failed to initialize leader node pool")?;
-    let follower_nodes =
-        NodePool::<F::Blockchain>::new(args).context("Failed to initialize follower node pool")?;
+    let leader_nodes = NodePool::<L::Blockchain>::new(context.clone())
+        .context("Failed to initialize leader node pool")?;
+    let follower_nodes = NodePool::<F::Blockchain>::new(context.clone())
+        .context("Failed to initialize follower node pool")?;

     let tests_stream = tests_stream(
-        args,
+        &context,
         metadata_files.iter(),
         &leader_nodes,
         &follower_nodes,
         reporter.clone(),
     )
     .await;
-    let driver_task = start_driver_task::<L, F>(args, tests_stream)
+    let driver_task = start_driver_task::<L, F>(&context, tests_stream)
         .await
         .context("Failed to start driver task")?;
     let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -196,7 +164,7 @@ where
 }

 async fn tests_stream<'a, L, F>(
-    args: &Arguments,
+    args: &ExecutionContext,
     metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
     leader_node_pool: &'a NodePool<L::Blockchain>,
     follower_node_pool: &'a NodePool<F::Blockchain>,
@@ -320,7 +288,7 @@ where
 }

 async fn start_driver_task<'a, L, F>(
-    args: &Arguments,
+    context: &ExecutionContext,
     tests: impl Stream<Item = Test<'a, L, F>>,
 ) -> anyhow::Result<impl Future<Output = ()>>
 where
@@ -333,25 +301,22 @@ where
 {
     info!("Starting driver task");

-    let number_concurrent_tasks = args.number_of_concurrent_tasks();
     let cached_compiler = Arc::new(
         CachedCompiler::new(
-            args.directory().join("compilation_cache"),
-            args.invalidate_compilation_cache,
+            context
+                .working_directory
+                .as_path()
+                .join("compilation_cache"),
+            context
+                .compilation_configuration
+                .invalidate_compilation_cache,
         )
         .await
        .context("Failed to initialize cached compiler")?,
     );

     Ok(tests.for_each_concurrent(
         // We want to limit the concurrent tasks here because:
         //
         // 1. We don't want to overwhelm the nodes with too many requests, leading to responses timing out.
         // 2. We don't want to open too many files at once, leading to the OS running out of file descriptors.
         //
         // By default, we allow a maximum of 10 ongoing requests per node in order to limit (1), and assume that
         // this number will automatically be low enough to address (2). The user can override this.
-        Some(number_concurrent_tasks),
+        context.concurrency_configuration.concurrency_limit(),
         move |test| {
             let cached_compiler = cached_compiler.clone();

@@ -387,8 +352,7 @@ where
     ))
 }

-#[allow(clippy::uninlined_format_args)]
-#[allow(irrefutable_let_patterns)]
+#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
 async fn start_cli_reporting_task(reporter: Reporter) {
     let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
     drop(reporter);
@@ -710,17 +674,18 @@ where
 }

 async fn execute_corpus(
-    args: &Arguments,
+    context: ExecutionContext,
     tests: &[MetadataFile],
     reporter: Reporter,
     report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
 ) -> anyhow::Result<()> {
-    match (&args.leader, &args.follower) {
+    match (&context.leader, &context.follower) {
         (TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
-            run_driver::<Geth, Kitchensink>(args, tests, reporter, report_aggregator_task).await?
+            run_driver::<Geth, Kitchensink>(context, tests, reporter, report_aggregator_task)
+                .await?
         }
         (TestingPlatform::Geth, TestingPlatform::Geth) => {
-            run_driver::<Geth, Geth>(args, tests, reporter, report_aggregator_task).await?
+            run_driver::<Geth, Geth>(context, tests, reporter, report_aggregator_task).await?
         }
         _ => unimplemented!(),
     }
```
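The explicit `Some(number_concurrent_tasks)` is replaced by `ConcurrencyConfiguration::concurrency_limit()`, which returns an `Option<usize>`. Since `for_each_concurrent` treats `None` as unbounded, the new `--concurrency.ignore-concurrency-limit` flag can lift the cap entirely instead of approximating it with a large number. A minimal sketch of that mechanism (assuming the `futures` and `tokio` crates):

```rust
use futures::{StreamExt, stream};

#[tokio::main]
async fn main() {
    // None would mean "no concurrency limit at all".
    let limit: Option<usize> = Some(2);

    stream::iter(0..8)
        .for_each_concurrent(limit, |i| async move {
            // At most `limit` of these futures are in flight at once.
            println!("processing test {i}");
        })
        .await;
}
```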
```diff
@@ -8,7 +8,7 @@ use alloy::{
     rpc::types::TransactionRequest,
 };
 use alloy_primitives::{FixedBytes, utils::parse_units};
-use anyhow::Context;
+use anyhow::Context as _;
 use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
 use semver::VersionReq;
 use serde::{Deserialize, Serialize};

@@ -1,4 +1,4 @@
-use anyhow::Context;
+use anyhow::Context as _;
 use regex::Regex;
 use revive_dt_common::iterators::EitherIter;
 use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
```
+46 −65
```diff
@@ -17,9 +17,7 @@ use alloy::{
     eips::BlockNumberOrTag,
     genesis::{Genesis, GenesisAccount},
     network::{Ethereum, EthereumWallet, NetworkWallet},
-    primitives::{
-        Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256,
-    },
+    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
     providers::{
         Provider, ProviderBuilder,
         ext::DebugApi,
@@ -29,9 +27,8 @@ use alloy::{
         EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
         trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
     },
-    signers::local::PrivateKeySigner,
 };
-use anyhow::Context;
+use anyhow::Context as _;
 use revive_common::EVMVersion;
 use tracing::{Instrument, instrument};

@@ -39,7 +36,7 @@ use revive_dt_common::{
     fs::clear_directory,
     futures::{PollingWaitBehavior, poll},
 };
-use revive_dt_config::Arguments;
+use revive_dt_config::*;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;

@@ -64,7 +61,7 @@ pub struct GethNode {
     geth: PathBuf,
     id: u32,
     handle: Option<Child>,
-    start_timeout: u64,
+    start_timeout: Duration,
     wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
     chain_id_filler: ChainIdFiller,
@@ -97,7 +94,7 @@ impl GethNode {

     /// Create the node directory and call `geth init` to configure the genesis.
     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
+    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
         let _ = clear_directory(&self.base_directory);
         let _ = clear_directory(&self.logs_directory);

@@ -106,8 +103,6 @@ impl GethNode {
         create_dir_all(&self.logs_directory)
             .context("Failed to create logs directory for geth node")?;

-        let mut genesis = serde_json::from_str::<Genesis>(&genesis)
-            .context("Failed to deserialize geth genesis JSON")?;
         for signer_address in
             <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
         {
@@ -240,7 +235,7 @@ impl GethNode {
             .open(self.geth_stderr_log_file_path())
             .context("Failed to open geth stderr logs file for readiness check")?;

-        let maximum_wait_time = Duration::from_millis(self.start_timeout);
+        let maximum_wait_time = self.start_timeout;
         let mut stderr = BufReader::new(logs_file).lines();
         let mut lines = vec![];
         loop {
@@ -256,7 +251,7 @@ impl GethNode {
             if Instant::now().duration_since(start_time) > maximum_wait_time {
                 anyhow::bail!(
                     "Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
-                    self.start_timeout,
+                    self.start_timeout.as_millis(),
                     lines.join("\n")
                 );
             }
@@ -556,30 +551,40 @@ impl ResolverApi for GethNode {
 }

 impl Node for GethNode {
-    fn new(config: &Arguments) -> Self {
-        let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
+    fn new(
+        context: impl AsRef<WorkingDirectoryConfiguration>
+            + AsRef<ConcurrencyConfiguration>
+            + AsRef<GenesisConfiguration>
+            + AsRef<WalletConfiguration>
+            + AsRef<GethConfiguration>
+            + AsRef<KitchensinkConfiguration>
+            + AsRef<ReviveDevNodeConfiguration>
+            + AsRef<EthRpcConfiguration>
+            + Clone,
+    ) -> Self {
+        let working_directory_configuration =
+            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
+        let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
+        let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
+
+        let geth_directory = working_directory_configuration
+            .as_path()
+            .join(Self::BASE_DIRECTORY);
         let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
         let base_directory = geth_directory.join(id.to_string());

-        let mut wallet = config.wallet();
-        for signer in (1..=config.private_keys_to_add)
-            .map(|id| U256::from(id))
-            .map(|id| id.to_be_bytes::<32>())
-            .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
-        {
-            wallet.register_signer(signer);
-        }
+        let wallet = wallet_configuration.wallet();

         Self {
             connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
             data_directory: base_directory.join(Self::DATA_DIRECTORY),
             logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
             base_directory,
-            geth: config.geth.clone(),
+            geth: geth_configuration.path.clone(),
             id,
             handle: None,
-            start_timeout: config.geth_start_timeout,
-            wallet: Arc::new(wallet),
+            start_timeout: geth_configuration.start_timeout_ms,
+            wallet: wallet.clone(),
             chain_id_filler: Default::default(),
             nonce_manager: Default::default(),
             // We know that we only need to be storing 2 files so we can specify that when creating
@@ -621,7 +626,7 @@ impl Node for GethNode {
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
+    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
         self.init(genesis)?.spawn_process()?;
         Ok(())
     }
```
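The `start_timeout` field switches from a raw `u64` of milliseconds to a typed `Duration`, populated by the `parse_duration` value parser on the clap side, so call sites no longer convert with `Duration::from_millis`. A small, self-contained sketch of that wiring, assuming the `clap` and `anyhow` crates (the `Cli` struct here is illustrative):

```rust
use std::time::Duration;

use clap::Parser;

// Mirrors the helper added in revive-dt-config: milliseconds in, Duration out.
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
    Ok(Duration::from_millis(s.parse::<u64>()?))
}

#[derive(Debug, Parser)]
struct Cli {
    /// Startup timeout in milliseconds.
    #[clap(
        long = "geth.start-timeout-ms",
        default_value = "5000",
        value_parser = parse_duration
    )]
    start_timeout_ms: Duration,
}

fn main() {
    let cli = Cli::parse();
    // The field is already a Duration; no conversion at the use site.
    assert!(cli.start_timeout_ms >= Duration::from_millis(1));
}
```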
@@ -662,49 +667,25 @@ impl Drop for GethNode {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use revive_dt_config::Arguments;
|
||||
|
||||
use temp_dir::TempDir;
|
||||
|
||||
use crate::{GENESIS_JSON, Node};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn test_config() -> (Arguments, TempDir) {
|
||||
let mut config = Arguments::default();
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
config.working_directory = temp_dir.path().to_path_buf().into();
|
||||
|
||||
(config, temp_dir)
|
||||
fn test_config() -> ExecutionContext {
|
||||
ExecutionContext::default()
|
||||
}
|
||||
|
||||
fn new_node() -> (GethNode, TempDir) {
|
||||
let (args, temp_dir) = test_config();
|
||||
let mut node = GethNode::new(&args);
|
||||
node.init(GENESIS_JSON.to_owned())
|
||||
fn new_node() -> (ExecutionContext, GethNode) {
|
||||
let context = test_config();
|
||||
let mut node = GethNode::new(&context);
|
||||
node.init(context.genesis_configuration.genesis().unwrap().clone())
|
||||
.expect("Failed to initialize the node")
|
||||
.spawn_process()
|
||||
.expect("Failed to spawn the node process");
|
||||
(node, temp_dir)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn init_works() {
|
||||
GethNode::new(&test_config().0)
|
||||
.init(GENESIS_JSON.to_string())
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn spawn_works() {
|
||||
GethNode::new(&test_config().0)
|
||||
.spawn(GENESIS_JSON.to_string())
|
||||
.unwrap();
|
||||
(context, node)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn version_works() {
|
||||
let version = GethNode::new(&test_config().0).version().unwrap();
|
||||
let version = GethNode::new(&test_config()).version().unwrap();
|
||||
assert!(
|
||||
version.starts_with("geth version"),
|
||||
"expected version string, got: '{version}'"
|
||||
@@ -714,7 +695,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let chain_id = node.chain_id().await;
@@ -727,7 +708,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await;
@@ -740,7 +721,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await;
@@ -753,7 +734,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await;
@@ -766,7 +747,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let block_hash = node.block_hash(BlockNumberOrTag::Latest).await;
@@ -778,7 +759,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await;
@@ -790,7 +771,7 @@ mod tests {
    #[tokio::test]
    async fn can_get_block_number_from_node() {
        // Arrange
        let (node, _temp_dir) = new_node();
        let (_context, node) = new_node();

        // Act
        let block_number = node.last_block_number().await;
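The hunks above all make the same mechanical swap: the test helpers now return the `ExecutionContext` alongside the node, and each test destructures `(_context, node)` so the context stays alive while the node runs, much as the old code kept the `TempDir` alive. A minimal sketch of that keep-alive pattern, using stand-in types rather than the crate's real API:

```rust
// Stand-ins for illustration only; not the crate's actual definitions.
struct ExecutionContext; // hypothetical owner of temp dirs, wallets, genesis

struct GethNode;

impl GethNode {
    fn new(_context: &ExecutionContext) -> Self {
        GethNode
    }
}

// Returning both pieces keeps the context (and everything it owns, such as
// the working directory) alive for as long as the caller holds the node.
fn new_node() -> (ExecutionContext, GethNode) {
    let context = ExecutionContext;
    let node = GethNode::new(&context);
    (context, node)
}

fn main() {
    // Binding to `_context` (rather than destructuring into `_`) keeps the
    // context alive until the end of scope; `let (_, node)` would drop it
    // immediately, pulling resources out from under the running node.
    let (_context, node) = new_node();
    let _ = node;
}
```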
@@ -19,8 +19,8 @@ use alloy::{
        TransactionBuilderError, UnbuiltTransactionError,
    },
    primitives::{
        Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, FixedBytes,
        StorageKey, TxHash, U256,
        Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
        TxHash, U256,
    },
    providers::{
        Provider, ProviderBuilder,
@@ -32,9 +32,8 @@ use alloy::{
        eth::{Block, Header, Transaction},
        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
    },
    signers::local::PrivateKeySigner,
};
use anyhow::Context;
use anyhow::Context as _;
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
@@ -43,7 +42,7 @@ use serde_json::{Value as JsonValue, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;

use revive_dt_config::Arguments;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;

use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
@@ -92,7 +91,7 @@ impl KitchensinkNode {
    const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
    const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";

    fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> {
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
        let _ = clear_directory(&self.base_directory);
        let _ = clear_directory(&self.logs_directory);

@@ -153,8 +152,6 @@
            })
            .collect();
        let mut eth_balances = {
            let mut genesis = serde_json::from_str::<Genesis>(genesis)
                .context("Failed to deserialize EVM genesis JSON for kitchensink")?;
        for signer_address in
            <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
        {
@@ -586,35 +583,47 @@ impl ResolverApi for KitchensinkNode {
}

impl Node for KitchensinkNode {
    fn new(config: &Arguments) -> Self {
        let kitchensink_directory = config.directory().join(Self::BASE_DIRECTORY);
    fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Clone,
    ) -> Self {
        let kitchensink_configuration = AsRef::<KitchensinkConfiguration>::as_ref(&context);
        let dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
        let eth_rpc_configuration = AsRef::<EthRpcConfiguration>::as_ref(&context);
        let working_directory_configuration =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
        let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);

        let kitchensink_directory = working_directory_configuration
            .as_path()
            .join(Self::BASE_DIRECTORY);
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
        let base_directory = kitchensink_directory.join(id.to_string());
        let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);

        let mut wallet = config.wallet();
        for signer in (1..=config.private_keys_to_add)
            .map(|id| U256::from(id))
            .map(|id| id.to_be_bytes::<32>())
            .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap())
        {
            wallet.register_signer(signer);
        }
        let wallet = wallet_configuration.wallet();

        Self {
            id,
            substrate_binary: config.kitchensink.clone(),
            dev_node_binary: config.revive_dev_node.clone(),
            eth_proxy_binary: config.eth_proxy.clone(),
            substrate_binary: kitchensink_configuration.path.clone(),
            dev_node_binary: dev_node_configuration.path.clone(),
            eth_proxy_binary: eth_rpc_configuration.path.clone(),
            rpc_url: String::new(),
            base_directory,
            logs_directory,
            process_substrate: None,
            process_proxy: None,
            wallet: Arc::new(wallet),
            wallet: wallet.clone(),
            chain_id_filler: Default::default(),
            nonce_manager: Default::default(),
            use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node,
            use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink,
            // We know that we only need to be storing 4 files so we can specify that when creating
            // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc.
            logs_file_to_flush: Vec::with_capacity(4),
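With several `AsRef<_>` bounds on the same `context` parameter, a bare `context.as_ref()` call would be ambiguous, which is presumably why the new constructor spells out each conversion as `AsRef::<T>::as_ref(&context)`. A self-contained sketch of that disambiguation, with two hypothetical configuration types:

```rust
// Hypothetical stand-ins for the real configuration types.
struct WalletConfiguration;
struct GenesisConfiguration;

struct ExecutionContext {
    wallet: WalletConfiguration,
    genesis: GenesisConfiguration,
}

impl AsRef<WalletConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet
    }
}

impl AsRef<GenesisConfiguration> for ExecutionContext {
    fn as_ref(&self) -> &GenesisConfiguration {
        &self.genesis
    }
}

fn new(context: impl AsRef<WalletConfiguration> + AsRef<GenesisConfiguration>) {
    // `context.as_ref()` would not compile here: two `AsRef` impls apply, so
    // the target type must be named via the turbofish.
    let _wallet = AsRef::<WalletConfiguration>::as_ref(&context);
    let _genesis = AsRef::<GenesisConfiguration>::as_ref(&context);
}

fn main() {
    new(ExecutionContext {
        wallet: WalletConfiguration,
        genesis: GenesisConfiguration,
    });
}
```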
@@ -655,8 +664,8 @@ impl Node for KitchensinkNode {
        Ok(())
    }

    fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
        self.init(&genesis)?.spawn_process()
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()
    }

    fn version(&self) -> anyhow::Result<String> {
@@ -1121,25 +1130,20 @@ impl BlockHeader for KitchenSinkHeader {
#[cfg(test)]
mod tests {
    use alloy::rpc::types::TransactionRequest;
    use revive_dt_config::Arguments;
    use std::path::PathBuf;
    use std::sync::{LazyLock, Mutex};

    use std::fs;

    use super::*;
    use crate::{GENESIS_JSON, Node};
    use crate::Node;

    fn test_config() -> Arguments {
        Arguments {
            kitchensink: PathBuf::from("substrate-node"),
            eth_proxy: PathBuf::from("eth-rpc"),
            use_kitchensink_not_dev_node: true,
            ..Default::default()
        }
    fn test_config() -> ExecutionContext {
        let mut context = ExecutionContext::default();
        context.kitchensink_configuration.use_kitchensink = true;
        context
    }

    fn new_node() -> (KitchensinkNode, Arguments) {
    fn new_node() -> (ExecutionContext, KitchensinkNode) {
        // Note: When we run the tests in the CI we found that if they're all
        // run in parallel then the CI is unable to start all of the nodes in
        // time and their start up times-out. Therefore, we want all of the
@@ -1158,32 +1162,36 @@ mod tests {
        static NODE_START_MUTEX: Mutex<()> = Mutex::new(());
        let _guard = NODE_START_MUTEX.lock().unwrap();

        let args = test_config();
        let mut node = KitchensinkNode::new(&args);
        node.init(GENESIS_JSON)
        let context = test_config();
        let mut node = KitchensinkNode::new(&context);
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
            .spawn_process()
            .expect("Failed to spawn the node process");
        (node, args)
        (context, node)
    }

    /// A shared node that multiple tests can use. It starts up once.
    fn shared_node() -> &'static KitchensinkNode {
        static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| {
            let (node, args) = new_node();
            (node, args)
        static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| {
            let (context, node) = new_node();
            (context, node)
        });
        &NODE.0
        &NODE.1
    }

    #[tokio::test]
    async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
        // Arrange
        let (node, args) = new_node();
        let (context, node) = new_node();

        let provider = node.provider().await.expect("Failed to create provider");

        let account_address = args.wallet().default_signer().address();
        let account_address = context
            .wallet_configuration
            .wallet()
            .default_signer()
            .address();
        let transaction = TransactionRequest::default()
            .to(account_address)
            .value(U256::from(100_000_000_000_000u128));
@@ -1217,7 +1225,9 @@ mod tests {
        let mut dummy_node = KitchensinkNode::new(&test_config());

        // Call `init()`
        dummy_node.init(genesis_content).expect("init failed");
        dummy_node
            .init(serde_json::from_str(genesis_content).unwrap())
            .expect("init failed");

        // Check that the patched chainspec file was generated
        let final_chainspec_path = dummy_node
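Since `init` and `spawn` now take a typed `alloy::genesis::Genesis` instead of a raw string, callers parse the JSON up front, as the test above does with `serde_json::from_str`. A small sketch of that parse step; the helper name and error wording are illustrative, not the crate's API:

```rust
use alloy::genesis::Genesis;

// Hypothetical helper mirroring the up-front parse in the test above.
fn parse_genesis(genesis_content: &str) -> anyhow::Result<Genesis> {
    // A malformed genesis file now fails here, before any node process is
    // started, rather than deep inside `init`.
    serde_json::from_str(genesis_content)
        .map_err(|e| anyhow::anyhow!("invalid genesis JSON: {e}"))
}
```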
@@ -1327,20 +1337,10 @@ mod tests {
        }
    }

    #[test]
    fn spawn_works() {
        let config = test_config();

        let mut node = KitchensinkNode::new(&config);

        node.spawn(GENESIS_JSON.to_string()).unwrap();
    }

    #[test]
    fn version_works() {
        let config = test_config();
        let node = shared_node();

        let node = KitchensinkNode::new(&config);
        let version = node.version().unwrap();

        assert!(
@@ -1351,9 +1351,8 @@ mod tests {

    #[test]
    fn eth_rpc_version_works() {
        let config = test_config();
        let node = shared_node();

        let node = KitchensinkNode::new(&config);
        let version = node.eth_rpc_version().unwrap();

        assert!(
+14
-6
@@ -1,7 +1,8 @@
//! This crate implements the testing nodes.

use alloy::genesis::Genesis;
use revive_common::EVMVersion;
use revive_dt_config::Arguments;
use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;

pub mod common;
@@ -10,13 +11,20 @@ pub mod geth;
pub mod kitchensink;
pub mod pool;

/// The default genesis configuration.
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");

/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
    /// Create a new uninitialized instance.
    fn new(config: &Arguments) -> Self;
    fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Clone,
    ) -> Self;

    /// Returns the identifier of the node.
    fn id(&self) -> usize;
@@ -24,7 +32,7 @@ pub trait Node: EthereumNode {
    /// Spawns a node configured according to the genesis json.
    ///
    /// Blocking until it's ready to accept transactions.
    fn spawn(&mut self, genesis: String) -> anyhow::Result<()>;
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;

    /// Prune the node instance and related data.
    ///
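The same eight-bound `impl AsRef<…>` bundle now appears on `Node::new`, on `KitchensinkNode::new`, and on the pool functions below. One hypothetical way to name the bundle once is a blanket "trait alias"; this is not part of the change, only an illustration of the pattern:

```rust
use revive_dt_config::*;

// Hypothetical refactor: name the bound bundle once so each signature can
// simply say `impl NodeContext`.
trait NodeContext:
    AsRef<WorkingDirectoryConfiguration>
    + AsRef<ConcurrencyConfiguration>
    + AsRef<GenesisConfiguration>
    + AsRef<WalletConfiguration>
    + AsRef<GethConfiguration>
    + AsRef<KitchensinkConfiguration>
    + AsRef<ReviveDevNodeConfiguration>
    + AsRef<EthRpcConfiguration>
    + Clone
{
}

// Any type meeting the bounds gets the alias for free, so a context type
// would satisfy `impl NodeContext` without further impls.
impl<T> NodeContext for T where
    T: AsRef<WorkingDirectoryConfiguration>
        + AsRef<ConcurrencyConfiguration>
        + AsRef<GenesisConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<GethConfiguration>
        + AsRef<KitchensinkConfiguration>
        + AsRef<ReviveDevNodeConfiguration>
        + AsRef<EthRpcConfiguration>
        + Clone
{
}
```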
+42
-14
@@ -5,10 +5,13 @@ use std::{
    thread,
};

use revive_dt_common::cached_fs::read_to_string;

use anyhow::Context;
use revive_dt_config::Arguments;
use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_config::{
    ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
    KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
    WorkingDirectoryConfiguration,
};
use tracing::info;

use crate::Node;
@@ -25,18 +28,31 @@ where
    T: Node + Send + 'static,
{
    /// Create a new Pool. This will start as many nodes as there are workers in `config`.
    pub fn new(config: &Arguments) -> anyhow::Result<Self> {
        let nodes = config.number_of_nodes;
        let genesis = read_to_string(&config.genesis_file).context(format!(
            "can not read genesis file: {}",
            config.genesis_file.display()
        ))?;
    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Send
            + Sync
            + Clone
            + 'static,
    ) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);

        let nodes = concurrency_configuration.number_of_nodes;
        let genesis = genesis_configuration.genesis()?;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let config = config.clone();
            let context = context.clone();
            let genesis = genesis.clone();
            handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis)));
            handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
        }

        let mut nodes = Vec::with_capacity(nodes);
@@ -64,8 +80,20 @@ where
    }
}

fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
    let mut node = T::new(args);
fn spawn_node<T: Node + Send>(
    context: impl AsRef<WorkingDirectoryConfiguration>
        + AsRef<ConcurrencyConfiguration>
        + AsRef<GenesisConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<GethConfiguration>
        + AsRef<KitchensinkConfiguration>
        + AsRef<ReviveDevNodeConfiguration>
        + AsRef<EthRpcConfiguration>
        + Clone
        + 'static,
    genesis: Genesis,
) -> anyhow::Result<T> {
    let mut node = T::new(context);
    info!(
        id = node.id(),
        connection_string = node.connection_string(),
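`Pool::new` clones the context and genesis into one OS thread per node and then joins every handle, so slow node start-ups overlap instead of running back to back. A minimal sketch of that fan-out/join shape, with stand-in types in place of the generic `T: Node` machinery:

```rust
use std::thread;

// Stand-ins for illustration; `spawn_node` here is not the crate's function.
#[derive(Clone)]
struct Context;
#[derive(Clone)]
struct Genesis;

struct FakeNode;

fn spawn_node(_context: Context, _genesis: Genesis) -> anyhow::Result<FakeNode> {
    Ok(FakeNode)
}

fn main() -> anyhow::Result<()> {
    let (context, genesis) = (Context, Genesis);
    let count = 5;

    // Fan out: each node starts on its own OS thread.
    let handles: Vec<_> = (0..count)
        .map(|_| {
            let context = context.clone();
            let genesis = genesis.clone();
            thread::spawn(move || spawn_node(context, genesis))
        })
        .collect();

    // Join: `expect` surfaces a panicked thread, `?` a start-up error.
    let mut nodes = Vec::with_capacity(count);
    for handle in handles {
        nodes.push(handle.join().expect("node thread panicked")?);
    }
    Ok(())
}
```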
@@ -12,7 +12,7 @@ use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Arguments, TestingPlatform};
use revive_dt_config::{Context, TestingPlatform};
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
@@ -36,11 +36,11 @@ pub struct ReportAggregator {
}

impl ReportAggregator {
    pub fn new(config: Arguments) -> Self {
    pub fn new(context: Context) -> Self {
        let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
        let (listener_tx, _) = channel::<ReporterEvent>(1024);
        Self {
            report: Report::new(config),
            report: Report::new(context),
            remaining_cases: Default::default(),
            runner_tx: Some(runner_tx),
            runner_rx,
@@ -121,7 +121,12 @@ impl ReportAggregator {
            file_name.push_str(".json");
            file_name
        };
        let file_path = self.report.config.directory().join(file_name);
        let file_path = self
            .report
            .context
            .working_directory_configuration()
            .as_path()
            .join(file_name);
        let file = OpenOptions::new()
            .create(true)
            .write(true)
@@ -282,8 +287,16 @@ impl ReportAggregator {
        &mut self,
        event: PreLinkContractsCompilationSucceededEvent,
    ) {
        let include_input = self.report.config.report_include_compiler_input;
        let include_output = self.report.config.report_include_compiler_output;
        let include_input = self
            .report
            .context
            .report_configuration()
            .include_compiler_input;
        let include_output = self
            .report
            .context
            .report_configuration()
            .include_compiler_output;

        let execution_information = self.execution_information(&event.execution_specifier);

@@ -311,8 +324,16 @@ impl ReportAggregator {
        &mut self,
        event: PostLinkContractsCompilationSucceededEvent,
    ) {
        let include_input = self.report.config.report_include_compiler_input;
        let include_output = self.report.config.report_include_compiler_output;
        let include_input = self
            .report
            .context
            .report_configuration()
            .include_compiler_input;
        let include_output = self
            .report
            .context
            .report_configuration()
            .include_compiler_output;

        let execution_information = self.execution_information(&event.execution_specifier);

@@ -406,12 +427,8 @@ impl ReportAggregator {
#[serde_as]
#[derive(Clone, Debug, Serialize)]
pub struct Report {
    /// The configuration that the tool was started up with.
    pub config: Arguments,
    /// The platform of the leader chain.
    pub leader_platform: TestingPlatform,
    /// The platform of the follower chain.
    pub follower_platform: TestingPlatform,
    /// The context that the tool was started up with.
    pub context: Context,
    /// The list of corpus files that the tool found.
    pub corpora: Vec<Corpus>,
    /// The list of metadata files that were found by the tool.
@@ -423,11 +440,9 @@ pub struct Report {
}

impl Report {
    pub fn new(config: Arguments) -> Self {
    pub fn new(context: Context) -> Self {
        Self {
            leader_platform: config.leader,
            follower_platform: config.follower,
            config,
            context,
            corpora: Default::default(),
            metadata_files: Default::default(),
            test_case_information: Default::default(),
@@ -517,12 +532,12 @@ pub enum CompilationStatus {
        /// The path of the compiler used to compile the contracts.
        compiler_path: PathBuf,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI configuration and if the contracts were not
        /// cached and the compiler was invoked.
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
        /// The output of the compiler. This is only included if the appropriate flag is set in the
        /// CLI configurations.
        /// CLI contexts.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_output: Option<CompilerOutput>,
    },
@@ -537,8 +552,8 @@ pub enum CompilationStatus {
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_path: Option<PathBuf>,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI configuration and if the contracts were not
        /// cached and the compiler was invoked.
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
    },
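The report embeds compiler input and output only when the corresponding CLI flags are set, and the `#[serde(skip_serializing_if = "Option::is_none")]` attributes keep unset fields out of the serialized JSON entirely. A small sketch of that pattern, with stand-in types:

```rust
use serde::Serialize;

// Stand-in for the real `CompilerInput` type.
#[derive(Serialize)]
struct CompilerInput {
    sources: Vec<String>,
}

#[derive(Serialize)]
struct CompilationRecord {
    compiler_path: String,
    // Serialized only when populated; `None` produces no key at all rather
    // than `"compiler_input": null`.
    #[serde(skip_serializing_if = "Option::is_none")]
    compiler_input: Option<CompilerInput>,
}

fn main() {
    let record = CompilationRecord {
        compiler_path: "solc".into(),
        compiler_input: None,
    };
    // Prints {"compiler_path":"solc"} with no compiler_input key.
    println!("{}", serde_json::to_string(&record).unwrap());
}
```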
@@ -13,7 +13,7 @@ use semver::Version;
use tokio::sync::Mutex;

use crate::download::SolcDownloader;
use anyhow::Context;
use anyhow::Context as _;

pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

@@ -11,7 +11,7 @@ use semver::Version;
use sha2::{Digest, Sha256};

use crate::list::List;
use anyhow::Context;
use anyhow::Context as _;

pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
    LazyLock::new(Default::default);

@@ -5,7 +5,7 @@

use std::path::{Path, PathBuf};

use anyhow::Context;
use anyhow::Context as _;
use cache::get_or_download;
use download::SolcDownloader;
+6
-6
@@ -89,13 +89,13 @@ echo "This may take a while..."
echo ""

# Run the tool
RUST_LOG="error" cargo run --release -- \
RUST_LOG="error" cargo run --release -- execute-tests \
    --corpus "$CORPUS_FILE" \
    --workdir "$WORKDIR" \
    --number-of-nodes 5 \
    --kitchensink "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node "$REVIVE_DEV_NODE_BIN" \
    --eth_proxy "$ETH_RPC_BIN" \
    --working-directory "$WORKDIR" \
    --concurrency.number-of-nodes 5 \
    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
    2> output.log