From 98b62d705fc20246b6d224f8e3e0023deb463822 Mon Sep 17 00:00:00 2001
From: Omar
Date: Sat, 20 Sep 2025 00:59:28 +0300
Subject: [PATCH] Make our traits object safe and implement the `ReviveDevNodePolkaVMResolc` target. (#159)

* Generate schema for the metadata file
* Groundwork for dyn traits
* Make the ethereum node trait object compatible
* Allow for compilers to be created in the dyn trait
* Add more identifiers to the platform
* Implement the dyn compiler trait for compilers
* Support the dyn compiler in the builder pattern
* Introduce a geth platform
* Provide a common node implementation for substrate chains
* Add all of the platforms that we support
* Add a way to convert platform identifier into a platform
* Replace infra with the dyn infra
* Remove all references to leader and follower
* Remove the old traits
* Remove an unneeded dependency
* Update the default values for the platforms
* Final set of renames
* Update the default values of the cli
* Update tests
---
 Cargo.lock                                    |   6 +
 README.md                                     | 187 +++--
 crates/common/Cargo.toml                      |   3 +
 crates/common/src/types/identifiers.rs        | 124 +++
 crates/common/src/types/mod.rs                |   2 +
 crates/compiler/src/lib.rs                    |  26 +-
 crates/compiler/src/revive_resolc.rs          | 369 ++++-----
 crates/compiler/src/solc.rs                   | 322 ++++----
 crates/compiler/tests/lib.rs                  |   8 +-
 crates/config/Cargo.toml                      |   2 +
 crates/config/src/lib.rs                      | 145 +++-
 crates/core/src/cached_compiler.rs            |  28 +-
 crates/core/src/driver/mod.rs                 | 182 ++---
 crates/core/src/lib.rs                        | 375 ++++++++-
 crates/core/src/main.rs                       | 648 +++++++--------
 crates/core/src/pool.rs                       |  52 ++
 crates/format/src/input.rs                    |  79 +-
 crates/format/src/metadata.rs                 |   8 +-
 crates/format/src/traits.rs                   |  39 +-
 crates/node-interaction/Cargo.toml            |   4 +
 crates/node-interaction/src/lib.rs            |  32 +-
 crates/node/src/constants.rs                  |   4 +-
 crates/node/src/geth.rs                       | 603 +++++++-------
 crates/node/src/lib.rs                        |  31 +-
 crates/node/src/pool.rs                       | 110 ---
 .../node/src/{kitchensink.rs => substrate.rs} | 747 ++++++++++--------
 crates/report/src/aggregator.rs               |  56 +-
 crates/report/src/common.rs                   |  12 +-
 crates/report/src/runner_event.rs             |  27 +-
 run_tests.sh                                  |   5 +-
 30 files changed, 2412 insertions(+), 1824 deletions(-)
 create mode 100644 crates/common/src/types/identifiers.rs
 create mode 100644 crates/core/src/pool.rs
 delete mode 100644 crates/node/src/pool.rs
 rename crates/node/src/{kitchensink.rs => substrate.rs} (71%)

diff --git a/Cargo.lock b/Cargo.lock
index e7e9684..42f4952 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4468,10 +4468,13 @@ name = "revive-dt-common"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "clap",
  "moka",
  "once_cell",
+ "schemars 1.0.4",
  "semver 1.0.26",
  "serde",
+ "strum",
  "tokio",
 ]
 
@@ -4503,6 +4506,7 @@ dependencies = [
  "alloy",
  "anyhow",
  "clap",
+ "revive-dt-common",
  "semver 1.0.26",
  "serde",
  "serde_json",
@@ -4584,6 +4588,8 @@ version = "0.1.0"
 dependencies = [
  "alloy",
  "anyhow",
+ "revive-common",
+ "revive-dt-format",
 ]
 
diff --git a/README.md b/README.md
index 9261e4c..6f6c2d1 100644
--- a/README.md
+++ b/README.md
@@ -52,122 +52,152 @@ All of the above need to be installed and available in the path in order for the
 
 This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
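In addition to `execute-tests`, the patch wires up an `export-json-schema` subcommand (the `Context::ExportJsonSchema` variant added in `crates/config/src/lib.rs`). Assuming clap's default kebab-case derivation of the variant name, the schema of the MatterLabs test format can be printed with:

```bash
# Subcommand name assumes clap's usual kebab-case derivation of `ExportJsonSchema`.
cargo run --release -- export-json-schema
```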
```bash -$ cargo run --release -- --help -Usage: retester [OPTIONS] +$ cargo run --release -- execute-tests --help +Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently + +Usage: retester execute-tests [OPTIONS] Options: - -s, --solc - The `solc` version to use if the test didn't specify it explicitly + -w, --working-directory + The working directory that the program will use for all of the temporary artifacts needed at runtime. - [default: 0.8.29] + If not specified, then a temporary directory will be created and used by the program for all temporary artifacts. - --wasm - Use the Wasm compiler versions + [default: ] - -r, --resolc - The path to the `resolc` executable to be tested. + -p, --platform + The set of platforms that the differential tests should run on - By default it uses the `resolc` binary found in `$PATH`. + [default: geth-evm-solc,revive-dev-node-polkavm-resolc] - If `--wasm` is set, this should point to the resolc Wasm ile. - - [default: resolc] + Possible values: + - geth-evm-solc: The Go-ethereum reference full node EVM implementation with the solc compiler + - kitchensink-polkavm-resolc: The kitchensink node with the PolkaVM backend with the resolc compiler + - kitchensink-revm-solc: The kitchensink node with the REVM backend with the solc compiler + - revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler + - revive-dev-node-revm-solc: The revive dev node with the REVM backend with the solc compiler -c, --corpus A list of test corpus JSON files to be tested - -w, --workdir - A place to store temporary artifacts during test execution. + -h, --help + Print help (see a summary with '-h') - Creates a temporary dir if not specified. +Solc Configuration: + --solc.version + Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases - -g, --geth - The path to the `geth` executable. + [default: 0.8.29] - By default it uses `geth` binary found in `$PATH`. +Resolc Configuration: + --resolc.path + Specifies the path of the resolc compiler to be used by the tool. + + If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH. + + [default: resolc] + +Geth Configuration: + --geth.path + Specifies the path of the geth node to be used by the tool. + + If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH. [default: geth] - --geth-start-timeout - The maximum time in milliseconds to wait for geth to start + --geth.start-timeout-ms + The amount of time to wait upon startup before considering that the node timed out [default: 5000] - --genesis - Configure nodes according to this genesis.json file +Kitchensink Configuration: + --kitchensink.path + Specifies the path of the kitchensink node to be used by the tool. - [default: genesis.json] + If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH. 
- -a, --account - The signing account private key + [default: substrate-node] + + --kitchensink.start-timeout-ms + The amount of time to wait upon startup before considering that the node timed out + + [default: 5000] + + --kitchensink.dont-use-dev-node + This configures the tool to use Kitchensink instead of using the revive-dev-node + +Revive Dev Node Configuration: + --revive-dev-node.path + Specifies the path of the revive dev node to be used by the tool. + + If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH. + + [default: revive-dev-node] + + --revive-dev-node.start-timeout-ms + The amount of time to wait upon startup before considering that the node timed out + + [default: 5000] + +Eth RPC Configuration: + --eth-rpc.path + Specifies the path of the ETH RPC to be used by the tool. + + If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH. + + [default: eth-rpc] + + --eth-rpc.start-timeout-ms + The amount of time to wait upon startup before considering that the node timed out + + [default: 5000] + +Genesis Configuration: + --genesis.path + Specifies the path of the genesis file to use for the nodes that are started. + + This is expected to be the path of a JSON geth genesis file. + +Wallet Configuration: + --wallet.default-private-key + The private key of the default signer [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d] - --private-keys-count + --wallet.additional-keys This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node [default: 100000] - -l, --leader - The differential testing leader node implementation - - [default: geth] - - Possible values: - - geth: The go-ethereum reference full node EVM implementation - - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation - - -f, --follower - The differential testing follower node implementation - - [default: kitchensink] - - Possible values: - - geth: The go-ethereum reference full node EVM implementation - - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation - - --compile-only - Only compile against this testing platform (doesn't execute the tests) - - Possible values: - - geth: The go-ethereum reference full node EVM implementation - - kitchensink: The kitchensink runtime provides the PolkaVM (PVM) based node implentation - - --number-of-nodes +Concurrency Configuration: + --concurrency.number-of-nodes Determines the amount of nodes that will be spawned for each chain - [default: 1] + [default: 5] - --number-of-threads + --concurrency.number-of-threads Determines the amount of tokio worker threads that will will be used [default: 16] - --number-concurrent-tasks - Determines the amount of concurrent tasks that will be spawned to run tests. Defaults to 10 x the number of nodes + --concurrency.number-of-concurrent-tasks + Determines the amount of concurrent tasks that will be spawned to run tests. - -e, --extract-problems - Extract problems back to the test corpus + Defaults to 10 x the number of nodes. - -k, --kitchensink - The path to the `kitchensink` executable. + --concurrency.ignore-concurrency-limit + Determines if the concurrency limit should be ignored or not - By default it uses `substrate-node` binary found in `$PATH`. 
-
-      [default: substrate-node]
-
-  -p, --eth_proxy
-          The path to the `eth_proxy` executable.
-
-          By default it uses `eth-rpc` binary found in `$PATH`.
-
-          [default: eth-rpc]
-
-  -i, --invalidate-compilation-cache
+Compilation Configuration:
+      --compilation.invalidate-cache
           Controls if the compilation cache should be invalidated or not
 
-  -h, --help
-          Print help (see a summary with '-h')
+Report Configuration:
+      --report.include-compiler-input
+          Controls if the compiler input is included in the final report
+
+      --report.include-compiler-output
+          Controls if the compiler output is included in the final report
 ```
 
 To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
@@ -188,10 +218,11 @@ The simplest command to run this tool is the following:
 
 ```bash
 RUST_LOG="info" cargo run --release -- execute-tests \
-    --follower geth \
-    --corpus path_to_your_corpus_file.json \
-    --working-directory path_to_a_temporary_directory_to_cache_things_in \
+    --platform geth-evm-solc \
+    --corpus corp.json \
+    --working-directory workdir \
     --concurrency.number-of-nodes 5 \
+    --concurrency.ignore-concurrency-limit \
     > logs.log \
     2> output.log
 ```
diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml
index 20cf865..527f1da 100644
--- a/crates/common/Cargo.toml
+++ b/crates/common/Cargo.toml
@@ -10,10 +10,13 @@ rust-version.workspace = true
 
 [dependencies]
 anyhow = { workspace = true }
+clap = { workspace = true }
 moka = { workspace = true, features = ["sync"] }
 once_cell = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
+schemars = { workspace = true }
+strum = { workspace = true }
 tokio = { workspace = true, default-features = false, features = ["time"] }
 
 [lints]
diff --git a/crates/common/src/types/identifiers.rs b/crates/common/src/types/identifiers.rs
new file mode 100644
index 0000000..d642f98
--- /dev/null
+++ b/crates/common/src/types/identifiers.rs
@@ -0,0 +1,124 @@
+use clap::ValueEnum;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
+
+/// An enum of the platform identifiers of all of the platforms supported by this framework. This
+/// can be thought of as the target triple from Rust and LLVM: it specifies the platform
+/// completely, starting with the node, then the VM, and finally the compiler used for this combination.
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    Deserialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
+    JsonSchema,
+)]
+#[serde(rename_all = "kebab-case")]
+#[strum(serialize_all = "kebab-case")]
+pub enum PlatformIdentifier {
+    /// The Go-ethereum reference full node EVM implementation with the solc compiler.
+    GethEvmSolc,
+    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
+    KitchensinkPolkavmResolc,
+    /// The kitchensink node with the REVM backend with the solc compiler.
+    KitchensinkRevmSolc,
+    /// The revive dev node with the PolkaVM backend with the resolc compiler.
+    ReviveDevNodePolkavmResolc,
+    /// The revive dev node with the REVM backend with the solc compiler.
+    ReviveDevNodeRevmSolc,
+}
+
+/// An enum of the compiler identifiers of all of the compilers supported by this framework.
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    Deserialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
+    JsonSchema,
+)]
+pub enum CompilerIdentifier {
+    /// The solc compiler.
+    Solc,
+    /// The resolc compiler.
+    Resolc,
+}
+
+/// An enum representing the identifiers of the supported nodes.
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    Deserialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
+    JsonSchema,
+)]
+pub enum NodeIdentifier {
+    /// The go-ethereum node implementation.
+    Geth,
+    /// The Kitchensink node implementation.
+    Kitchensink,
+    /// The revive dev node implementation.
+    ReviveDevNode,
+}
+
+/// An enum representing the identifiers of the supported VMs.
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Serialize,
+    Deserialize,
+    ValueEnum,
+    EnumString,
+    Display,
+    AsRefStr,
+    IntoStaticStr,
+    JsonSchema,
+)]
+#[serde(rename_all = "lowercase")]
+#[strum(serialize_all = "lowercase")]
+pub enum VmIdentifier {
+    /// The Ethereum virtual machine.
+    Evm,
+    /// The EraVM virtual machine.
+    EraVM,
+    /// Polkadot's PolkaVM RISC-V based virtual machine.
+    PolkaVM,
+}
diff --git a/crates/common/src/types/mod.rs b/crates/common/src/types/mod.rs
index 0e1c34f..c44de1b 100644
--- a/crates/common/src/types/mod.rs
+++ b/crates/common/src/types/mod.rs
@@ -1,5 +1,7 @@
+mod identifiers;
 mod mode;
 mod version_or_requirement;
 
+pub use identifiers::*;
 pub use mode::*;
 pub use version_or_requirement::*;
diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs
index 165545e..5656528 100644
--- a/crates/compiler/src/lib.rs
+++ b/crates/compiler/src/lib.rs
@@ -7,6 +7,7 @@ use std::{
     collections::HashMap,
     hash::Hash,
     path::{Path, PathBuf},
+    pin::Pin,
 };
 
 use alloy::json_abi::JsonAbi;
@@ -17,8 +18,6 @@ use serde::{Deserialize, Serialize};
 use revive_common::EVMVersion;
 use revive_dt_common::cached_fs::read_to_string;
-use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 
 // Re-export this as it's a part of the compiler interface.
 pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
@@ -28,19 +27,7 @@ pub mod revive_resolc;
 pub mod solc;
 
 /// A common interface for all supported Solidity compilers.
-pub trait SolidityCompiler: Sized {
-    /// Instantiates a new compiler object.
-    ///
-    /// Based on the given [`Context`] and [`VersionOrRequirement`] this function instantiates a
-    /// new compiler object. Certain implementations of this trait might choose to cache cache the
-    /// compiler objects and return the same ones over and over again.
-    fn new(
-        context: impl AsRef<WorkingDirectoryConfiguration>
-        + AsRef<SolcConfiguration>
-        + AsRef<ResolcConfiguration>,
-        version: impl Into<Option<VersionOrRequirement>>,
-    ) -> impl Future<Output = Result<Self>>;
-
+pub trait SolidityCompiler {
     /// Returns the version of the compiler.
     fn version(&self) -> &Version;
 
@@ -48,7 +35,10 @@ pub trait SolidityCompiler: Sized {
     fn path(&self) -> &Path;
 
     /// The low-level compiler interface.
-    fn build(&self, input: CompilerInput) -> impl Future<Output = Result<CompilerOutput>>;
+    fn build(
+        &self,
+        input: CompilerInput,
+    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;
 
     /// Does the compiler support the provided mode and version settings.
     fn supports_mode(
@@ -74,7 +64,7 @@ pub struct CompilerInput {
 /// The generic compilation output configuration.
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct CompilerOutput {
-    /// The compiled contracts.
The bytecode of the contract is kept as a string incase linking is + /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is /// required and the compiled source has placeholders. pub contracts: HashMap>, } @@ -164,7 +154,7 @@ impl Compiler { callback(self) } - pub async fn try_build(self, compiler: &impl SolidityCompiler) -> Result { + pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result { compiler.build(self.input).await } diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs index 4d02578..79a32fb 100644 --- a/crates/compiler/src/revive_resolc.rs +++ b/crates/compiler/src/revive_resolc.rs @@ -3,6 +3,7 @@ use std::{ path::PathBuf, + pin::Pin, process::Stdio, sync::{Arc, LazyLock}, }; @@ -37,8 +38,8 @@ struct ResolcInner { resolc_path: PathBuf, } -impl SolidityCompiler for Resolc { - async fn new( +impl Resolc { + pub async fn new( context: impl AsRef + AsRef + AsRef, @@ -65,11 +66,13 @@ impl SolidityCompiler for Resolc { }) .clone()) } +} +impl SolidityCompiler for Resolc { fn version(&self) -> &Version { // We currently return the solc compiler version since we do not support multiple resolc // compiler versions. - self.0.solc.version() + SolidityCompiler::version(&self.0.solc) } fn path(&self) -> &std::path::Path { @@ -77,7 +80,7 @@ impl SolidityCompiler for Resolc { } #[tracing::instrument(level = "debug", ret)] - async fn build( + fn build( &self, CompilerInput { pipeline, @@ -91,189 +94,196 @@ impl SolidityCompiler for Resolc { // resolc. So, we need to go back to this later once it's supported. revert_string_handling: _, }: CompilerInput, - ) -> Result { - if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) { - anyhow::bail!( - "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}" - ); - } - - let input = SolcStandardJsonInput { - language: SolcStandardJsonInputLanguage::Solidity, - sources: sources - .into_iter() - .map(|(path, source)| (path.display().to_string(), source.into())) - .collect(), - settings: SolcStandardJsonInputSettings { - evm_version, - libraries: Some( - libraries - .into_iter() - .map(|(source_code, libraries_map)| { - ( - source_code.display().to_string(), - libraries_map - .into_iter() - .map(|(library_ident, library_address)| { - (library_ident, library_address.to_string()) - }) - .collect(), - ) - }) - .collect(), - ), - remappings: None, - output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), - via_ir: Some(true), - optimizer: SolcStandardJsonInputSettingsOptimizer::new( - optimization - .unwrap_or(ModeOptimizerSetting::M0) - .optimizations_enabled(), - None, - &Version::new(0, 0, 0), - false, - ), - metadata: None, - polkavm: None, - }, - }; - - let mut command = AsyncCommand::new(self.path()); - command - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .arg("--standard-json"); - - if let Some(ref base_path) = base_path { - command.arg("--base-path").arg(base_path); - } - if !allow_paths.is_empty() { - command.arg("--allow-paths").arg( - allow_paths - .iter() - .map(|path| path.display().to_string()) - .collect::>() - .join(","), - ); - } - let mut child = command - .spawn() - .with_context(|| format!("Failed to spawn resolc at {}", self.path().display()))?; - - let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); - let serialized_input = serde_json::to_vec(&input) - .context("Failed to serialize Standard JSON input for resolc")?; - stdin_pipe - 
.write_all(&serialized_input) - .await - .context("Failed to write Standard JSON to resolc stdin")?; - - let output = child - .wait_with_output() - .await - .context("Failed while waiting for resolc process to finish")?; - let stdout = output.stdout; - let stderr = output.stderr; - - if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input) - .context("Failed to pretty-print Standard JSON input for logging")?; - let message = String::from_utf8_lossy(&stderr); - tracing::error!( - status = %output.status, - message = %message, - json_input = json_in, - "Compilation using resolc failed" - ); - anyhow::bail!("Compilation failed with an error: {message}"); - } - - let parsed = serde_json::from_slice::(&stdout) - .map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstderr: {}", - String::from_utf8_lossy(&stderr) - ) - }) - .context("Failed to parse resolc standard JSON output")?; - - tracing::debug!( - output = %serde_json::to_string(&parsed).unwrap(), - "Compiled successfully" - ); - - // Detecting if the compiler output contained errors and reporting them through logs and - // errors instead of returning the compiler output that might contain errors. - for error in parsed.errors.iter().flatten() { - if error.severity == "error" { - tracing::error!( - ?error, - ?input, - output = %serde_json::to_string(&parsed).unwrap(), - "Encountered an error in the compilation" + ) -> Pin> + '_>> { + Box::pin(async move { + if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) { + anyhow::bail!( + "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}" ); - anyhow::bail!("Encountered an error in the compilation: {error}") } - } - let Some(contracts) = parsed.contracts else { - anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section"); - }; + let input = SolcStandardJsonInput { + language: SolcStandardJsonInputLanguage::Solidity, + sources: sources + .into_iter() + .map(|(path, source)| (path.display().to_string(), source.into())) + .collect(), + settings: SolcStandardJsonInputSettings { + evm_version, + libraries: Some( + libraries + .into_iter() + .map(|(source_code, libraries_map)| { + ( + source_code.display().to_string(), + libraries_map + .into_iter() + .map(|(library_ident, library_address)| { + (library_ident, library_address.to_string()) + }) + .collect(), + ) + }) + .collect(), + ), + remappings: None, + output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), + via_ir: Some(true), + optimizer: SolcStandardJsonInputSettingsOptimizer::new( + optimization + .unwrap_or(ModeOptimizerSetting::M0) + .optimizations_enabled(), + None, + &Version::new(0, 0, 0), + false, + ), + metadata: None, + polkavm: None, + }, + }; - let mut compiler_output = CompilerOutput::default(); - for (source_path, contracts) in contracts.into_iter() { - let src_for_msg = source_path.clone(); - let source_path = PathBuf::from(source_path) - .canonicalize() - .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; + let path = &self.0.resolc_path; + let mut command = AsyncCommand::new(path); + command + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .arg("--standard-json"); - let map = compiler_output.contracts.entry(source_path).or_default(); - for (contract_name, contract_information) in contracts.into_iter() { - let bytecode = contract_information - .evm - .and_then(|evm| evm.bytecode.clone()) - .context("Unexpected - Contract compiled with 
resolc has no bytecode")?; - let abi = { - let metadata = contract_information - .metadata - .as_ref() - .context("No metadata found for the contract")?; - let solc_metadata_str = match metadata { - serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(), - serde_json::Value::Object(metadata_object) => { - let solc_metadata_value = metadata_object - .get("solc_metadata") - .context("Contract doesn't have a 'solc_metadata' field")?; - solc_metadata_value - .as_str() - .context("The 'solc_metadata' field is not a string")? - } - serde_json::Value::Null - | serde_json::Value::Bool(_) - | serde_json::Value::Number(_) - | serde_json::Value::Array(_) => { - anyhow::bail!("Unsupported type of metadata {metadata:?}") - } - }; - let solc_metadata = - serde_json::from_str::(solc_metadata_str).context( + if let Some(ref base_path) = base_path { + command.arg("--base-path").arg(base_path); + } + if !allow_paths.is_empty() { + command.arg("--allow-paths").arg( + allow_paths + .iter() + .map(|path| path.display().to_string()) + .collect::>() + .join(","), + ); + } + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?; + + let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize Standard JSON input for resolc")?; + stdin_pipe + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to resolc stdin")?; + + let output = child + .wait_with_output() + .await + .context("Failed while waiting for resolc process to finish")?; + let stdout = output.stdout; + let stderr = output.stderr; + + if !output.status.success() { + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; + let message = String::from_utf8_lossy(&stderr); + tracing::error!( + status = %output.status, + message = %message, + json_input = json_in, + "Compilation using resolc failed" + ); + anyhow::bail!("Compilation failed with an error: {message}"); + } + + let parsed = serde_json::from_slice::(&stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstderr: {}", + String::from_utf8_lossy(&stderr) + ) + }) + .context("Failed to parse resolc standard JSON output")?; + + tracing::debug!( + output = %serde_json::to_string(&parsed).unwrap(), + "Compiled successfully" + ); + + // Detecting if the compiler output contained errors and reporting them through logs and + // errors instead of returning the compiler output that might contain errors. 
+ for error in parsed.errors.iter().flatten() { + if error.severity == "error" { + tracing::error!( + ?error, + ?input, + output = %serde_json::to_string(&parsed).unwrap(), + "Encountered an error in the compilation" + ); + anyhow::bail!("Encountered an error in the compilation: {error}") + } + } + + let Some(contracts) = parsed.contracts else { + anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section"); + }; + + let mut compiler_output = CompilerOutput::default(); + for (source_path, contracts) in contracts.into_iter() { + let src_for_msg = source_path.clone(); + let source_path = PathBuf::from(source_path) + .canonicalize() + .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; + + let map = compiler_output.contracts.entry(source_path).or_default(); + for (contract_name, contract_information) in contracts.into_iter() { + let bytecode = contract_information + .evm + .and_then(|evm| evm.bytecode.clone()) + .context("Unexpected - Contract compiled with resolc has no bytecode")?; + let abi = { + let metadata = contract_information + .metadata + .as_ref() + .context("No metadata found for the contract")?; + let solc_metadata_str = match metadata { + serde_json::Value::String(solc_metadata_str) => { + solc_metadata_str.as_str() + } + serde_json::Value::Object(metadata_object) => { + let solc_metadata_value = metadata_object + .get("solc_metadata") + .context("Contract doesn't have a 'solc_metadata' field")?; + solc_metadata_value + .as_str() + .context("The 'solc_metadata' field is not a string")? + } + serde_json::Value::Null + | serde_json::Value::Bool(_) + | serde_json::Value::Number(_) + | serde_json::Value::Array(_) => { + anyhow::bail!("Unsupported type of metadata {metadata:?}") + } + }; + let solc_metadata = serde_json::from_str::( + solc_metadata_str, + ) + .context( "Failed to deserialize the solc_metadata as a serde_json generic value", )?; - let output_value = solc_metadata - .get("output") - .context("solc_metadata doesn't have an output field")?; - let abi_value = output_value - .get("abi") - .context("solc_metadata output doesn't contain an abi field")?; - serde_json::from_value::(abi_value.clone()) - .context("ABI found in solc_metadata output is not valid ABI")? - }; - map.insert(contract_name, (bytecode.object, abi)); + let output_value = solc_metadata + .get("output") + .context("solc_metadata doesn't have an output field")?; + let abi_value = output_value + .get("abi") + .context("solc_metadata output doesn't contain an abi field")?; + serde_json::from_value::(abi_value.clone()) + .context("ABI found in solc_metadata output is not valid ABI")? 
+ }; + map.insert(contract_name, (bytecode.object, abi)); + } } - } - Ok(compiler_output) + Ok(compiler_output) + }) } fn supports_mode( @@ -281,6 +291,7 @@ impl SolidityCompiler for Resolc { optimize_setting: ModeOptimizerSetting, pipeline: ModePipeline, ) -> bool { - pipeline == ModePipeline::ViaYulIR && self.0.solc.supports_mode(optimize_setting, pipeline) + pipeline == ModePipeline::ViaYulIR + && SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline) } } diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs index dae007e..defdb19 100644 --- a/crates/compiler/src/solc.rs +++ b/crates/compiler/src/solc.rs @@ -3,6 +3,7 @@ use std::{ path::PathBuf, + pin::Pin, process::Stdio, sync::{Arc, LazyLock}, }; @@ -36,8 +37,8 @@ struct SolcInner { solc_version: Version, } -impl SolidityCompiler for Solc { - async fn new( +impl Solc { + pub async fn new( context: impl AsRef + AsRef + AsRef, @@ -75,7 +76,9 @@ impl SolidityCompiler for Solc { }) .clone()) } +} +impl SolidityCompiler for Solc { fn version(&self) -> &Version { &self.0.solc_version } @@ -85,7 +88,7 @@ impl SolidityCompiler for Solc { } #[tracing::instrument(level = "debug", ret)] - async fn build( + fn build( &self, CompilerInput { pipeline, @@ -97,170 +100,173 @@ impl SolidityCompiler for Solc { libraries, revert_string_handling, }: CompilerInput, - ) -> Result { - // Be careful to entirely omit the viaIR field if the compiler does not support it, - // as it will error if you provide fields it does not know about. Because - // `supports_mode` is called prior to instantiating a compiler, we should never - // ask for something which is invalid. - let via_ir = match (pipeline, self.compiler_supports_yul()) { - (pipeline, true) => pipeline.map(|p| p.via_yul_ir()), - (_pipeline, false) => None, - }; + ) -> Pin> + '_>> { + Box::pin(async move { + // Be careful to entirely omit the viaIR field if the compiler does not support it, + // as it will error if you provide fields it does not know about. Because + // `supports_mode` is called prior to instantiating a compiler, we should never + // ask for something which is invalid. 
+ let via_ir = match (pipeline, self.compiler_supports_yul()) { + (pipeline, true) => pipeline.map(|p| p.via_yul_ir()), + (_pipeline, false) => None, + }; - let input = SolcInput { - language: SolcLanguage::Solidity, - sources: Sources( - sources - .into_iter() - .map(|(source_path, source_code)| (source_path, Source::new(source_code))) - .collect(), - ), - settings: Settings { - optimizer: Optimizer { - enabled: optimization.map(|o| o.optimizations_enabled()), - details: Some(Default::default()), + let input = SolcInput { + language: SolcLanguage::Solidity, + sources: Sources( + sources + .into_iter() + .map(|(source_path, source_code)| (source_path, Source::new(source_code))) + .collect(), + ), + settings: Settings { + optimizer: Optimizer { + enabled: optimization.map(|o| o.optimizations_enabled()), + details: Some(Default::default()), + ..Default::default() + }, + output_selection: OutputSelection::common_output_selection( + [ + ContractOutputSelection::Abi, + ContractOutputSelection::Evm(EvmOutputSelection::ByteCode( + BytecodeOutputSelection::Object, + )), + ] + .into_iter() + .map(|item| item.to_string()), + ), + evm_version: evm_version.map(|version| version.to_string().parse().unwrap()), + via_ir, + libraries: Libraries { + libs: libraries + .into_iter() + .map(|(file_path, libraries)| { + ( + file_path, + libraries + .into_iter() + .map(|(library_name, library_address)| { + (library_name, library_address.to_string()) + }) + .collect(), + ) + }) + .collect(), + }, + debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings { + revert_strings: match revert_string_handling { + crate::RevertString::Default => Some(RevertStrings::Default), + crate::RevertString::Debug => Some(RevertStrings::Debug), + crate::RevertString::Strip => Some(RevertStrings::Strip), + crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug), + }, + debug_info: Default::default(), + }), ..Default::default() }, - output_selection: OutputSelection::common_output_selection( - [ - ContractOutputSelection::Abi, - ContractOutputSelection::Evm(EvmOutputSelection::ByteCode( - BytecodeOutputSelection::Object, - )), - ] - .into_iter() - .map(|item| item.to_string()), - ), - evm_version: evm_version.map(|version| version.to_string().parse().unwrap()), - via_ir, - libraries: Libraries { - libs: libraries - .into_iter() - .map(|(file_path, libraries)| { - ( - file_path, - libraries - .into_iter() - .map(|(library_name, library_address)| { - (library_name, library_address.to_string()) - }) - .collect(), - ) - }) - .collect(), - }, - debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings { - revert_strings: match revert_string_handling { - crate::RevertString::Default => Some(RevertStrings::Default), - crate::RevertString::Debug => Some(RevertStrings::Debug), - crate::RevertString::Strip => Some(RevertStrings::Strip), - crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug), - }, - debug_info: Default::default(), - }), - ..Default::default() - }, - }; + }; - let mut command = AsyncCommand::new(self.path()); - command - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .arg("--standard-json"); + let path = &self.0.solc_path; + let mut command = AsyncCommand::new(path); + command + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .arg("--standard-json"); - if let Some(ref base_path) = base_path { - command.arg("--base-path").arg(base_path); - } - if !allow_paths.is_empty() { - 
command.arg("--allow-paths").arg( - allow_paths - .iter() - .map(|path| path.display().to_string()) - .collect::>() - .join(","), - ); - } - let mut child = command - .spawn() - .with_context(|| format!("Failed to spawn solc at {}", self.path().display()))?; - - let stdin = child.stdin.as_mut().expect("should be piped"); - let serialized_input = serde_json::to_vec(&input) - .context("Failed to serialize Standard JSON input for solc")?; - stdin - .write_all(&serialized_input) - .await - .context("Failed to write Standard JSON to solc stdin")?; - let output = child - .wait_with_output() - .await - .context("Failed while waiting for solc process to finish")?; - - if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input) - .context("Failed to pretty-print Standard JSON input for logging")?; - let message = String::from_utf8_lossy(&output.stderr); - tracing::error!( - status = %output.status, - message = %message, - json_input = json_in, - "Compilation using solc failed" - ); - anyhow::bail!("Compilation failed with an error: {message}"); - } - - let parsed = serde_json::from_slice::(&output.stdout) - .map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstderr: {}", - String::from_utf8_lossy(&output.stdout) - ) - }) - .context("Failed to parse solc standard JSON output")?; - - // Detecting if the compiler output contained errors and reporting them through logs and - // errors instead of returning the compiler output that might contain errors. - for error in parsed.errors.iter() { - if error.severity == Severity::Error { - tracing::error!(?error, ?input, "Encountered an error in the compilation"); - anyhow::bail!("Encountered an error in the compilation: {error}") + if let Some(ref base_path) = base_path { + command.arg("--base-path").arg(base_path); } - } + if !allow_paths.is_empty() { + command.arg("--allow-paths").arg( + allow_paths + .iter() + .map(|path| path.display().to_string()) + .collect::>() + .join(","), + ); + } + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn solc at {}", path.display()))?; - tracing::debug!( - output = %String::from_utf8_lossy(&output.stdout).to_string(), - "Compiled successfully" - ); + let stdin = child.stdin.as_mut().expect("should be piped"); + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize Standard JSON input for solc")?; + stdin + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to solc stdin")?; + let output = child + .wait_with_output() + .await + .context("Failed while waiting for solc process to finish")?; - let mut compiler_output = CompilerOutput::default(); - for (contract_path, contracts) in parsed.contracts { - let map = compiler_output - .contracts - .entry(contract_path.canonicalize().with_context(|| { - format!( - "Failed to canonicalize contract path {}", - contract_path.display() + if !output.status.success() { + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; + let message = String::from_utf8_lossy(&output.stderr); + tracing::error!( + status = %output.status, + message = %message, + json_input = json_in, + "Compilation using solc failed" + ); + anyhow::bail!("Compilation failed with an error: {message}"); + } + + let parsed = serde_json::from_slice::(&output.stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstderr: {}", + String::from_utf8_lossy(&output.stdout) ) - })?) 
- .or_default(); - for (contract_name, contract_info) in contracts.into_iter() { - let source_code = contract_info - .evm - .and_then(|evm| evm.bytecode) - .map(|bytecode| match bytecode.object { - BytecodeObject::Bytecode(bytecode) => bytecode.to_string(), - BytecodeObject::Unlinked(unlinked) => unlinked, - }) - .context("Unexpected - contract compiled with solc has no source code")?; - let abi = contract_info - .abi - .context("Unexpected - contract compiled with solc as no ABI")?; - map.insert(contract_name, (source_code, abi)); - } - } + }) + .context("Failed to parse solc standard JSON output")?; - Ok(compiler_output) + // Detecting if the compiler output contained errors and reporting them through logs and + // errors instead of returning the compiler output that might contain errors. + for error in parsed.errors.iter() { + if error.severity == Severity::Error { + tracing::error!(?error, ?input, "Encountered an error in the compilation"); + anyhow::bail!("Encountered an error in the compilation: {error}") + } + } + + tracing::debug!( + output = %String::from_utf8_lossy(&output.stdout).to_string(), + "Compiled successfully" + ); + + let mut compiler_output = CompilerOutput::default(); + for (contract_path, contracts) in parsed.contracts { + let map = compiler_output + .contracts + .entry(contract_path.canonicalize().with_context(|| { + format!( + "Failed to canonicalize contract path {}", + contract_path.display() + ) + })?) + .or_default(); + for (contract_name, contract_info) in contracts.into_iter() { + let source_code = contract_info + .evm + .and_then(|evm| evm.bytecode) + .map(|bytecode| match bytecode.object { + BytecodeObject::Bytecode(bytecode) => bytecode.to_string(), + BytecodeObject::Unlinked(unlinked) => unlinked, + }) + .context("Unexpected - contract compiled with solc has no source code")?; + let abi = contract_info + .abi + .context("Unexpected - contract compiled with solc as no ABI")?; + map.insert(contract_name, (source_code, abi)); + } + } + + Ok(compiler_output) + }) } fn supports_mode( @@ -278,6 +284,6 @@ impl SolidityCompiler for Solc { impl Solc { fn compiler_supports_yul(&self) -> bool { const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13); - self.version() >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR + SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR } } diff --git a/crates/compiler/tests/lib.rs b/crates/compiler/tests/lib.rs index 7eeef4d..5de4b90 100644 --- a/crates/compiler/tests/lib.rs +++ b/crates/compiler/tests/lib.rs @@ -1,14 +1,14 @@ use std::path::PathBuf; use revive_dt_common::types::VersionOrRequirement; -use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc::Solc}; -use revive_dt_config::ExecutionContext; +use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc}; +use revive_dt_config::TestExecutionContext; use semver::Version; #[tokio::test] async fn contracts_can_be_compiled_with_solc() { // Arrange - let args = ExecutionContext::default(); + let args = TestExecutionContext::default(); let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) .await .unwrap(); @@ -49,7 +49,7 @@ async fn contracts_can_be_compiled_with_solc() { #[tokio::test] async fn contracts_can_be_compiled_with_resolc() { // Arrange - let args = ExecutionContext::default(); + let args = TestExecutionContext::default(); let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) .await .unwrap(); diff --git a/crates/config/Cargo.toml 
b/crates/config/Cargo.toml
index 150d8d3..1862e74 100644
--- a/crates/config/Cargo.toml
+++ b/crates/config/Cargo.toml
@@ -9,6 +9,8 @@ repository.workspace = true
 rust-version.workspace = true
 
 [dependencies]
+revive-dt-common = { workspace = true }
+
 alloy = { workspace = true }
 anyhow = { workspace = true }
 clap = { workspace = true }
diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs
index 8141dfa..3551a34 100644
--- a/crates/config/src/lib.rs
+++ b/crates/config/src/lib.rs
@@ -18,6 +18,7 @@ use alloy::{
     signers::local::PrivateKeySigner,
 };
 use clap::{Parser, ValueEnum, ValueHint};
+use revive_dt_common::types::PlatformIdentifier;
 use semver::Version;
 use serde::{Serialize, Serializer};
 use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
@@ -26,8 +27,8 @@ use temp_dir::TempDir;
 #[derive(Clone, Debug, Parser, Serialize)]
 #[command(name = "retester")]
 pub enum Context {
-    /// Executes tests in the MatterLabs format differentially against a leader and a follower.
-    ExecuteTests(Box<ExecutionContext>),
+    /// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
+    ExecuteTests(Box<TestExecutionContext>),
     /// Exports the JSON schema of the MatterLabs test format used by the tool.
     ExportJsonSchema,
 }
@@ -45,8 +46,98 @@ impl Context {
 impl AsRef<WorkingDirectoryConfiguration> for Context {
     fn as_ref(&self) -> &WorkingDirectoryConfiguration {
         match self {
-            Context::ExecuteTests(execution_context) => &execution_context.working_directory,
-            Context::ExportJsonSchema => unreachable!(),
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<SolcConfiguration> for Context {
+    fn as_ref(&self) -> &SolcConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<ResolcConfiguration> for Context {
+    fn as_ref(&self) -> &ResolcConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<GethConfiguration> for Context {
+    fn as_ref(&self) -> &GethConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<KitchensinkConfiguration> for Context {
+    fn as_ref(&self) -> &KitchensinkConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<ReviveDevNodeConfiguration> for Context {
+    fn as_ref(&self) -> &ReviveDevNodeConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<EthRpcConfiguration> for Context {
+    fn as_ref(&self) -> &EthRpcConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<GenesisConfiguration> for Context {
+    fn as_ref(&self) -> &GenesisConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<WalletConfiguration> for Context {
+    fn as_ref(&self) -> &WalletConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<ConcurrencyConfiguration> for Context {
+    fn as_ref(&self) -> &ConcurrencyConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
+        }
+    }
+}
+
+impl AsRef<CompilationConfiguration> for Context {
+    fn as_ref(&self) -> &CompilationConfiguration {
+        match self {
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
         }
     }
 }
@@ -54,14 +145,14 @@ impl AsRef<ReportConfiguration> for Context {
     fn as_ref(&self) -> &ReportConfiguration {
         match self {
-            Context::ExecuteTests(execution_context) => &execution_context.report_configuration,
-            Context::ExportJsonSchema => unreachable!(),
+            Self::ExecuteTests(context) => context.as_ref().as_ref(),
+            Self::ExportJsonSchema => unreachable!(),
         }
     }
 }
 
 #[derive(Clone, Debug, Parser, Serialize)]
-pub struct ExecutionContext {
+pub struct TestExecutionContext {
     /// The working directory that the program will use for all of the temporary artifacts needed at
     /// runtime.
     ///
@@ -75,13 +166,13 @@
     )]
     pub working_directory: WorkingDirectoryConfiguration,
 
-    /// The differential testing leader node implementation.
-    #[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
-    pub leader: TestingPlatform,
-
-    /// The differential testing follower node implementation.
-    #[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
-    pub follower: TestingPlatform,
+    /// The set of platforms that the differential tests should run on.
+    #[arg(
+        short = 'p',
+        long = "platform",
+        default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
+    )]
+    pub platforms: Vec<PlatformIdentifier>,
 
     /// A list of test corpus JSON files to be tested.
     #[arg(long = "corpus", short)]
@@ -132,79 +223,79 @@ pub struct ExecutionContext {
     pub report_configuration: ReportConfiguration,
 }
 
-impl Default for ExecutionContext {
+impl Default for TestExecutionContext {
     fn default() -> Self {
         Self::parse_from(["execution-context"])
     }
 }
 
-impl AsRef<WorkingDirectoryConfiguration> for ExecutionContext {
+impl AsRef<WorkingDirectoryConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &WorkingDirectoryConfiguration {
         &self.working_directory
     }
 }
 
-impl AsRef<SolcConfiguration> for ExecutionContext {
+impl AsRef<SolcConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &SolcConfiguration {
         &self.solc_configuration
     }
 }
 
-impl AsRef<ResolcConfiguration> for ExecutionContext {
+impl AsRef<ResolcConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &ResolcConfiguration {
         &self.resolc_configuration
     }
 }
 
-impl AsRef<GethConfiguration> for ExecutionContext {
+impl AsRef<GethConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &GethConfiguration {
         &self.geth_configuration
     }
 }
 
-impl AsRef<KitchensinkConfiguration> for ExecutionContext {
+impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &KitchensinkConfiguration {
         &self.kitchensink_configuration
     }
 }
 
-impl AsRef<ReviveDevNodeConfiguration> for ExecutionContext {
+impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &ReviveDevNodeConfiguration {
         &self.revive_dev_node_configuration
     }
 }
 
-impl AsRef<EthRpcConfiguration> for ExecutionContext {
+impl AsRef<EthRpcConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &EthRpcConfiguration {
         &self.eth_rpc_configuration
     }
 }
 
-impl AsRef<GenesisConfiguration> for ExecutionContext {
+impl AsRef<GenesisConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &GenesisConfiguration {
         &self.genesis_configuration
     }
 }
 
-impl AsRef<WalletConfiguration> for ExecutionContext {
+impl AsRef<WalletConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &WalletConfiguration {
         &self.wallet_configuration
     }
 }
 
-impl AsRef<ConcurrencyConfiguration> for ExecutionContext {
+impl AsRef<ConcurrencyConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &ConcurrencyConfiguration {
         &self.concurrency_configuration
     }
 }
 
-impl AsRef<CompilationConfiguration> for ExecutionContext {
+impl AsRef<CompilationConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &CompilationConfiguration {
         &self.compilation_configuration
     }
 }
 
-impl AsRef<ReportConfiguration> for ExecutionContext {
+impl AsRef<ReportConfiguration> for TestExecutionContext {
     fn as_ref(&self) -> &ReportConfiguration {
         &self.report_configuration
     }
diff --git
a/crates/core/src/cached_compiler.rs b/crates/core/src/cached_compiler.rs index ed59546..c10f7e1 100644 --- a/crates/core/src/cached_compiler.rs +++ b/crates/core/src/cached_compiler.rs @@ -9,9 +9,9 @@ use std::{ }; use futures::FutureExt; -use revive_dt_common::iterators::FilesWithExtensionIterator; +use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier}; use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler}; -use revive_dt_config::TestingPlatform; +use revive_dt_core::Platform; use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata}; use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address}; @@ -22,8 +22,6 @@ use serde::{Deserialize, Serialize}; use tokio::sync::{Mutex, RwLock}; use tracing::{Instrument, debug, debug_span, instrument}; -use crate::Platform; - pub struct CachedCompiler<'a> { /// The cache that stores the compiled contracts. artifacts_cache: ArtifactsCache, @@ -57,21 +55,22 @@ impl<'a> CachedCompiler<'a> { fields( metadata_file_path = %metadata_file_path.display(), %mode, - platform = P::config_id().to_string() + platform = %platform.platform_identifier() ), err )] - pub async fn compile_contracts( + pub async fn compile_contracts( &self, metadata: &'a Metadata, metadata_file_path: &'a Path, mode: Cow<'a, Mode>, deployed_libraries: Option<&HashMap>, - compiler: &P::Compiler, + compiler: &dyn SolidityCompiler, + platform: &dyn Platform, reporter: &ExecutionSpecificReporter, ) -> Result { let cache_key = CacheKey { - platform_key: P::config_id(), + compiler_identifier: platform.compiler_identifier(), compiler_version: compiler.version().clone(), metadata_file_path, solc_mode: mode.clone(), @@ -79,7 +78,7 @@ impl<'a> CachedCompiler<'a> { let compilation_callback = || { async move { - compile_contracts::

( + compile_contracts( metadata .directory() .context("Failed to get metadata directory while preparing compilation")?, @@ -96,7 +95,7 @@ impl<'a> CachedCompiler<'a> { } .instrument(debug_span!( "Running compilation for the cache key", - cache_key.platform_key = %cache_key.platform_key, + cache_key.compiler_identifier = %cache_key.compiler_identifier, cache_key.compiler_version = %cache_key.compiler_version, cache_key.metadata_file_path = %cache_key.metadata_file_path.display(), cache_key.solc_mode = %cache_key.solc_mode, @@ -179,12 +178,12 @@ impl<'a> CachedCompiler<'a> { } } -async fn compile_contracts( +async fn compile_contracts( metadata_directory: impl AsRef, mut files_to_compile: impl Iterator, mode: &Mode, deployed_libraries: Option<&HashMap>, - compiler: &P::Compiler, + compiler: &dyn SolidityCompiler, reporter: &ExecutionSpecificReporter, ) -> Result { let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref()) @@ -332,9 +331,8 @@ impl ArtifactsCache { #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)] struct CacheKey<'a> { - /// The platform name that this artifact was compiled for. For example, this could be EVM or - /// PVM. - platform_key: &'a TestingPlatform, + /// The identifier of the used compiler. + compiler_identifier: CompilerIdentifier, /// The version of the compiler that was used to compile the artifacts. compiler_version: Version, diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index b683bde..1088ae1 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -1,14 +1,13 @@ //! The test driver handles the compilation and execution of the test cases. use std::collections::HashMap; -use std::marker::PhantomData; use std::path::PathBuf; use alloy::consensus::EMPTY_ROOT_HASH; use alloy::hex; use alloy::json_abi::JsonAbi; use alloy::network::{Ethereum, TransactionBuilder}; -use alloy::primitives::U256; +use alloy::primitives::{TxHash, U256}; use alloy::rpc::types::TransactionReceipt; use alloy::rpc::types::trace::geth::{ CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, @@ -19,8 +18,9 @@ use alloy::{ rpc::types::{TransactionRequest, trace::geth::DiffMode}, }; use anyhow::Context as _; -use futures::TryStreamExt; +use futures::{TryStreamExt, future::try_join_all}; use indexmap::IndexMap; +use revive_dt_common::types::PlatformIdentifier; use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use revive_dt_report::ExecutionSpecificReporter; use semver::Version; @@ -36,9 +36,7 @@ use revive_dt_node_interaction::EthereumNode; use tokio::try_join; use tracing::{Instrument, info, info_span, instrument}; -use crate::Platform; - -pub struct CaseState { +pub struct CaseState { /// A map of all of the compiled contracts for the given metadata file. compiled_contracts: HashMap>, @@ -54,14 +52,9 @@ pub struct CaseState { /// The execution reporter. 
execution_reporter: ExecutionSpecificReporter, - - phantom: PhantomData, } -impl CaseState -where - T: Platform, -{ +impl CaseState { pub fn new( compiler_version: Version, compiled_contracts: HashMap>, @@ -74,7 +67,6 @@ where variables: Default::default(), compiler_version, execution_reporter, - phantom: PhantomData, } } @@ -82,7 +74,7 @@ where &mut self, metadata: &Metadata, step: &Step, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result { match step { Step::FunctionCall(input) => { @@ -113,8 +105,10 @@ where &mut self, metadata: &Metadata, input: &Input, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { + let resolver = node.resolver().await?; + let deployment_receipts = self .handle_input_contract_deployment(metadata, input, node) .await @@ -124,14 +118,19 @@ where .await .context("Failed during transaction execution phase of input handling")?; let tracing_result = self - .handle_input_call_frame_tracing(&execution_receipt, node) + .handle_input_call_frame_tracing(execution_receipt.transaction_hash, node) .await .context("Failed during callframe tracing phase of input handling")?; self.handle_input_variable_assignment(input, &tracing_result) .context("Failed to assign variables from callframe output")?; let (_, (geth_trace, diff_mode)) = try_join!( - self.handle_input_expectations(input, &execution_receipt, node, &tracing_result), - self.handle_input_diff(&execution_receipt, node) + self.handle_input_expectations( + input, + &execution_receipt, + resolver.as_ref(), + &tracing_result + ), + self.handle_input_diff(execution_receipt.transaction_hash, node) ) .context("Failed while evaluating expectations and diffs in parallel")?; Ok((execution_receipt, geth_trace, diff_mode)) @@ -142,7 +141,7 @@ where &mut self, metadata: &Metadata, balance_assertion: &BalanceAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node) .await @@ -158,7 +157,7 @@ where &mut self, metadata: &Metadata, storage_empty: &StorageEmptyAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node) .await @@ -175,7 +174,7 @@ where &mut self, metadata: &Metadata, input: &Input, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result> { let mut instances_we_must_deploy = IndexMap::::new(); for instance in input.find_all_contract_instances().into_iter() { @@ -220,7 +219,7 @@ where &mut self, input: &Input, mut deployment_receipts: HashMap, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result { match input.method { // This input was already executed when `handle_input` was called. 
We just need to @@ -229,8 +228,9 @@ where .remove(&input.instance) .context("Failed to find deployment receipt for constructor call"), Method::Fallback | Method::FunctionName(_) => { + let resolver = node.resolver().await?; let tx = match input - .legacy_transaction(node, self.default_resolution_context()) + .legacy_transaction(resolver.as_ref(), self.default_resolution_context()) .await { Ok(tx) => tx, @@ -250,11 +250,11 @@ where #[instrument(level = "info", skip_all)] async fn handle_input_call_frame_tracing( &self, - execution_receipt: &TransactionReceipt, - node: &T::Blockchain, + tx_hash: TxHash, + node: &dyn EthereumNode, ) -> anyhow::Result { node.trace_transaction( - execution_receipt, + tx_hash, GethDebugTracingOptions { tracer: Some(GethDebugTracerType::BuiltInTracer( GethDebugBuiltInTracerType::CallTracer, @@ -314,7 +314,7 @@ where &self, input: &Input, execution_receipt: &TransactionReceipt, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), tracing_result: &CallFrame, ) -> anyhow::Result<()> { // Resolving the `input.expected` into a series of expectations that we can then assert on. @@ -362,7 +362,7 @@ where async fn handle_input_expectation_item( &self, execution_receipt: &TransactionReceipt, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), expectation: ExpectedOutput, tracing_result: &CallFrame, ) -> anyhow::Result<()> { @@ -507,8 +507,8 @@ where #[instrument(level = "info", skip_all)] async fn handle_input_diff( &self, - execution_receipt: &TransactionReceipt, - node: &T::Blockchain, + tx_hash: TxHash, + node: &dyn EthereumNode, ) -> anyhow::Result<(GethTrace, DiffMode)> { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), @@ -517,11 +517,11 @@ where }); let trace = node - .trace_transaction(execution_receipt, trace_options) + .trace_transaction(tx_hash, trace_options) .await .context("Failed to obtain geth prestate tracer output")?; let diff = node - .state_diff(execution_receipt) + .state_diff(tx_hash) .await .context("Failed to obtain state diff for transaction")?; @@ -533,7 +533,7 @@ where &mut self, metadata: &Metadata, balance_assertion: &BalanceAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { let Some(instance) = balance_assertion .address @@ -562,11 +562,12 @@ where expected_balance: amount, .. }: &BalanceAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { + let resolver = node.resolver().await?; let address = Address::from_slice( Calldata::new_compound([address_string]) - .calldata(node, self.default_resolution_context()) + .calldata(resolver.as_ref(), self.default_resolution_context()) .await? .get(12..32) .expect("Can't fail"), @@ -595,7 +596,7 @@ where &mut self, metadata: &Metadata, storage_empty_assertion: &StorageEmptyAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { let Some(instance) = storage_empty_assertion .address @@ -624,11 +625,12 @@ where is_storage_empty, .. }: &StorageEmptyAssertion, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<()> { + let resolver = node.resolver().await?; let address = Address::from_slice( Calldata::new_compound([address_string]) - .calldata(node, self.default_resolution_context()) + .calldata(resolver.as_ref(), self.default_resolution_context()) .await? 
.get(12..32) .expect("Can't fail"), @@ -667,7 +669,7 @@ where deployer: Address, calldata: Option<&Calldata>, value: Option, - node: &T::Blockchain, + node: &dyn EthereumNode, ) -> anyhow::Result<(Address, JsonAbi, Option)> { if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) { return Ok((*address, abi.clone(), None)); @@ -710,8 +712,9 @@ where }; if let Some(calldata) = calldata { + let resolver = node.resolver().await?; let calldata = calldata - .calldata(node, self.default_resolution_context()) + .calldata(resolver.as_ref(), self.default_resolution_context()) .await?; code.extend(calldata); } @@ -728,11 +731,7 @@ where let receipt = match node.execute_transaction(tx).await { Ok(receipt) => receipt, Err(error) => { - tracing::error!( - node = std::any::type_name::(), - ?error, - "Contract deployment transaction failed." - ); + tracing::error!(?error, "Contract deployment transaction failed."); return Err(error); } }; @@ -763,36 +762,23 @@ where } } -pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> { +pub struct CaseDriver<'a> { metadata: &'a Metadata, case: &'a Case, - leader_node: &'a Leader::Blockchain, - follower_node: &'a Follower::Blockchain, - leader_state: CaseState, - follower_state: CaseState, + platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>, } -impl<'a, L, F> CaseDriver<'a, L, F> -where - L: Platform, - F: Platform, -{ +impl<'a> CaseDriver<'a> { #[allow(clippy::too_many_arguments)] pub fn new( metadata: &'a Metadata, case: &'a Case, - leader_node: &'a L::Blockchain, - follower_node: &'a F::Blockchain, - leader_state: CaseState, - follower_state: CaseState, - ) -> CaseDriver<'a, L, F> { + platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>, + ) -> CaseDriver<'a> { Self { metadata, case, - leader_node, - follower_node, - leader_state, - follower_state, + platform_state, } } @@ -805,42 +791,44 @@ where .enumerate() .map(|(idx, v)| (StepIdx::new(idx), v)) { - let (leader_step_output, follower_step_output) = try_join!( - self.leader_state - .handle_step(self.metadata, &step, self.leader_node) - .instrument(info_span!( - "Handling Step", - %step_idx, - target = "Leader", - )), - self.follower_state - .handle_step(self.metadata, &step, self.follower_node) - .instrument(info_span!( - "Handling Step", - %step_idx, - target = "Follower", - )) - )?; + // Run this step concurrently across all platforms; short-circuit on first failure + let metadata = self.metadata; + let step_futs = + self.platform_state + .iter_mut() + .map(|(node, platform_id, case_state)| { + let platform_id = *platform_id; + let node_ref = *node; + let step_clone = step.clone(); + let span = info_span!( + "Handling Step", + %step_idx, + platform = %platform_id, + ); + async move { + case_state + .handle_step(metadata, &step_clone, node_ref) + .await + .map_err(|e| (platform_id, e)) + } + .instrument(span) + }); - match (leader_step_output, follower_step_output) { - (StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => { - // TODO: We need to actually work out how/if we will compare the diff between - // the leader and the follower. The diffs are almost guaranteed to be different - // from leader and follower and therefore without an actual strategy for this - // we have something that's guaranteed to fail. Even a simple call to some - // contract will produce two non-equal diffs because on the leader the contract - // has address X and on the follower it has address Y. 
On the leader contract X - // contains address A in the state and on the follower it contains address B. So - // this isn't exactly a straightforward thing to do and I'm not even sure that - // it's possible to do. Once we have an actual strategy for doing the diffs we - // will implement it here. Until then, this remains empty. + match try_join_all(step_futs).await { + Ok(_outputs) => { + // All platforms succeeded for this step + steps_executed += 1; + } + Err((platform_id, error)) => { + tracing::error!( + %step_idx, + platform = %platform_id, + ?error, + "Step failed on platform", + ); + return Err(error); } - (StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {} - (StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {} - _ => unreachable!("The two step outputs can not be of a different kind"), } - - steps_executed += 1; } Ok(steps_executed) diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index b729b42..8bc93e2 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -3,45 +3,370 @@ //! This crate defines the testing configuration and //! provides a helper utility to execute tests. -use revive_dt_compiler::{SolidityCompiler, revive_resolc, solc}; -use revive_dt_config::TestingPlatform; -use revive_dt_format::traits::ResolverApi; -use revive_dt_node::{Node, geth, kitchensink::KitchensinkNode}; +use std::{ + pin::Pin, + thread::{self, JoinHandle}, +}; + +use alloy::genesis::Genesis; +use anyhow::Context as _; +use revive_dt_common::types::*; +use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; +use revive_dt_config::*; +use revive_dt_node::{Node, geth::GethNode, substrate::SubstrateNode}; use revive_dt_node_interaction::EthereumNode; +use tracing::info; pub mod driver; -/// One platform can be tested differentially against another. -/// -/// For this we need a blockchain node implementation and a compiler. +/// A trait that describes the interface for the platforms that are supported by the tool. +#[allow(clippy::type_complexity)] pub trait Platform { - type Blockchain: EthereumNode + Node + ResolverApi; - type Compiler: SolidityCompiler; + /// Returns the identifier of this platform. This is a combination of the node and the compiler + /// used. + fn platform_identifier(&self) -> PlatformIdentifier; - /// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments]. - fn config_id() -> &'static TestingPlatform; + /// Returns a full identifier for the platform. + fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) { + ( + self.node_identifier(), + self.vm_identifier(), + self.compiler_identifier(), + ) + } + + /// Returns the identifier of the node used. + fn node_identifier(&self) -> NodeIdentifier; + + /// Returns the identifier of the vm used. + fn vm_identifier(&self) -> VmIdentifier; + + /// Returns the identifier of the compiler used. + fn compiler_identifier(&self) -> CompilerIdentifier; + + /// Creates a new node for the platform by spawning a new thread, creating the node object, + /// initializing it, spawning it, and waiting for it to start up. 
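The `new_node` implementations that follow all take the same shape: do the blocking start-up work on a dedicated OS thread and hand the caller a `JoinHandle` that resolves to a boxed trait object once the node is ready. A minimal, self-contained sketch of that pattern; the trait, node type, and connection string below are stand-ins for the real `EthereumNode` machinery, not the actual API:

```rust
use std::thread::{self, JoinHandle};

// Hypothetical stand-ins for the real trait and node type.
trait EthereumNode: Send {
    fn connection_string(&self) -> &str;
}

struct DummyNode {
    connection: String,
}

impl EthereumNode for DummyNode {
    fn connection_string(&self) -> &str {
        &self.connection
    }
}

// Spawn the (potentially slow) node start-up on its own OS thread and hand the
// caller a JoinHandle; joining it later yields the ready, boxed trait object.
fn new_node() -> JoinHandle<anyhow::Result<Box<dyn EthereumNode>>> {
    thread::spawn(|| {
        // Real start-up work (init, spawn, wait for readiness) would go here.
        let node = DummyNode { connection: "/tmp/node.ipc".into() };
        Ok(Box::new(node) as Box<dyn EthereumNode>)
    })
}

fn main() -> anyhow::Result<()> {
    let handle = new_node();
    // The outer `?` handles a panicked thread, the inner one a failed start-up.
    let node = handle
        .join()
        .map_err(|e| anyhow::anyhow!("node thread panicked: {e:?}"))??;
    println!("node ready at {}", node.connection_string());
    Ok(())
}
```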
+ fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>>; + + /// Creates a new compiler for the provided platform + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>>; } -#[derive(Default)] -pub struct Geth; +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct GethEvmSolcPlatform; -impl Platform for Geth { - type Blockchain = geth::GethNode; - type Compiler = solc::Solc; +impl Platform for GethEvmSolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::GethEvmSolc + } - fn config_id() -> &'static TestingPlatform { - &TestingPlatform::Geth + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Geth + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = GethNode::new(context); + let node = spawn_node::(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) } } -#[derive(Default)] -pub struct Kitchensink; +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct KitchensinkPolkavmResolcPlatform; -impl Platform for Kitchensink { - type Blockchain = KitchensinkNode; - type Compiler = revive_resolc::Resolc; +impl Platform for KitchensinkPolkavmResolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::KitchensinkPolkavmResolc + } - fn config_id() -> &'static TestingPlatform { - &TestingPlatform::Kitchensink + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Kitchensink + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::PolkaVM + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Resolc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let kitchensink_path = AsRef::::as_ref(&context) + .path + .clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + kitchensink_path, + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Resolc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) } } + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct KitchensinkRevmSolcPlatform; + +impl Platform for KitchensinkRevmSolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::KitchensinkRevmSolc + } + + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Kitchensink + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let 
genesis_configuration = AsRef::::as_ref(&context); + let kitchensink_path = AsRef::::as_ref(&context) + .path + .clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + kitchensink_path, + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct ReviveDevNodePolkavmResolcPlatform; + +impl Platform for ReviveDevNodePolkavmResolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ReviveDevNodePolkavmResolc + } + + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::ReviveDevNode + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::PolkaVM + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Resolc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let revive_dev_node_path = AsRef::::as_ref(&context) + .path + .clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + revive_dev_node_path, + SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Resolc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct ReviveDevNodeRevmSolcPlatform; + +impl Platform for ReviveDevNodeRevmSolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ReviveDevNodeRevmSolc + } + + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::ReviveDevNode + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let revive_dev_node_path = AsRef::::as_ref(&context) + .path + .clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + revive_dev_node_path, + SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } +} + +impl From for Box { + fn from(value: PlatformIdentifier) -> Self { + match value { + PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, + PlatformIdentifier::KitchensinkPolkavmResolc => { + Box::new(KitchensinkPolkavmResolcPlatform) as Box<_> + } + PlatformIdentifier::KitchensinkRevmSolc => { + Box::new(KitchensinkRevmSolcPlatform) as Box<_> + 
} + PlatformIdentifier::ReviveDevNodePolkavmResolc => { + Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_> + } + PlatformIdentifier::ReviveDevNodeRevmSolc => { + Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_> + } + } + } +} + +impl From for &dyn Platform { + fn from(value: PlatformIdentifier) -> Self { + match value { + PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, + PlatformIdentifier::KitchensinkPolkavmResolc => { + &KitchensinkPolkavmResolcPlatform as &dyn Platform + } + PlatformIdentifier::KitchensinkRevmSolc => { + &KitchensinkRevmSolcPlatform as &dyn Platform + } + PlatformIdentifier::ReviveDevNodePolkavmResolc => { + &ReviveDevNodePolkavmResolcPlatform as &dyn Platform + } + PlatformIdentifier::ReviveDevNodeRevmSolc => { + &ReviveDevNodeRevmSolcPlatform as &dyn Platform + } + } + } +} + +fn spawn_node( + mut node: T, + genesis: Genesis, +) -> anyhow::Result { + info!( + id = node.id(), + connection_string = node.connection_string(), + "Spawning node" + ); + node.spawn(genesis) + .context("Failed to spawn node process")?; + info!( + id = node.id(), + connection_string = node.connection_string(), + "Spawned node" + ); + Ok(node) +} diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index 99f292f..cb2574c 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -1,8 +1,9 @@ mod cached_compiler; +mod pool; use std::{ borrow::Cow, - collections::{BTreeMap, HashMap}, + collections::{BTreeSet, HashMap}, io::{BufWriter, Write, stderr}, path::Path, sync::Arc, @@ -20,20 +21,19 @@ use futures::{Stream, StreamExt}; use indexmap::{IndexMap, indexmap}; use revive_dt_node_interaction::EthereumNode; use revive_dt_report::{ - NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus, + ExecutionSpecificReporter, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus, TestSpecificReporter, TestSpecifier, }; use schemars::schema_for; use serde_json::{Value, json}; -use tokio::try_join; use tracing::{debug, error, info, info_span, instrument}; use tracing_subscriber::{EnvFilter, FmtSubscriber}; use revive_dt_common::{iterators::EitherIter, types::Mode}; -use revive_dt_compiler::{CompilerOutput, SolidityCompiler}; +use revive_dt_compiler::SolidityCompiler; use revive_dt_config::{Context, *}; use revive_dt_core::{ - Geth, Kitchensink, Platform, + Platform, driver::{CaseDriver, CaseState}, }; use revive_dt_format::{ @@ -43,9 +43,9 @@ use revive_dt_format::{ metadata::{ContractPathAndIdent, Metadata, MetadataFile}, mode::ParsedMode, }; -use revive_dt_node::{Node, pool::NodePool}; use crate::cached_compiler::CachedCompiler; +use crate::pool::NodePool; fn main() -> anyhow::Result<()> { let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() @@ -112,7 +112,7 @@ fn main() -> anyhow::Result<()> { #[instrument(level = "debug", name = "Collecting Corpora", skip_all)] fn collect_corpora( - context: &ExecutionContext, + context: &TestExecutionContext, ) -> anyhow::Result>> { let mut corpora = HashMap::new(); @@ -133,32 +133,35 @@ fn collect_corpora( Ok(corpora) } -async fn run_driver( - context: ExecutionContext, +async fn run_driver( + context: TestExecutionContext, metadata_files: &[MetadataFile], reporter: Reporter, report_aggregator_task: impl Future>, -) -> anyhow::Result<()> -where - L: Platform, - F: Platform, - L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, -{ - let leader_nodes = NodePool::::new(context.clone()) - 
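The second conversion above needs no allocation at all: the platform types are zero-sized unit structs, so a reference to a freshly constructed one can be promoted to `'static`. A small, self-contained sketch of that trick with two hypothetical platforms standing in for the real set:

```rust
// Hypothetical miniature of the identifier-to-platform mapping.
trait Platform {
    fn name(&self) -> &'static str;
}

struct GethEvmSolc;
struct ReviveDevNodePolkavmResolc;

impl Platform for GethEvmSolc {
    fn name(&self) -> &'static str { "geth-evm-solc" }
}
impl Platform for ReviveDevNodePolkavmResolc {
    fn name(&self) -> &'static str { "revive-dev-node-polkavm-resolc" }
}

#[derive(Clone, Copy)]
enum PlatformIdentifier {
    GethEvmSolc,
    ReviveDevNodePolkavmResolc,
}

impl From<PlatformIdentifier> for &'static dyn Platform {
    fn from(value: PlatformIdentifier) -> Self {
        // The unit-struct literals are const-promoted, so these references
        // live for 'static without any Box.
        match value {
            PlatformIdentifier::GethEvmSolc => &GethEvmSolc,
            PlatformIdentifier::ReviveDevNodePolkavmResolc => &ReviveDevNodePolkavmResolc,
        }
    }
}

fn main() {
    let platform: &dyn Platform = PlatformIdentifier::GethEvmSolc.into();
    assert_eq!(platform.name(), "geth-evm-solc");
}
```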
.context("Failed to initialize leader node pool")?; - let follower_nodes = NodePool::::new(context.clone()) - .context("Failed to initialize follower node pool")?; + platforms: Vec<&dyn Platform>, +) -> anyhow::Result<()> { + let mut nodes = Vec::<(&dyn Platform, NodePool)>::new(); + for platform in platforms.into_iter() { + let pool = NodePool::new(Context::ExecuteTests(Box::new(context.clone())), platform) + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Failed to initialize the node pool for the platform." + ) + }) + .context("Failed to initialize the node pool")?; + nodes.push((platform, pool)); + } let tests_stream = tests_stream( &context, metadata_files.iter(), - &leader_nodes, - &follower_nodes, + nodes.as_slice(), reporter.clone(), ) .await; - let driver_task = start_driver_task::(&context, tests_stream) + let driver_task = start_driver_task(&context, tests_stream) .await .context("Failed to start driver task")?; let cli_reporting_task = start_cli_reporting_task(reporter); @@ -169,19 +172,12 @@ where Ok(()) } -async fn tests_stream<'a, L, F>( - args: &ExecutionContext, +async fn tests_stream<'a>( + args: &TestExecutionContext, metadata_files: impl IntoIterator + Clone, - leader_node_pool: &'a NodePool, - follower_node_pool: &'a NodePool, + nodes: &'a [(&dyn Platform, NodePool)], reporter: Reporter, -) -> impl Stream> -where - L: Platform, - F: Platform, - L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, -{ +) -> impl Stream> { let tests = metadata_files .into_iter() .flat_map(|metadata_file| { @@ -231,35 +227,36 @@ where stream::iter(tests.into_iter()) .filter_map( move |(metadata_file, case_idx, case, mode, reporter)| async move { - let leader_compiler = ::new( - args, - mode.version.clone().map(Into::into), - ) - .await - .inspect_err(|err| error!(?err, "Failed to instantiate the leader compiler")) - .ok()?; + let mut platforms = Vec::new(); + for (platform, node_pool) in nodes.iter() { + let node = node_pool.round_robbin(); + let compiler = platform + .new_compiler( + Context::ExecuteTests(Box::new(args.clone())), + mode.version.clone().map(Into::into), + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Failed to instantiate the compiler" + ) + }) + .ok()?; - let follower_compiler = ::new( - args, - mode.version.clone().map(Into::into), - ) - .await - .inspect_err(|err| error!(?err, "Failed to instantiate the follower compiler")) - .ok()?; + let reporter = reporter + .execution_specific_reporter(node.id(), platform.platform_identifier()); + platforms.push((*platform, node, compiler, reporter)); + } - let leader_node = leader_node_pool.round_robbin(); - let follower_node = follower_node_pool.round_robbin(); - - Some(Test:: { + Some(Test { metadata: metadata_file, metadata_file_path: metadata_file.metadata_file_path.as_path(), mode: mode.clone(), case_idx: CaseIdx::new(case_idx), case, - leader_node, - follower_node, - leader_compiler, - follower_compiler, + platforms, reporter, }) }, @@ -293,18 +290,10 @@ where }) } -async fn start_driver_task<'a, L, F>( - context: &ExecutionContext, - tests: impl Stream>, -) -> anyhow::Result> -where - L: Platform, - F: Platform, - L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - L::Compiler: 'a, - F::Compiler: 'a, -{ +async fn start_driver_task<'a>( + context: 
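The `round_robbin` calls in this hunk lean on the pool's lock-free distribution scheme, which is worth seeing in isolation: `AtomicUsize::fetch_add` hands every caller a unique ticket, and the modulo maps tickets onto the fixed node set, so concurrent callers spread evenly without a lock. A trimmed-down, hypothetical version of the pool:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// A hypothetical, generic reduction of the node pool's round-robin scheme.
struct Pool<T> {
    next: AtomicUsize,
    items: Vec<T>,
}

impl<T> Pool<T> {
    fn new(items: Vec<T>) -> Self {
        assert!(!items.is_empty(), "a pool needs at least one item");
        Self { next: AtomicUsize::new(0), items }
    }

    // Every caller gets a unique ticket; the modulo wraps tickets onto items.
    fn round_robin(&self) -> &T {
        let ticket = self.next.fetch_add(1, Ordering::SeqCst);
        &self.items[ticket % self.items.len()]
    }
}

fn main() {
    let pool = Pool::new(vec!["node-0", "node-1", "node-2"]);
    let picks: Vec<_> = (0..5).map(|_| *pool.round_robin()).collect();
    assert_eq!(picks, ["node-0", "node-1", "node-2", "node-0", "node-1"]);
}
```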
&TestExecutionContext, + tests: impl Stream>, +) -> anyhow::Result> { info!("Starting driver task"); let cached_compiler = Arc::new( @@ -327,23 +316,18 @@ where let cached_compiler = cached_compiler.clone(); async move { - test.reporter - .report_leader_node_assigned_event( - test.leader_node.id(), - *L::config_id(), - test.leader_node.connection_string(), - ) - .expect("Can't fail"); - test.reporter - .report_follower_node_assigned_event( - test.follower_node.id(), - *F::config_id(), - test.follower_node.connection_string(), - ) - .expect("Can't fail"); + for (platform, node, _, _) in test.platforms.iter() { + test.reporter + .report_node_assigned_event( + node.id(), + platform.platform_identifier(), + node.connection_string(), + ) + .expect("Can't fail"); + } let reporter = test.reporter.clone(); - let result = handle_case_driver::(test, cached_compiler).await; + let result = handle_case_driver(&test, cached_compiler).await; match result { Ok(steps_executed) => reporter @@ -449,230 +433,174 @@ async fn start_cli_reporting_task(reporter: Reporter) { mode = %test.mode, case_idx = %test.case_idx, case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"), - leader_node = test.leader_node.id(), - follower_node = test.follower_node.id(), ) )] -async fn handle_case_driver<'a, L, F>( - test: Test<'a, L, F>, +async fn handle_case_driver<'a>( + test: &Test<'a>, cached_compiler: Arc>, -) -> anyhow::Result -where - L: Platform, - F: Platform, - L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, - L::Compiler: 'a, - F::Compiler: 'a, -{ - let leader_reporter = test - .reporter - .execution_specific_reporter(test.leader_node.id(), NodeDesignation::Leader); - let follower_reporter = test - .reporter - .execution_specific_reporter(test.follower_node.id(), NodeDesignation::Follower); +) -> anyhow::Result { + let platform_state = stream::iter(test.platforms.iter()) + // Compiling the pre-link contracts. 
+ .filter_map(|(platform, node, compiler, reporter)| { + let cached_compiler = cached_compiler.clone(); - let ( - CompilerOutput { - contracts: leader_pre_link_contracts, - }, - CompilerOutput { - contracts: follower_pre_link_contracts, - }, - ) = try_join!( - cached_compiler.compile_contracts::( - test.metadata, - test.metadata_file_path, - test.mode.clone(), - None, - &test.leader_compiler, - &leader_reporter, - ), - cached_compiler.compile_contracts::( - test.metadata, - test.metadata_file_path, - test.mode.clone(), - None, - &test.follower_compiler, - &follower_reporter - ) - ) - .context("Failed to compile pre-link contracts for leader/follower in parallel")?; - - let mut leader_deployed_libraries = None::>; - let mut follower_deployed_libraries = None::>; - let mut contract_sources = test - .metadata - .contract_sources() - .context("Failed to retrieve contract sources from metadata")?; - for library_instance in test - .metadata - .libraries - .iter() - .flatten() - .flat_map(|(_, map)| map.values()) - { - debug!(%library_instance, "Deploying Library Instance"); - - let ContractPathAndIdent { - contract_source_path: library_source_path, - contract_ident: library_ident, - } = contract_sources - .remove(library_instance) - .context("Failed to find the contract source")?; - - let (leader_code, leader_abi) = leader_pre_link_contracts - .get(&library_source_path) - .and_then(|contracts| contracts.get(library_ident.as_str())) - .context("Declared library was not compiled")?; - let (follower_code, follower_abi) = follower_pre_link_contracts - .get(&library_source_path) - .and_then(|contracts| contracts.get(library_ident.as_str())) - .context("Declared library was not compiled")?; - - let leader_code = match alloy::hex::decode(leader_code) { - Ok(code) => code, - Err(error) => { - anyhow::bail!("Failed to hex-decode the byte code {}", error) + async move { + let compiler_output = cached_compiler + .compile_contracts( + test.metadata, + test.metadata_file_path, + test.mode.clone(), + None, + compiler.as_ref(), + *platform, + reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Pre-linking compilation failed" + ) + }) + .ok()?; + Some((test, platform, node, compiler, reporter, compiler_output)) } - }; - let follower_code = match alloy::hex::decode(follower_code) { - Ok(code) => code, - Err(error) => { - anyhow::bail!("Failed to hex-decode the byte code {}", error) - } - }; + }) + // Deploying the libraries for the platform. + .filter_map( + |(test, platform, node, compiler, reporter, compiler_output)| async move { + let mut deployed_libraries = None::>; + let mut contract_sources = test + .metadata + .contract_sources() + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Failed to retrieve contract sources from metadata" + ) + }) + .ok()?; + for library_instance in test + .metadata + .libraries + .iter() + .flatten() + .flat_map(|(_, map)| map.values()) + { + debug!(%library_instance, "Deploying Library Instance"); - // Getting the deployer address from the cases themselves. This is to ensure that we're - // doing the deployments from different accounts and therefore we're not slowed down by - // the nonce. - let deployer_address = test - .case - .steps - .iter() - .filter_map(|step| match step { - Step::FunctionCall(input) => Some(input.caller), - Step::BalanceAssertion(..) => None, - Step::StorageEmptyAssertion(..) 
=> None, - }) - .next() - .unwrap_or(Input::default_caller()); - let leader_tx = TransactionBuilder::::with_deploy_code( - TransactionRequest::default().from(deployer_address), - leader_code, - ); - let follower_tx = TransactionBuilder::::with_deploy_code( - TransactionRequest::default().from(deployer_address), - follower_code, - ); + let ContractPathAndIdent { + contract_source_path: library_source_path, + contract_ident: library_ident, + } = contract_sources.remove(library_instance)?; - let (leader_receipt, follower_receipt) = try_join!( - test.leader_node.execute_transaction(leader_tx), - test.follower_node.execute_transaction(follower_tx) - )?; + let (code, abi) = compiler_output + .contracts + .get(&library_source_path) + .and_then(|contracts| contracts.get(library_ident.as_str()))?; - debug!( - ?library_instance, - library_address = ?leader_receipt.contract_address, - "Deployed library to leader" - ); - debug!( - ?library_instance, - library_address = ?follower_receipt.contract_address, - "Deployed library to follower" - ); + let code = alloy::hex::decode(code).ok()?; - let leader_library_address = leader_receipt - .contract_address - .context("Contract deployment didn't return an address")?; - let follower_library_address = follower_receipt - .contract_address - .context("Contract deployment didn't return an address")?; + // Getting the deployer address from the cases themselves. This is to ensure + // that we're doing the deployments from different accounts and therefore we're + // not slowed down by the nonce. + let deployer_address = test + .case + .steps + .iter() + .filter_map(|step| match step { + Step::FunctionCall(input) => Some(input.caller), + Step::BalanceAssertion(..) => None, + Step::StorageEmptyAssertion(..) => None, + }) + .next() + .unwrap_or(Input::default_caller()); + let tx = TransactionBuilder::::with_deploy_code( + TransactionRequest::default().from(deployer_address), + code, + ); + let receipt = node + .execute_transaction(tx) + .await + .inspect_err(|err| { + error!( + ?err, + %library_instance, + platform_identifier = %platform.platform_identifier(), + "Failed to deploy the library" + ) + }) + .ok()?; - leader_deployed_libraries.get_or_insert_default().insert( - library_instance.clone(), - ( - library_ident.clone(), - leader_library_address, - leader_abi.clone(), - ), - ); - follower_deployed_libraries.get_or_insert_default().insert( - library_instance.clone(), - ( - library_ident, - follower_library_address, - follower_abi.clone(), - ), - ); - } - if let Some(ref leader_deployed_libraries) = leader_deployed_libraries { - leader_reporter.report_libraries_deployed_event( - leader_deployed_libraries - .clone() - .into_iter() - .map(|(key, (_, address, _))| (key, address)) - .collect::>(), - )?; - } - if let Some(ref follower_deployed_libraries) = follower_deployed_libraries { - follower_reporter.report_libraries_deployed_event( - follower_deployed_libraries - .clone() - .into_iter() - .map(|(key, (_, address, _))| (key, address)) - .collect::>(), - )?; - } + debug!( + ?library_instance, + platform_identifier = %platform.platform_identifier(), + "Deployed library" + ); - let ( - CompilerOutput { - contracts: leader_post_link_contracts, - }, - CompilerOutput { - contracts: follower_post_link_contracts, - }, - ) = try_join!( - cached_compiler.compile_contracts::( - test.metadata, - test.metadata_file_path, - test.mode.clone(), - leader_deployed_libraries.as_ref(), - &test.leader_compiler, - &leader_reporter, - ), - cached_compiler.compile_contracts::( - 
test.metadata, - test.metadata_file_path, - test.mode.clone(), - follower_deployed_libraries.as_ref(), - &test.follower_compiler, - &follower_reporter + let library_address = receipt.contract_address?; + + deployed_libraries.get_or_insert_default().insert( + library_instance.clone(), + (library_ident.clone(), library_address, abi.clone()), + ); + } + + Some(( + test, + platform, + node, + compiler, + reporter, + compiler_output, + deployed_libraries, + )) + }, ) - ) - .context("Failed to compile post-link contracts for leader/follower in parallel")?; + // Compiling the post-link contracts. + .filter_map( + |(test, platform, node, compiler, reporter, _, deployed_libraries)| { + let cached_compiler = cached_compiler.clone(); - let leader_state = CaseState::::new( - test.leader_compiler.version().clone(), - leader_post_link_contracts, - leader_deployed_libraries.unwrap_or_default(), - leader_reporter, - ); - let follower_state = CaseState::::new( - test.follower_compiler.version().clone(), - follower_post_link_contracts, - follower_deployed_libraries.unwrap_or_default(), - follower_reporter, - ); + async move { + let compiler_output = cached_compiler + .compile_contracts( + test.metadata, + test.metadata_file_path, + test.mode.clone(), + deployed_libraries.as_ref(), + compiler.as_ref(), + *platform, + reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Pre-linking compilation failed" + ) + }) + .ok()?; - let mut driver = CaseDriver::::new( - test.metadata, - test.case, - test.leader_node, - test.follower_node, - leader_state, - follower_state, - ); + let case_state = CaseState::new( + compiler.version().clone(), + compiler_output.contracts, + deployed_libraries.unwrap_or_default(), + reporter.clone(), + ); + + Some((*node, platform.platform_identifier(), case_state)) + } + }, + ) + // Collect + .collect::>() + .await; + + let mut driver = CaseDriver::new(test.metadata, test.case, platform_state); driver .execute() .await @@ -680,41 +608,43 @@ where } async fn execute_corpus( - context: ExecutionContext, + context: TestExecutionContext, tests: &[MetadataFile], reporter: Reporter, report_aggregator_task: impl Future>, ) -> anyhow::Result<()> { - match (&context.leader, &context.follower) { - (TestingPlatform::Geth, TestingPlatform::Kitchensink) => { - run_driver::(context, tests, reporter, report_aggregator_task) - .await? - } - (TestingPlatform::Geth, TestingPlatform::Geth) => { - run_driver::(context, tests, reporter, report_aggregator_task).await? - } - _ => unimplemented!(), - } + let platforms = context + .platforms + .iter() + .copied() + .collect::>() + .into_iter() + .map(Into::<&dyn Platform>::into) + .collect::>(); + + run_driver(context, tests, reporter, report_aggregator_task, platforms).await?; Ok(()) } /// this represents a single "test"; a mode, path and collection of cases. -#[derive(Clone)] -struct Test<'a, L: Platform, F: Platform> { +#[allow(clippy::type_complexity)] +struct Test<'a> { metadata: &'a MetadataFile, metadata_file_path: &'a Path, mode: Cow<'a, Mode>, case_idx: CaseIdx, case: &'a Case, - leader_node: &'a ::Blockchain, - follower_node: &'a ::Blockchain, - leader_compiler: L::Compiler, - follower_compiler: F::Compiler, + platforms: Vec<( + &'a dyn Platform, + &'a dyn EthereumNode, + Box, + ExecutionSpecificReporter, + )>, reporter: TestSpecificReporter, } -impl<'a, L: Platform, F: Platform> Test<'a, L, F> { +impl<'a> Test<'a> { /// Checks if this test can be ran with the current configuration. 
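Before the individual checks, it may help to see the shape they share. A hypothetical reduction of the pattern used by `check_compatibility` and the `check_*` helpers below: each check returns `Err((reason, details))` carrying a per-platform verdict, and the top-level function chains them with `?` so the first failing check is the one reported. The names and types here are illustrative only:

```rust
// Each check either passes or explains, per platform, why it failed.
type CheckResult = Result<(), (&'static str, Vec<(String, bool)>)>;

fn check_target(platform_vms: &[(&str, &str)], desired: Option<&str>) -> CheckResult {
    // No desired target means every platform is acceptable.
    let Some(desired) = desired else { return Ok(()) };
    let details: Vec<(String, bool)> = platform_vms
        .iter()
        .map(|(name, vm)| (name.to_string(), *vm == desired))
        .collect();
    if details.iter().all(|(_, ok)| *ok) {
        Ok(())
    } else {
        Err(("a platform does not support the desired target", details))
    }
}

fn check_compatibility(platform_vms: &[(&str, &str)], desired: Option<&str>) -> CheckResult {
    check_target(platform_vms, desired)?;
    // Further checks (EVM version, compiler mode, ...) would chain here.
    Ok(())
}

fn main() {
    let platforms = [("geth-evm-solc", "EVM"), ("revive-dev-node-polkavm-resolc", "PolkaVM")];
    let result = check_compatibility(&platforms, Some("PolkaVM"));
    assert!(result.is_err(), "the EVM-only platform fails the PolkaVM target");
}
```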
    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
@@ -743,74 +673,94 @@ impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
         }
     }
 
-    /// Checks if the leader and the follower both support the desired targets in the metadata file.
+    /// Checks if the platforms all support the desired targets in the metadata file.
     fn check_target_compatibility(&self) -> TestCheckFunctionResult {
-        let leader_support =
-            ::matches_target(self.metadata.targets.as_deref());
-        let follower_support =
-            ::matches_target(self.metadata.targets.as_deref());
-        let is_allowed = leader_support && follower_support;
+        let mut error_map = indexmap! {
+            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
+        };
+        let mut is_allowed = true;
+        for (platform, ..) in self.platforms.iter() {
+            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
+                None => true,
+                Some(targets) => {
+                    let mut target_matches = false;
+                    for target in targets.iter() {
+                        if &platform.vm_identifier() == target {
+                            target_matches = true;
+                            break;
+                        }
+                    }
+                    target_matches
+                }
+            };
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
 
         if is_allowed {
             Ok(())
         } else {
             Err((
-                "Either the leader or the follower do not support the target desired by the test.",
-                indexmap! {
-                    "test_desired_targets" => json!(self.metadata.targets.as_ref()),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
+                "One of the platforms does not support the targets desired by the test.",
+                error_map,
             ))
         }
     }
 
-    // Checks for the compatibility of the EVM version with the leader and follower nodes.
+    // Checks for the compatibility of the EVM version with the specified platforms.
     fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
         let Some(evm_version_requirement) = self.metadata.required_evm_version else {
             return Ok(());
         };
-        let leader_support = evm_version_requirement
-            .matches(&::evm_version());
-        let follower_support = evm_version_requirement
-            .matches(&::evm_version());
-        let is_allowed = leader_support && follower_support;
+        let mut error_map = indexmap! {
+            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
+        };
+        let mut is_allowed = true;
+        for (platform, node, ..) in self.platforms.iter() {
+            let is_allowed_for_platform = evm_version_requirement.matches(&node.evm_version());
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
 
         if is_allowed {
             Ok(())
         } else {
             Err((
-                "EVM version is incompatible with either the leader or the follower.",
-                indexmap! {
-                    "test_desired_evm_version" => json!(self.metadata.required_evm_version),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
+                "EVM version is incompatible with one of the specified platforms.",
+                error_map,
             ))
         }
     }
 
-    /// Checks if the leader and follower compilers support the mode that the test is for.
+    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
-        let leader_support = self
-            .leader_compiler
-            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
-        let follower_support = self
-            .follower_compiler
-            .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
-        let is_allowed = leader_support && follower_support;
+        let mut error_map = indexmap! {
+            "mode" => json!(self.mode),
+        };
+        let mut is_allowed = true;
+        for (platform, _, compiler, ..) in self.platforms.iter() {
+            let is_allowed_for_platform =
+                compiler.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
 
         if is_allowed {
             Ok(())
         } else {
             Err((
-                "Compilers do not support this mode either for the leader or for the follower.",
-                indexmap! {
-                    "mode" => json!(self.mode),
-                    "leader_support" => json!(leader_support),
-                    "follower_support" => json!(follower_support),
-                },
+                "The compilers of one or more of the provided platforms do not support this mode.",
+                error_map,
             ))
         }
     }
diff --git a/crates/core/src/pool.rs b/crates/core/src/pool.rs
new file mode 100644
index 0000000..06fb2be
--- /dev/null
+++ b/crates/core/src/pool.rs
@@ -0,0 +1,52 @@
+//! This module implements concurrent handling of the testing nodes.
+
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use anyhow::Context as _;
+use revive_dt_config::*;
+use revive_dt_core::Platform;
+use revive_dt_node_interaction::EthereumNode;
+
+/// The node pool starts one or more [Node] which then can be accessed
+/// in a round-robin fashion.
+pub struct NodePool {
+    next: AtomicUsize,
+    nodes: Vec<Box<dyn EthereumNode>>,
+}
+
+impl NodePool {
+    /// Create a new Pool. This will start as many nodes as there are workers in `config`.
+    pub fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
+        let concurrency_configuration = AsRef::::as_ref(&context);
+        let nodes = concurrency_configuration.number_of_nodes;
+
+        let mut handles = Vec::with_capacity(nodes);
+        for _ in 0..nodes {
+            let context = context.clone();
+            handles.push(platform.new_node(context)?);
+        }
+
+        let mut nodes = Vec::with_capacity(nodes);
+        for handle in handles {
+            nodes.push(
+                handle
+                    .join()
+                    .map_err(|error| anyhow::anyhow!("node spawn thread panicked: {:?}", error))
+                    .context("Failed to join node spawn thread")?
+                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
+                    .context("Node failed to spawn")?,
+            );
+        }
+
+        Ok(Self {
+            nodes,
+            next: Default::default(),
+        })
+    }
+
+    /// Get a handle to the next node.
+    pub fn round_robbin(&self) -> &dyn EthereumNode {
+        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
+        self.nodes.get(current).unwrap().as_ref()
+    }
+}
diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs
index d57f18e..bf187e2 100644
--- a/crates/format/src/input.rs
+++ b/crates/format/src/input.rs
@@ -308,7 +308,7 @@ impl Input {
     pub async fn encoded_input(
         &self,
-        resolver: &impl ResolverApi,
+        resolver: &(impl ResolverApi + ?Sized),
         context: ResolutionContext<'_>,
     ) -> anyhow::Result {
         match self.method {
@@ -377,7 +377,7 @@ impl Input {
     /// Parse this input into a legacy transaction.
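The `?Sized` additions in this hunk and the ones that follow all exist for the same reason: generic parameters carry an implicit `Sized` bound, so a plain `&impl ResolverApi` argument cannot accept the `&dyn ResolverApi` handed out by `node.resolver()`. A minimal, self-contained illustration of the relaxation; the one-method trait and `FixedResolver` type are stand-ins, not the real API:

```rust
trait ResolverApi {
    fn chain_id(&self) -> u64;
}

struct FixedResolver(u64);

impl ResolverApi for FixedResolver {
    fn chain_id(&self) -> u64 {
        self.0
    }
}

// `?Sized` opts out of the implicit `Sized` bound, so `dyn ResolverApi`
// is an acceptable instantiation of the impl-trait parameter.
fn describe(resolver: &(impl ResolverApi + ?Sized)) -> String {
    format!("chain id: {}", resolver.chain_id())
}

fn main() {
    let concrete = FixedResolver(420);
    let boxed: Box<dyn ResolverApi> = Box::new(FixedResolver(1));

    // Works with a concrete type...
    assert_eq!(describe(&concrete), "chain id: 420");
    // ...and with the unsized trait object behind the Box.
    assert_eq!(describe(boxed.as_ref()), "chain id: 1");
}
```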
pub async fn legacy_transaction( &self, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result { let input_data = self @@ -466,7 +466,7 @@ impl Calldata { pub async fn calldata( &self, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result> { let mut buffer = Vec::::with_capacity(self.size_requirement()); @@ -478,7 +478,7 @@ impl Calldata { pub async fn calldata_into_slice( &self, buffer: &mut Vec, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result<()> { match self { @@ -515,7 +515,7 @@ impl Calldata { pub async fn is_equivalent( &self, other: &[u8], - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result { match self { @@ -557,7 +557,7 @@ impl CalldataItem { #[instrument(level = "info", skip_all, err)] async fn resolve( &self, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result { let mut stack = Vec::>::new(); @@ -662,7 +662,7 @@ impl> CalldataToken { /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146 async fn resolve( self, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), context: ResolutionContext<'_>, ) -> anyhow::Result> { match self { @@ -695,7 +695,7 @@ impl> CalldataToken { context .transaction_hash() .context("No transaction hash provided to get the transaction gas price") - .map(|tx_hash| resolver.transaction_gas_price(tx_hash))? + .map(|tx_hash| resolver.transaction_gas_price(*tx_hash))? .await .map(U256::from) } else if item == Self::GAS_LIMIT_VARIABLE { @@ -799,7 +799,7 @@ mod tests { use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi}; use alloy_primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address}; use alloy_sol_types::SolValue; - use std::collections::HashMap; + use std::{collections::HashMap, pin::Pin}; use super::*; use crate::metadata::ContractIdent; @@ -807,40 +807,63 @@ mod tests { struct MockResolver; impl ResolverApi for MockResolver { - async fn chain_id(&self) -> anyhow::Result { - Ok(0x123) + fn chain_id(&self) -> Pin> + '_>> { + Box::pin(async move { Ok(0x123) }) } - async fn block_gas_limit(&self, _: BlockNumberOrTag) -> anyhow::Result { - Ok(0x1234) + fn block_gas_limit( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x1234) }) } - async fn block_coinbase(&self, _: BlockNumberOrTag) -> anyhow::Result
<Address>
{ - Ok(Address::ZERO) + fn block_coinbase( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(Address::ZERO) }) } - async fn block_difficulty(&self, _: BlockNumberOrTag) -> anyhow::Result { - Ok(U256::from(0x12345u128)) + fn block_difficulty( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(U256::from(0x12345u128)) }) } - async fn block_base_fee(&self, _: BlockNumberOrTag) -> anyhow::Result { - Ok(0x100) + fn block_base_fee( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x100) }) } - async fn block_hash(&self, _: BlockNumberOrTag) -> anyhow::Result { - Ok([0xEE; 32].into()) + fn block_hash( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok([0xEE; 32].into()) }) } - async fn block_timestamp(&self, _: BlockNumberOrTag) -> anyhow::Result { - Ok(0x123456) + fn block_timestamp( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x123456) }) } - async fn last_block_number(&self) -> anyhow::Result { - Ok(0x1234567) + fn last_block_number( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x1234567) }) } - async fn transaction_gas_price(&self, _: &TxHash) -> anyhow::Result { - Ok(0x200) + fn transaction_gas_price( + &self, + _: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x200) }) } } @@ -987,7 +1010,7 @@ mod tests { async fn resolve_calldata_item( input: &str, deployed_contracts: &HashMap, - resolver: &impl ResolverApi, + resolver: &(impl ResolverApi + ?Sized), ) -> anyhow::Result { let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts); CalldataItem::new(input).resolve(resolver, context).await diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index 589aa5c..7632866 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -13,8 +13,10 @@ use serde::{Deserialize, Serialize}; use revive_common::EVMVersion; use revive_dt_common::{ - cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type, - types::Mode, + cached_fs::read_to_string, + iterators::FilesWithExtensionIterator, + macros::define_wrapper_type, + types::{Mode, VmIdentifier}, }; use tracing::error; @@ -81,7 +83,7 @@ pub struct Metadata { /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd /// specify a target of "PolkaVM" in here. #[serde(skip_serializing_if = "Option::is_none")] - pub targets: Option>, + pub targets: Option>, /// A vector of the test cases and workloads contained within the metadata file. This is their /// primary description. diff --git a/crates/format/src/traits.rs b/crates/format/src/traits.rs index 1ad6cc3..ea6e888 100644 --- a/crates/format/src/traits.rs +++ b/crates/format/src/traits.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::pin::Pin; use alloy::eips::BlockNumberOrTag; use alloy::json_abi::JsonAbi; @@ -12,36 +13,54 @@ use crate::metadata::{ContractIdent, ContractInstance}; /// crate implements to go from string calldata and into the bytes calldata. pub trait ResolverApi { /// Returns the ID of the chain that the node is on. - fn chain_id(&self) -> impl Future>; + fn chain_id(&self) -> Pin> + '_>>; /// Returns the gas price for the specified transaction. 
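One detail of the new signatures is easy to miss: the hash parameters change from `&TxHash` to `TxHash` by value. A boxed future tied only to `&self` must not borrow its arguments, and since hashes are small `Copy` arrays, passing them by value sidesteps the lifetime problem entirely. A sketch with a stand-in hash type and trait, assuming the crate's existing `tokio` dependency:

```rust
use std::future::Future;
use std::pin::Pin;

// Stand-in for the real alloy TxHash: a small, Copy byte array.
#[derive(Clone, Copy)]
struct TxHash([u8; 32]);

trait GasApi {
    // Taking `tx` by value means the boxed future only borrows `self`.
    fn gas_price(&self, tx: TxHash) -> Pin<Box<dyn Future<Output = u128> + '_>>;
}

struct Flat;

impl GasApi for Flat {
    fn gas_price(&self, tx: TxHash) -> Pin<Box<dyn Future<Output = u128> + '_>> {
        // `tx` is copied into the future; no borrow of the caller's data.
        Box::pin(async move { u128::from(tx.0[0]) })
    }
}

#[tokio::main]
async fn main() {
    let api: &dyn GasApi = &Flat;
    let hash = TxHash([7; 32]);
    assert_eq!(api.gas_price(hash).await, 7);
    // `hash` is still usable here because TxHash is Copy.
    let _ = hash;
}
```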
- fn transaction_gas_price(&self, tx_hash: &TxHash) -> impl Future>; + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>>; - // TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit + // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit // when we implement the changes to the gas we need to adjust this to be a u64. /// Returns the gas limit of the specified block. - fn block_gas_limit(&self, number: BlockNumberOrTag) -> impl Future>; + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; /// Returns the coinbase of the specified block. - fn block_coinbase(&self, number: BlockNumberOrTag) -> impl Future>; + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; /// Returns the difficulty of the specified block. - fn block_difficulty(&self, number: BlockNumberOrTag) -> impl Future>; + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; /// Returns the base fee of the specified block. - fn block_base_fee(&self, number: BlockNumberOrTag) -> impl Future>; + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; /// Returns the hash of the specified block. - fn block_hash(&self, number: BlockNumberOrTag) -> impl Future>; + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; /// Returns the timestamp of the specified block, fn block_timestamp( &self, number: BlockNumberOrTag, - ) -> impl Future>; + ) -> Pin> + '_>>; /// Returns the number of the last block. - fn last_block_number(&self) -> impl Future>; + fn last_block_number(&self) -> Pin> + '_>>; } #[derive(Clone, Copy, Debug, Default)] diff --git a/crates/node-interaction/Cargo.toml b/crates/node-interaction/Cargo.toml index c5c002e..361a109 100644 --- a/crates/node-interaction/Cargo.toml +++ b/crates/node-interaction/Cargo.toml @@ -9,6 +9,10 @@ repository.workspace = true rust-version.workspace = true [dependencies] +revive-common = { workspace = true } + +revive-dt-format = { workspace = true } + alloy = { workspace = true } anyhow = { workspace = true } diff --git a/crates/node-interaction/src/lib.rs b/crates/node-interaction/src/lib.rs index a6e3b38..57b0b67 100644 --- a/crates/node-interaction/src/lib.rs +++ b/crates/node-interaction/src/lib.rs @@ -1,35 +1,53 @@ //! This crate implements all node interactions. -use alloy::primitives::{Address, StorageKey, U256}; +use std::pin::Pin; +use std::sync::Arc; + +use alloy::primitives::{Address, StorageKey, TxHash, U256}; use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace}; use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest}; use anyhow::Result; +use revive_common::EVMVersion; +use revive_dt_format::traits::ResolverApi; + /// An interface for all interactions with Ethereum compatible nodes. +#[allow(clippy::type_complexity)] pub trait EthereumNode { + fn id(&self) -> usize; + + /// Returns the nodes connection string. + fn connection_string(&self) -> &str; + /// Execute the [TransactionRequest] and return a [TransactionReceipt]. fn execute_transaction( &self, transaction: TransactionRequest, - ) -> impl Future>; + ) -> Pin> + '_>>; /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace]. 
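These signature rewrites are the core of the object-safety work: a `-> impl Future` method gives every implementation a distinct, unnameable return type, which rules out `dyn`, whereas a pinned, boxed future keeps the signature concrete. A minimal sketch of the boxed-future form, using a one-method stand-in trait rather than the real `ResolverApi`, and assuming the crate's existing `anyhow` and `tokio` dependencies:

```rust
use std::future::Future;
use std::pin::Pin;

// Returning a pinned, boxed future keeps the method signature concrete,
// so the trait stays dyn-compatible. `+ '_` ties the future's lifetime
// to the `&self` borrow.
trait ChainApi {
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>>;
}

struct StaticChain(u64);

impl ChainApi for StaticChain {
    fn chain_id(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        // An `async move` block is the usual way to produce the boxed future.
        Box::pin(async move { Ok(self.0) })
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // The whole point: the trait can now live behind `dyn`.
    let api: Box<dyn ChainApi> = Box::new(StaticChain(42));
    assert_eq!(api.chain_id().await?, 42);
    Ok(())
}
```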
fn trace_transaction( &self, - receipt: &TransactionReceipt, + tx_hash: TxHash, trace_options: GethDebugTracingOptions, - ) -> impl Future>; + ) -> Pin> + '_>>; /// Returns the state diff of the transaction hash in the [TransactionReceipt]. - fn state_diff(&self, receipt: &TransactionReceipt) -> impl Future>; + fn state_diff(&self, tx_hash: TxHash) -> Pin> + '_>>; /// Returns the balance of the provided [`Address`] back. - fn balance_of(&self, address: Address) -> impl Future>; + fn balance_of(&self, address: Address) -> Pin> + '_>>; /// Returns the latest storage proof of the provided [`Address`] fn latest_state_proof( &self, address: Address, keys: Vec, - ) -> impl Future>; + ) -> Pin> + '_>>; + + /// Returns the resolver that is to use with this ethereum node. + fn resolver(&self) -> Pin>> + '_>>; + + /// Returns the EVM version of the node. + fn evm_version(&self) -> EVMVersion; } diff --git a/crates/node/src/constants.rs b/crates/node/src/constants.rs index 5b02dca..70cbf66 100644 --- a/crates/node/src/constants.rs +++ b/crates/node/src/constants.rs @@ -1,5 +1,5 @@ /// This constant defines how much Wei accounts are pre-seeded with in genesis. /// -/// Note: After changing this number, check that the tests for kitchensink work as we encountered -/// some issues with different values of the initial balance on Kitchensink. +/// Note: After changing this number, check that the tests for substrate work as we encountered +/// some issues with different values of the initial balance on substrate. pub const INITIAL_BALANCE: u128 = 10u128.pow(37); diff --git a/crates/node/src/geth.rs b/crates/node/src/geth.rs index 13c32de..cf58755 100644 --- a/crates/node/src/geth.rs +++ b/crates/node/src/geth.rs @@ -5,6 +5,7 @@ use std::{ io::{BufRead, BufReader, Read, Write}, ops::ControlFlow, path::PathBuf, + pin::Pin, process::{Child, Command, Stdio}, sync::{ Arc, @@ -24,7 +25,7 @@ use alloy::{ fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, }, rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + EIP1186AccountProofResponse, TransactionRequest, trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, }, }; @@ -92,6 +93,43 @@ impl GethNode { const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); + pub fn new( + context: impl AsRef + + AsRef + + AsRef + + Clone, + ) -> Self { + let working_directory_configuration = + AsRef::::as_ref(&context); + let wallet_configuration = AsRef::::as_ref(&context); + let geth_configuration = AsRef::::as_ref(&context); + + let geth_directory = working_directory_configuration + .as_path() + .join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = geth_directory.join(id.to_string()); + + let wallet = wallet_configuration.wallet(); + + Self { + connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), + data_directory: base_directory.join(Self::DATA_DIRECTORY), + logs_directory: base_directory.join(Self::LOGS_DIRECTORY), + base_directory, + geth: geth_configuration.path.clone(), + id, + handle: None, + start_timeout: geth_configuration.start_timeout_ms, + wallet: wallet.clone(), + chain_id_filler: Default::default(), + nonce_manager: Default::default(), + // We know that we only need to be storing 2 files so we can specify that when creating + // the vector. It's the stdout and stderr of the geth node. 
+ logs_file_to_flush: Vec::with_capacity(2), + } + } + /// Create the node directory and call `geth init` to configure the genesis. #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { @@ -289,320 +327,327 @@ impl GethNode { } impl EthereumNode for GethNode { + fn id(&self) -> usize { + self.id as _ + } + + fn connection_string(&self) -> &str { + &self.connection_string + } + #[instrument( level = "info", skip_all, fields(geth_node_id = self.id, connection_string = self.connection_string), err, )] - async fn execute_transaction( + fn execute_transaction( &self, transaction: TransactionRequest, - ) -> anyhow::Result { - let provider = self - .provider() - .await - .context("Failed to create provider for transaction submission")?; + ) -> Pin> + '_>> + { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create provider for transaction submission")?; - let pending_transaction = provider + let pending_transaction = provider .send_transaction(transaction) .await .inspect_err( |err| tracing::error!(%err, "Encountered an error when submitting the transaction"), ) .context("Failed to submit transaction to geth node")?; - let transaction_hash = *pending_transaction.tx_hash(); + let transaction_hash = *pending_transaction.tx_hash(); - // The following is a fix for the "transaction indexing is in progress" error that we used - // to get. You can find more information on this in the following GH issue in geth - // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, - // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that it - // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was - // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which - // _might_ return the above error if the tx has not yet been indexed yet. So, we need to - // implement a retry mechanism for the receipt to keep retrying to get it until it - // eventually works, but we only do that if the error we get back is the "transaction - // indexing is in progress" error or if the receipt is None. - // - // Getting the transaction indexed and taking a receipt can take a long time especially when - // a lot of transactions are being submitted to the node. Thus, while initially we only - // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for - // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential - // backoff each time we attempt to get the receipt and find that it's not available. - let provider = Arc::new(provider); - poll( - Self::RECEIPT_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(200)), - move || { - let provider = provider.clone(); - async move { - match provider.get_transaction_receipt(transaction_hash).await { - Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), - Ok(None) => Ok(ControlFlow::Continue(())), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), + // The following is a fix for the "transaction indexing is in progress" error that we used + // to get. 
You can find more information on this in the following GH issue in geth + // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, + // before we can get the receipt of the transaction it needs to have been indexed by the + // node's indexer. Just because the transaction has been confirmed it doesn't mean that it + // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was + // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which + // _might_ return the above error if the tx has not yet been indexed yet. So, we need to + // implement a retry mechanism for the receipt to keep retrying to get it until it + // eventually works, but we only do that if the error we get back is the "transaction + // indexing is in progress" error or if the receipt is None. + // + // Getting the transaction indexed and taking a receipt can take a long time especially when + // a lot of transactions are being submitted to the node. Thus, while initially we only + // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for + // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential + // backoff each time we attempt to get the receipt and find that it's not available. + let provider = Arc::new(provider); + poll( + Self::RECEIPT_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + async move { + match provider.get_transaction_receipt(transaction_hash).await { + Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), + Ok(None) => Ok(ControlFlow::Continue(())), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } } } } - } - }, - ) - .instrument(tracing::info_span!( - "Awaiting transaction receipt", - ?transaction_hash - )) - .await + }, + ) + .instrument(tracing::info_span!( + "Awaiting transaction receipt", + ?transaction_hash + )) + .await + }) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn trace_transaction( + fn trace_transaction( &self, - transaction: &TransactionReceipt, + tx_hash: TxHash, trace_options: GethDebugTracingOptions, - ) -> anyhow::Result { - let provider = Arc::new( + ) -> Pin> + '_>> + { + Box::pin(async move { + let provider = Arc::new( + self.provider() + .await + .context("Failed to create provider for tracing")?, + ); + poll( + Self::TRACE_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + let trace_options = trace_options.clone(); + async move { + match provider + .debug_trace_transaction(tx_hash, trace_options) + .await + { + Ok(trace) => Ok(ControlFlow::Break(trace)), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + } + } + } + }, + ) + .await + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await + .context("Failed to 
trace transaction for prestate diff")? + .try_into_pre_state_frame() + .context("Failed to convert trace into pre-state frame")? + { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider() .await - .context("Failed to create provider for tracing")?, - ); - poll( - Self::TRACE_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(200)), - move || { - let provider = provider.clone(); - let trace_options = trace_options.clone(); - async move { - match provider - .debug_trace_transaction(transaction.transaction_hash, trace_options) - .await - { - Ok(trace) => Ok(ControlFlow::Break(trace)), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), - } - } - } - } - }, - ) - .await + .context("Failed to get the Geth provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(transaction, trace_options) - .await - .context("Failed to trace transaction for prestate diff")? - .try_into_pre_state_frame() - .context("Failed to convert trace into pre-state frame")? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - } - - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn balance_of(&self, address: Address) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Geth provider")? - .get_balance(address) - .await - .map_err(Into::into) - } - - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn latest_state_proof( + fn latest_state_proof( &self, address: Address, keys: Vec, - ) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Geth provider")? - .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the Geth provider")? + .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } + + // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; + Ok(Arc::new(GethNodeResolver { id, provider }) as Arc) + }) + } + + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun } } -impl ResolverApi for GethNode { +pub struct GethNodeResolver, P: Provider> { + id: u32, + provider: FillProvider, +} + +impl, P: Provider> ResolverApi for GethNodeResolver { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn chain_id(&self) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Geth provider")? 
-            .get_chain_id()
-            .await
-            .map_err(Into::into)
+    fn chain_id(
+        &self,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
+        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result<u128> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_transaction_receipt(*tx_hash)
-            .await?
-            .context("Failed to get the transaction receipt")
-            .map(|receipt| receipt.effective_gas_price)
+    fn transaction_gas_price(
+        &self,
+        tx_hash: TxHash,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_transaction_receipt(tx_hash)
+                .await?
+                .context("Failed to get the transaction receipt")
+                .map(|receipt| receipt.effective_gas_price)
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
-            .context("Failed to get the Geth block, perhaps there are no blocks?")
-            .map(|block| block.header.gas_limit as _)
+    fn block_gas_limit(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_block_by_number(number)
+                .await
+                .context("Failed to get the geth block")?
+                .context("Failed to get the Geth block, perhaps there are no blocks?")
+                .map(|block| block.header.gas_limit as _)
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
-            .context("Failed to get the Geth block, perhaps there are no blocks?")
-            .map(|block| block.header.beneficiary)
+    fn block_coinbase(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_block_by_number(number)
+                .await
+                .context("Failed to get the geth block")?
+                .context("Failed to get the Geth block, perhaps there are no blocks?")
+                .map(|block| block.header.beneficiary)
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
-            .context("Failed to get the Geth block, perhaps there are no blocks?")
-            .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
+    fn block_difficulty(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_block_by_number(number)
+                .await
+                .context("Failed to get the geth block")?
+                .context("Failed to get the Geth block, perhaps there are no blocks?")
+                .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
-            .context("Failed to get the Geth block, perhaps there are no blocks?")
-            .and_then(|block| {
-                block
-                    .header
-                    .base_fee_per_gas
-                    .context("Failed to get the base fee per gas")
-            })
+    fn block_base_fee(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_block_by_number(number)
+                .await
+                .context("Failed to get the geth block")?
+                .context("Failed to get the Geth block, perhaps there are no blocks?")
+                .and_then(|block| {
+                    block
+                        .header
+                        .base_fee_per_gas
+                        .context("Failed to get the base fee per gas")
+                })
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<B256> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
-            .context("Failed to get the Geth block, perhaps there are no blocks?")
-            .map(|block| block.header.hash)
+    fn block_hash(
+        &self,
+        number: BlockNumberOrTag,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<B256>> + '_>> {
+        Box::pin(async move {
+            self.provider
+                .get_block_by_number(number)
+                .await
+                .context("Failed to get the geth block")?
+                .context("Failed to get the Geth block, perhaps there are no blocks?")
+                .map(|block| block.header.hash)
+        })
     }

     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<u64> {
-        self.provider()
-            .await
-            .context("Failed to get the Geth provider")?
-            .get_block_by_number(number)
-            .await
-            .context("Failed to get the geth block")?
- .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.timestamp) + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.timestamp) + }) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - async fn last_block_number(&self) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Geth provider")? - .get_block_number() - .await - .map_err(Into::into) + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) } } impl Node for GethNode { - fn new( - context: impl AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + Clone, - ) -> Self { - let working_directory_configuration = - AsRef::::as_ref(&context); - let wallet_configuration = AsRef::::as_ref(&context); - let geth_configuration = AsRef::::as_ref(&context); - - let geth_directory = working_directory_configuration - .as_path() - .join(Self::BASE_DIRECTORY); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = geth_directory.join(id.to_string()); - - let wallet = wallet_configuration.wallet(); - - Self { - connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), - data_directory: base_directory.join(Self::DATA_DIRECTORY), - logs_directory: base_directory.join(Self::LOGS_DIRECTORY), - base_directory, - geth: geth_configuration.path.clone(), - id, - handle: None, - start_timeout: geth_configuration.start_timeout_ms, - wallet: wallet.clone(), - chain_id_filler: Default::default(), - nonce_manager: Default::default(), - // We know that we only need to be storing 2 files so we can specify that when creating - // the vector. It's the stdout and stderr of the geth node. - logs_file_to_flush: Vec::with_capacity(2), - } - } - - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn id(&self) -> usize { - self.id as _ - } - - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn connection_string(&self) -> String { - self.connection_string.clone() - } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. 
@@ -645,17 +690,6 @@ impl Node for GethNode { .stdout; Ok(String::from_utf8_lossy(&output).into()) } - - fn matches_target(targets: Option<&[String]>) -> bool { - match targets { - None => true, - Some(targets) => targets.iter().any(|str| str.as_str() == "evm"), - } - } - - fn evm_version() -> EVMVersion { - EVMVersion::Cancun - } } impl Drop for GethNode { @@ -669,11 +703,11 @@ impl Drop for GethNode { mod tests { use super::*; - fn test_config() -> ExecutionContext { - ExecutionContext::default() + fn test_config() -> TestExecutionContext { + TestExecutionContext::default() } - fn new_node() -> (ExecutionContext, GethNode) { + fn new_node() -> (TestExecutionContext, GethNode) { let context = test_config(); let mut node = GethNode::new(&context); node.init(context.genesis_configuration.genesis().unwrap().clone()) @@ -698,7 +732,7 @@ mod tests { let (_context, node) = new_node(); // Act - let chain_id = node.chain_id().await; + let chain_id = node.resolver().await.unwrap().chain_id().await; // Assert let chain_id = chain_id.expect("Failed to get the chain id"); @@ -711,7 +745,12 @@ mod tests { let (_context, node) = new_node(); // Act - let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; + let gas_limit = node + .resolver() + .await + .unwrap() + .block_gas_limit(BlockNumberOrTag::Latest) + .await; // Assert let gas_limit = gas_limit.expect("Failed to get the gas limit"); @@ -724,7 +763,12 @@ mod tests { let (_context, node) = new_node(); // Act - let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; + let coinbase = node + .resolver() + .await + .unwrap() + .block_coinbase(BlockNumberOrTag::Latest) + .await; // Assert let coinbase = coinbase.expect("Failed to get the coinbase"); @@ -737,7 +781,12 @@ mod tests { let (_context, node) = new_node(); // Act - let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; + let block_difficulty = node + .resolver() + .await + .unwrap() + .block_difficulty(BlockNumberOrTag::Latest) + .await; // Assert let block_difficulty = block_difficulty.expect("Failed to get the block difficulty"); @@ -750,7 +799,12 @@ mod tests { let (_context, node) = new_node(); // Act - let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; + let block_hash = node + .resolver() + .await + .unwrap() + .block_hash(BlockNumberOrTag::Latest) + .await; // Assert let _ = block_hash.expect("Failed to get the block hash"); @@ -762,7 +816,12 @@ mod tests { let (_context, node) = new_node(); // Act - let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; + let block_timestamp = node + .resolver() + .await + .unwrap() + .block_timestamp(BlockNumberOrTag::Latest) + .await; // Assert let _ = block_timestamp.expect("Failed to get the block timestamp"); @@ -774,7 +833,7 @@ mod tests { let (_context, node) = new_node(); // Act - let block_number = node.last_block_number().await; + let block_number = node.resolver().await.unwrap().last_block_number().await; // Assert let block_number = block_number.expect("Failed to get the block number"); diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 3ee7b4f..80babec 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -1,34 +1,15 @@ //! This crate implements the testing nodes. 
use alloy::genesis::Genesis; -use revive_common::EVMVersion; -use revive_dt_config::*; use revive_dt_node_interaction::EthereumNode; pub mod common; pub mod constants; pub mod geth; -pub mod kitchensink; -pub mod pool; +pub mod substrate; /// An abstract interface for testing nodes. pub trait Node: EthereumNode { - /// Create a new uninitialized instance. - fn new( - context: impl AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + Clone, - ) -> Self; - - /// Returns the identifier of the node. - fn id(&self) -> usize; - /// Spawns a node configured according to the genesis json. /// /// Blocking until it's ready to accept transactions. @@ -39,16 +20,6 @@ pub trait Node: EthereumNode { /// Blocking until it's completely stopped. fn shutdown(&mut self) -> anyhow::Result<()>; - /// Returns the nodes connection string. - fn connection_string(&self) -> String; - /// Returns the node version. fn version(&self) -> anyhow::Result; - - /// Given a list of targets from the metadata file, this function determines if the metadata - /// file can be ran on this node or not. - fn matches_target(targets: Option<&[String]>) -> bool; - - /// Returns the EVM version of the node. - fn evm_version() -> EVMVersion; } diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs deleted file mode 100644 index 9221d92..0000000 --- a/crates/node/src/pool.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! This crate implements concurrent handling of testing node. - -use std::{ - sync::atomic::{AtomicUsize, Ordering}, - thread, -}; - -use alloy::genesis::Genesis; -use anyhow::Context as _; -use revive_dt_config::{ - ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration, - KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration, - WorkingDirectoryConfiguration, -}; -use tracing::info; - -use crate::Node; - -/// The node pool starts one or more [Node] which then can be accessed -/// in a round robbin fasion. -pub struct NodePool { - next: AtomicUsize, - nodes: Vec, -} - -impl NodePool -where - T: Node + Send + 'static, -{ - /// Create a new Pool. This will start as many nodes as there are workers in `config`. - pub fn new( - context: impl AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + Send - + Sync - + Clone - + 'static, - ) -> anyhow::Result { - let concurrency_configuration = AsRef::::as_ref(&context); - let genesis_configuration = AsRef::::as_ref(&context); - - let nodes = concurrency_configuration.number_of_nodes; - let genesis = genesis_configuration.genesis()?; - - let mut handles = Vec::with_capacity(nodes); - for _ in 0..nodes { - let context = context.clone(); - let genesis = genesis.clone(); - handles.push(thread::spawn(move || spawn_node::(context, genesis))); - } - - let mut nodes = Vec::with_capacity(nodes); - for handle in handles { - nodes.push( - handle - .join() - .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) - .context("Failed to join node spawn thread")? - .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}")) - .context("Node failed to spawn")?, - ); - } - - Ok(Self { - nodes, - next: Default::default(), - }) - } - - /// Get a handle to the next node. 
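The methods dropped from `Node` live on in `EthereumNode`, whose methods return `Pin<Box<dyn Future<...>>>` rather than being `async fn`s; that is what keeps the trait object safe and allows heterogeneous platform lists. A minimal, self-contained illustration of the pattern (all names here are illustrative, not the crate's):

```rust
use std::{future::Future, pin::Pin};

// Shorthand for the boxed-future return type that this patch spells out at
// every call site; the alias itself is hypothetical.
type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = anyhow::Result<T>> + 'a>>;

trait ChainApi {
    // An `async fn` here would make the trait dyn-incompatible; returning a
    // boxed future keeps it object safe.
    fn chain_id(&self) -> BoxFuture<'_, u64>;
}

struct FixedChain(u64);

impl ChainApi for FixedChain {
    fn chain_id(&self) -> BoxFuture<'_, u64> {
        // `Box::pin(async move { .. })` erases the concrete future type.
        Box::pin(async move { Ok(self.0) })
    }
}

// The payoff: implementations can now be stored and mixed as trait objects.
fn boxed(node: FixedChain) -> Box<dyn ChainApi> {
    Box::new(node)
}
```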
- pub fn round_robbin(&self) -> &T { - let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len(); - self.nodes.get(current).unwrap() - } -} - -fn spawn_node( - context: impl AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + Clone - + 'static, - genesis: Genesis, -) -> anyhow::Result { - let mut node = T::new(context); - info!( - id = node.id(), - connection_string = node.connection_string(), - "Spawning node" - ); - node.spawn(genesis) - .context("Failed to spawn node process")?; - info!( - id = node.id(), - connection_string = node.connection_string(), - "Spawned node" - ); - Ok(node) -} diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/substrate.rs similarity index 71% rename from crates/node/src/kitchensink.rs rename to crates/node/src/substrate.rs index 751e583..e738a15 100644 --- a/crates/node/src/kitchensink.rs +++ b/crates/node/src/substrate.rs @@ -2,6 +2,7 @@ use std::{ fs::{File, OpenOptions, create_dir_all, remove_dir_all}, io::{BufRead, Write}, path::{Path, PathBuf}, + pin::Pin, process::{Child, Command, Stdio}, sync::{ Arc, @@ -44,17 +45,21 @@ use sp_runtime::AccountId32; use revive_dt_config::*; use revive_dt_node_interaction::EthereumNode; +use tracing::instrument; use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; static NODE_COUNT: AtomicU32 = AtomicU32::new(0); +/// A node implementation for Substrate based chains. Currently, this supports either substrate +/// or the revive-dev-node which is done by changing the path and some of the other arguments passed +/// to the command. #[derive(Debug)] -pub struct KitchensinkNode { +pub struct SubstrateNode { id: u32, - substrate_binary: PathBuf, - dev_node_binary: PathBuf, + node_binary: PathBuf, eth_proxy_binary: PathBuf, + export_chainspec_command: String, rpc_url: String, base_directory: PathBuf, logs_directory: PathBuf, @@ -63,16 +68,11 @@ pub struct KitchensinkNode { wallet: Arc, nonce_manager: CachedNonceManager, chain_id_filler: ChainIdFiller, - use_kitchensink_not_dev_node: bool, - /// This vector stores [`File`] objects that we use for logging which we want to flush when the - /// node object is dropped. We do not store them in a structured fashion at the moment (in - /// separate fields) as the logic that we need to apply to them is all the same regardless of - /// what it belongs to, we just want to flush them on [`Drop`] of the node. 
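+    /// The stdout and stderr log files of the node and the eth-rpc proxy, kept
+    /// open so that they can be flushed when the node is dropped.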
logs_file_to_flush: Vec, } -impl KitchensinkNode { - const BASE_DIRECTORY: &str = "kitchensink"; +impl SubstrateNode { + const BASE_DIRECTORY: &str = "Substrate"; const LOGS_DIRECTORY: &str = "logs"; const DATA_DIRECTORY: &str = "chains"; @@ -85,52 +85,82 @@ impl KitchensinkNode { const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug"; const PROXY_LOG_ENV: &str = "info,eth-rpc=debug"; - const KITCHENSINK_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log"; - const KITCHENSINK_STDERR_LOG_FILE_NAME: &str = "node_stderr.log"; + const SUBSTRATE_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log"; + const SUBSTRATE_STDERR_LOG_FILE_NAME: &str = "node_stderr.log"; const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; + pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec"; + pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; + + pub fn new( + node_path: PathBuf, + export_chainspec_command: &str, + context: impl AsRef + + AsRef + + AsRef, + ) -> Self { + let working_directory_path = + AsRef::::as_ref(&context).as_path(); + let eth_rpc_path = AsRef::::as_ref(&context) + .path + .as_path(); + let wallet = AsRef::::as_ref(&context).wallet(); + + let substrate_directory = working_directory_path.join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = substrate_directory.join(id.to_string()); + let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); + + Self { + id, + node_binary: node_path, + eth_proxy_binary: eth_rpc_path.to_path_buf(), + export_chainspec_command: export_chainspec_command.to_string(), + rpc_url: String::new(), + base_directory, + logs_directory, + process_substrate: None, + process_proxy: None, + wallet: wallet.clone(), + chain_id_filler: Default::default(), + nonce_manager: Default::default(), + logs_file_to_flush: Vec::with_capacity(4), + } + } + fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); create_dir_all(&self.base_directory) - .context("Failed to create base directory for kitchensink node")?; + .context("Failed to create base directory for substrate node")?; create_dir_all(&self.logs_directory) - .context("Failed to create logs directory for kitchensink node")?; + .context("Failed to create logs directory for substrate node")?; let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); // Note: we do not pipe the logs of this process to a separate file since this is just a // once-off export of the default chain spec and not part of the long-running node process. - let output = if self.use_kitchensink_not_dev_node { - Command::new(&self.substrate_binary) - .arg("export-chain-spec") - .arg("--chain") - .arg("dev") - .output() - .context("Failed to export the chain-spec")? - } else { - Command::new(&self.dev_node_binary) - .arg("build-spec") - .arg("--chain") - .arg("dev") - .output() - .context("Failed to export the chain-spec")? 
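With the kitchensink/dev-node branching removed, the flavor of a `SubstrateNode` is selected entirely by the binary path and the chain-spec export subcommand passed to `new`. A hypothetical call site (`TestExecutionContext` usage mirrors the tests below; the `revive_dev_node_configuration` field name is an assumption for illustration):

```rust
// Construct both Substrate-based flavors from the same type.
fn spawn_both(context: &TestExecutionContext) -> (SubstrateNode, SubstrateNode) {
    let kitchensink = SubstrateNode::new(
        context.kitchensink_configuration.path.clone(),
        SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
        context,
    );
    let revive_dev_node = SubstrateNode::new(
        context.revive_dev_node_configuration.path.clone(),
        SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
        context,
    );
    (kitchensink, revive_dev_node)
}
```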
- }; + let output = Command::new(&self.node_binary) + .arg(self.export_chainspec_command.as_str()) + .arg("--chain") + .arg("dev") + .output() + .context("Failed to export the chain-spec")?; if !output.status.success() { anyhow::bail!( - "substrate-node export-chain-spec failed: {}", + "Substrate-node export-chain-spec failed: {}", String::from_utf8_lossy(&output.stderr) ); } let content = String::from_utf8(output.stdout) - .context("Failed to decode substrate export-chain-spec output as UTF-8")?; + .context("Failed to decode Substrate export-chain-spec output as UTF-8")?; let mut chainspec_json: JsonValue = - serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?; + serde_json::from_str(&content).context("Failed to parse Substrate chain spec JSON")?; let existing_chainspec_balances = chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] @@ -172,10 +202,10 @@ impl KitchensinkNode { serde_json::to_writer_pretty( std::fs::File::create(&template_chainspec_path) - .context("Failed to create kitchensink template chainspec file")?, + .context("Failed to create substrate template chainspec file")?, &chainspec_json, ) - .context("Failed to write kitchensink template chainspec JSON")?; + .context("Failed to write substrate template chainspec JSON")?; Ok(self) } @@ -199,19 +229,15 @@ impl KitchensinkNode { }; // Start Substrate node - let kitchensink_stdout_logs_file = open_options + let substrate_stdout_logs_file = open_options .clone() - .open(self.kitchensink_stdout_log_file_path()) - .context("Failed to open kitchensink stdout logs file")?; - let kitchensink_stderr_logs_file = open_options + .open(self.substrate_stdout_log_file_path()) + .context("Failed to open substrate stdout logs file")?; + let substrate_stderr_logs_file = open_options .clone() - .open(self.kitchensink_stderr_log_file_path()) - .context("Failed to open kitchensink stderr logs file")?; - let node_binary_path = if self.use_kitchensink_not_dev_node { - self.substrate_binary.as_path() - } else { - self.dev_node_binary.as_path() - }; + .open(self.substrate_stderr_log_file_path()) + .context("Failed to open substrate stderr logs file")?; + let node_binary_path = self.node_binary.as_path(); self.process_substrate = Command::new(node_binary_path) .arg("--dev") .arg("--chain") @@ -221,7 +247,7 @@ impl KitchensinkNode { .arg("--rpc-port") .arg(substrate_rpc_port.to_string()) .arg("--name") - .arg(format!("revive-kitchensink-{}", self.id)) + .arg(format!("revive-substrate-{}", self.id)) .arg("--force-authoring") .arg("--rpc-methods") .arg("Unsafe") @@ -231,27 +257,27 @@ impl KitchensinkNode { .arg(u32::MAX.to_string()) .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) .stdout( - kitchensink_stdout_logs_file + substrate_stdout_logs_file .try_clone() - .context("Failed to clone kitchensink stdout log file handle")?, + .context("Failed to clone substrate stdout log file handle")?, ) .stderr( - kitchensink_stderr_logs_file + substrate_stderr_logs_file .try_clone() - .context("Failed to clone kitchensink stderr log file handle")?, + .context("Failed to clone substrate stderr log file handle")?, ) .spawn() - .context("Failed to spawn substrate node process")? + .context("Failed to spawn Substrate node process")? 
.into(); // Give the node a moment to boot if let Err(error) = Self::wait_ready( - self.kitchensink_stderr_log_file_path().as_path(), + self.substrate_stderr_log_file_path().as_path(), Self::SUBSTRATE_READY_MARKER, Duration::from_secs(60), ) { self.shutdown() - .context("Failed to gracefully shutdown after substrate start error")?; + .context("Failed to gracefully shutdown after Substrate start error")?; return Err(error); }; @@ -296,8 +322,8 @@ impl KitchensinkNode { }; self.logs_file_to_flush.extend([ - kitchensink_stdout_logs_file, - kitchensink_stderr_logs_file, + substrate_stdout_logs_file, + substrate_stderr_logs_file, eth_proxy_stdout_logs_file, eth_proxy_stderr_logs_file, ]); @@ -365,14 +391,14 @@ impl KitchensinkNode { Ok(String::from_utf8_lossy(&output).trim().to_string()) } - fn kitchensink_stdout_log_file_path(&self) -> PathBuf { + fn substrate_stdout_log_file_path(&self) -> PathBuf { self.logs_directory - .join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME) + .join(Self::SUBSTRATE_STDOUT_LOG_FILE_NAME) } - fn kitchensink_stderr_log_file_path(&self) -> PathBuf { + fn substrate_stderr_log_file_path(&self) -> PathBuf { self.logs_directory - .join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME) + .join(Self::SUBSTRATE_STDERR_LOG_FILE_NAME) } fn proxy_stdout_log_file_path(&self) -> PathBuf { @@ -386,15 +412,11 @@ impl KitchensinkNode { async fn provider( &self, ) -> anyhow::Result< - FillProvider< - impl TxFiller, - impl Provider, - KitchenSinkNetwork, - >, + FillProvider, impl Provider, ReviveNetwork>, > { ProviderBuilder::new() .disable_recommended_fillers() - .network::() + .network::() .filler(FallbackGasFiller::new( 25_000_000, 1_000_000_000, @@ -409,235 +431,247 @@ impl KitchensinkNode { } } -impl EthereumNode for KitchensinkNode { - async fn execute_transaction( - &self, - transaction: alloy::rpc::types::TransactionRequest, - ) -> anyhow::Result { - let receipt = self - .provider() - .await - .context("Failed to create provider for transaction submission")? - .send_transaction(transaction) - .await - .context("Failed to submit transaction to kitchensink proxy")? - .get_receipt() - .await - .context("Failed to fetch transaction receipt from kitchensink proxy")?; - Ok(receipt) - } - - async fn trace_transaction( - &self, - transaction: &TransactionReceipt, - trace_options: GethDebugTracingOptions, - ) -> anyhow::Result { - let tx_hash = transaction.transaction_hash; - self.provider() - .await - .context("Failed to create provider for debug tracing")? - .debug_trace_transaction(tx_hash, trace_options) - .await - .context("Failed to obtain debug trace from kitchensink proxy") - } - - async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(transaction, trace_options) - .await? - .try_into_pre_state_frame()? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - } - - async fn balance_of(&self, address: Address) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_balance(address) - .await - .map_err(Into::into) - } - - async fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? 
- .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) - } -} - -impl ResolverApi for KitchensinkNode { - async fn chain_id(&self) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_chain_id() - .await - .map_err(Into::into) - } - - async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_transaction_receipt(*tx_hash) - .await? - .context("Failed to get the transaction receipt") - .map(|receipt| receipt.effective_gas_price) - } - - async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .map(|block| block.header.gas_limit as _) - } - - async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .map(|block| block.header.beneficiary) - } - - async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) - } - - async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .and_then(|block| { - block - .header - .base_fee_per_gas - .context("Failed to get the base fee per gas") - }) - } - - async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .map(|block| block.header.hash) - } - - async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_by_number(number) - .await - .context("Failed to get the kitchensink block")? - .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") - .map(|block| block.header.timestamp) - } - - async fn last_block_number(&self) -> anyhow::Result { - self.provider() - .await - .context("Failed to get the Kitchensink provider")? - .get_block_number() - .await - .map_err(Into::into) - } -} - -impl Node for KitchensinkNode { - fn new( - context: impl AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + AsRef - + Clone, - ) -> Self { - let kitchensink_configuration = AsRef::::as_ref(&context); - let dev_node_configuration = AsRef::::as_ref(&context); - let eth_rpc_configuration = AsRef::::as_ref(&context); - let working_directory_configuration = - AsRef::::as_ref(&context); - let wallet_configuration = AsRef::::as_ref(&context); - - let kitchensink_directory = working_directory_configuration - .as_path() - .join(Self::BASE_DIRECTORY); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = kitchensink_directory.join(id.to_string()); - let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); - - let wallet = wallet_configuration.wallet(); - - Self { - id, - substrate_binary: kitchensink_configuration.path.clone(), - dev_node_binary: dev_node_configuration.path.clone(), - eth_proxy_binary: eth_rpc_configuration.path.clone(), - rpc_url: String::new(), - base_directory, - logs_directory, - process_substrate: None, - process_proxy: None, - wallet: wallet.clone(), - chain_id_filler: Default::default(), - nonce_manager: Default::default(), - use_kitchensink_not_dev_node: kitchensink_configuration.use_kitchensink, - // We know that we only need to be storing 4 files so we can specify that when creating - // the vector. 
It's the stdout and stderr of the substrate-node and the eth-rpc. - logs_file_to_flush: Vec::with_capacity(4), - } - } - +impl EthereumNode for SubstrateNode { fn id(&self) -> usize { self.id as _ } - fn connection_string(&self) -> String { - self.rpc_url.clone() + fn connection_string(&self) -> &str { + &self.rpc_url } + fn execute_transaction( + &self, + transaction: alloy::rpc::types::TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let receipt = self + .provider() + .await + .context("Failed to create provider for transaction submission")? + .send_transaction(transaction) + .await + .context("Failed to submit transaction to substrate proxy")? + .get_receipt() + .await + .context("Failed to fetch transaction receipt from substrate proxy")?; + Ok(receipt) + }) + } + + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> + { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for debug tracing")? + .debug_trace_transaction(tx_hash, trace_options) + .await + .context("Failed to obtain debug trace from substrate proxy") + }) + } + + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await? + .try_into_pre_state_frame()? + { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } + + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the substrate provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) + } + + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the substrate provider")? + .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } + + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; + Ok(Arc::new(SubstrateNodeResolver { id, provider }) as Arc) + }) + } + + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } +} + +pub struct SubstrateNodeResolver, P: Provider> { + id: u32, + provider: FillProvider, +} + +impl, P: Provider> ResolverApi + for SubstrateNodeResolver +{ + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? 
+ .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.beneficiary) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .and_then(|block| { + block + .header + .base_fee_per_gas + .context("Failed to get the base fee per gas") + }) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.hash) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.timestamp) + }) + } + + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } +} + +impl Node for SubstrateNode { fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. if let Some(mut child) = self.process_proxy.take() { @@ -647,7 +681,7 @@ impl Node for KitchensinkNode { } if let Some(mut child) = self.process_substrate.take() { child.kill().map_err(|error| { - anyhow::anyhow!("Failed to kill the substrate process: {error:?}") + anyhow::anyhow!("Failed to kill the Substrate process: {error:?}") })?; } @@ -669,41 +703,30 @@ impl Node for KitchensinkNode { } fn version(&self) -> anyhow::Result { - let output = Command::new(&self.substrate_binary) + let output = Command::new(&self.node_binary) .arg("--version") .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() - .context("Failed to spawn kitchensink --version")? + .context("Failed to spawn substrate --version")? .wait_with_output() - .context("Failed to wait for kitchensink --version")? 
+ .context("Failed to wait for substrate --version")? .stdout; Ok(String::from_utf8_lossy(&output).into()) } - - fn matches_target(targets: Option<&[String]>) -> bool { - match targets { - None => true, - Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"), - } - } - - fn evm_version() -> EVMVersion { - EVMVersion::Cancun - } } -impl Drop for KitchensinkNode { +impl Drop for SubstrateNode { fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") } } #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -struct KitchenSinkNetwork; +pub struct ReviveNetwork; -impl Network for KitchenSinkNetwork { +impl Network for ReviveNetwork { type TxType = ::TxType; type TxEnvelope = ::TxEnvelope; @@ -712,7 +735,7 @@ impl Network for KitchenSinkNetwork { type ReceiptEnvelope = ::ReceiptEnvelope; - type Header = KitchenSinkHeader; + type Header = ReviveHeader; type TransactionRequest = ::TransactionRequest; @@ -720,12 +743,12 @@ impl Network for KitchenSinkNetwork { type ReceiptResponse = ::ReceiptResponse; - type HeaderResponse = Header; + type HeaderResponse = Header; - type BlockResponse = Block, Header>; + type BlockResponse = Block, Header>; } -impl TransactionBuilder for ::TransactionRequest { +impl TransactionBuilder for ::TransactionRequest { fn chain_id(&self) -> Option { <::TransactionRequest as TransactionBuilder>::chain_id(self) } @@ -857,7 +880,7 @@ impl TransactionBuilder for ::Transacti fn complete_type( &self, - ty: ::TxType, + ty: ::TxType, ) -> Result<(), Vec<&'static str>> { <::TransactionRequest as TransactionBuilder>::complete_type( self, ty, @@ -874,13 +897,13 @@ impl TransactionBuilder for ::Transacti <::TransactionRequest as TransactionBuilder>::can_build(self) } - fn output_tx_type(&self) -> ::TxType { + fn output_tx_type(&self) -> ::TxType { <::TransactionRequest as TransactionBuilder>::output_tx_type( self, ) } - fn output_tx_type_checked(&self) -> Option<::TxType> { + fn output_tx_type_checked(&self) -> Option<::TxType> { <::TransactionRequest as TransactionBuilder>::output_tx_type_checked( self, ) @@ -894,15 +917,14 @@ impl TransactionBuilder for ::Transacti fn build_unsigned( self, - ) -> alloy::network::BuildResult<::UnsignedTx, KitchenSinkNetwork> - { + ) -> alloy::network::BuildResult<::UnsignedTx, ReviveNetwork> { let result = <::TransactionRequest as TransactionBuilder>::build_unsigned( self, ); match result { Ok(unsigned_tx) => Ok(unsigned_tx), Err(UnbuiltTransactionError { request, error }) => { - Err(UnbuiltTransactionError:: { + Err(UnbuiltTransactionError:: { request, error: match error { TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => { @@ -923,20 +945,18 @@ impl TransactionBuilder for ::Transacti } } - async fn build>( + async fn build>( self, wallet: &W, - ) -> Result< - ::TxEnvelope, - TransactionBuilderError, - > { + ) -> Result<::TxEnvelope, TransactionBuilderError> + { Ok(wallet.sign_request(self).await?) } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct KitchenSinkHeader { +pub struct ReviveHeader { /// The Keccak 256-bit hash of the parent /// block’s header, in its entirety; formally Hp. 
pub parent_hash: B256, @@ -1039,7 +1059,7 @@ pub struct KitchenSinkHeader { pub requests_hash: Option, } -impl BlockHeader for KitchenSinkHeader { +impl BlockHeader for ReviveHeader { fn parent_hash(&self) -> B256 { self.parent_hash } @@ -1137,13 +1157,13 @@ mod tests { use super::*; use crate::Node; - fn test_config() -> ExecutionContext { - let mut context = ExecutionContext::default(); + fn test_config() -> TestExecutionContext { + let mut context = TestExecutionContext::default(); context.kitchensink_configuration.use_kitchensink = true; context } - fn new_node() -> (ExecutionContext, KitchensinkNode) { + fn new_node() -> (TestExecutionContext, SubstrateNode) { // Note: When we run the tests in the CI we found that if they're all // run in parallel then the CI is unable to start all of the nodes in // time and their start up times-out. Therefore, we want all of the @@ -1163,7 +1183,11 @@ mod tests { let _guard = NODE_START_MUTEX.lock().unwrap(); let context = test_config(); - let mut node = KitchensinkNode::new(&context); + let mut node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); node.init(context.genesis_configuration.genesis().unwrap().clone()) .expect("Failed to initialize the node") .spawn_process() @@ -1172,8 +1196,8 @@ mod tests { } /// A shared node that multiple tests can use. It starts up once. - fn shared_node() -> &'static KitchensinkNode { - static NODE: LazyLock<(ExecutionContext, KitchensinkNode)> = LazyLock::new(|| { + fn shared_node() -> &'static SubstrateNode { + static NODE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(|| { let (context, node) = new_node(); (context, node) }); @@ -1222,7 +1246,12 @@ mod tests { } "#; - let mut dummy_node = KitchensinkNode::new(&test_config()); + let context = test_config(); + let mut dummy_node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); // Call `init()` dummy_node @@ -1232,16 +1261,16 @@ mod tests { // Check that the patched chainspec file was generated let final_chainspec_path = dummy_node .base_directory - .join(KitchensinkNode::CHAIN_SPEC_JSON_FILE); + .join(SubstrateNode::CHAIN_SPEC_JSON_FILE); assert!(final_chainspec_path.exists(), "Chainspec file should exist"); let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); // Validate that the Substrate addresses derived from the Ethereum addresses are in the file - let first_eth_addr = KitchensinkNode::eth_to_substrate_address( + let first_eth_addr = SubstrateNode::eth_to_substrate_address( &"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(), ); - let second_eth_addr = KitchensinkNode::eth_to_substrate_address( + let second_eth_addr = SubstrateNode::eth_to_substrate_address( &"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(), ); @@ -1268,7 +1297,12 @@ mod tests { } "#; - let node = KitchensinkNode::new(&test_config()); + let context = test_config(); + let node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); let result = node .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) @@ -1301,7 +1335,7 @@ mod tests { ]; for eth_addr in eth_addresses { - let ss58 = KitchensinkNode::eth_to_substrate_address(ð_addr.parse().unwrap()); + let ss58 = 
SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); println!("Ethereum: {eth_addr} -> Substrate SS58: {ss58}"); } @@ -1329,7 +1363,7 @@ mod tests { ]; for (eth_addr, expected_ss58) in cases { - let result = KitchensinkNode::eth_to_substrate_address(ð_addr.parse().unwrap()); + let result = SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); assert_eq!( result, expected_ss58, "Mismatch for Ethereum address {eth_addr}" @@ -1345,7 +1379,7 @@ mod tests { assert!( version.starts_with("substrate-node"), - "Expected substrate-node version string, got: {version}" + "Expected Substrate-node version string, got: {version}" ); } @@ -1367,7 +1401,7 @@ mod tests { let node = shared_node(); // Act - let chain_id = node.chain_id().await; + let chain_id = node.resolver().await.unwrap().chain_id().await; // Assert let chain_id = chain_id.expect("Failed to get the chain id"); @@ -1380,7 +1414,12 @@ mod tests { let node = shared_node(); // Act - let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; + let gas_limit = node + .resolver() + .await + .unwrap() + .block_gas_limit(BlockNumberOrTag::Latest) + .await; // Assert let _ = gas_limit.expect("Failed to get the gas limit"); @@ -1392,7 +1431,12 @@ mod tests { let node = shared_node(); // Act - let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; + let coinbase = node + .resolver() + .await + .unwrap() + .block_coinbase(BlockNumberOrTag::Latest) + .await; // Assert let _ = coinbase.expect("Failed to get the coinbase"); @@ -1404,7 +1448,12 @@ mod tests { let node = shared_node(); // Act - let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; + let block_difficulty = node + .resolver() + .await + .unwrap() + .block_difficulty(BlockNumberOrTag::Latest) + .await; // Assert let _ = block_difficulty.expect("Failed to get the block difficulty"); @@ -1416,7 +1465,12 @@ mod tests { let node = shared_node(); // Act - let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; + let block_hash = node + .resolver() + .await + .unwrap() + .block_hash(BlockNumberOrTag::Latest) + .await; // Assert let _ = block_hash.expect("Failed to get the block hash"); @@ -1428,7 +1482,12 @@ mod tests { let node = shared_node(); // Act - let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; + let block_timestamp = node + .resolver() + .await + .unwrap() + .block_timestamp(BlockNumberOrTag::Latest) + .await; // Assert let _ = block_timestamp.expect("Failed to get the block timestamp"); @@ -1440,7 +1499,7 @@ mod tests { let node = shared_node(); // Act - let block_number = node.last_block_number().await; + let block_number = node.resolver().await.unwrap().last_block_number().await; // Assert let _ = block_number.expect("Failed to get the block number"); diff --git a/crates/report/src/aggregator.rs b/crates/report/src/aggregator.rs index 222de4d..b3a693d 100644 --- a/crates/report/src/aggregator.rs +++ b/crates/report/src/aggregator.rs @@ -11,8 +11,9 @@ use std::{ use alloy_primitives::Address; use anyhow::{Context as _, Result}; use indexmap::IndexMap; +use revive_dt_common::types::PlatformIdentifier; use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; -use revive_dt_config::{Context, TestingPlatform}; +use revive_dt_config::Context; use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance}; use semver::Version; use serde::Serialize; @@ -84,11 +85,8 @@ impl ReportAggregator { RunnerEvent::TestIgnored(event) => { 
self.handle_test_ignored_event(*event); } - RunnerEvent::LeaderNodeAssigned(event) => { - self.handle_leader_node_assigned_event(*event); - } - RunnerEvent::FollowerNodeAssigned(event) => { - self.handle_follower_node_assigned_event(*event); + RunnerEvent::NodeAssigned(event) => { + self.handle_node_assigned_event(*event); } RunnerEvent::PreLinkContractsCompilationSucceeded(event) => { self.handle_pre_link_contracts_compilation_succeeded_event(*event) @@ -257,28 +255,15 @@ impl ReportAggregator { let _ = self.listener_tx.send(event); } - fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) { + fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) { let execution_information = self.execution_information(&ExecutionSpecifier { test_specifier: event.test_specifier, node_id: event.id, - node_designation: NodeDesignation::Leader, + platform_identifier: event.platform_identifier, }); execution_information.node = Some(TestCaseNodeInformation { id: event.id, - platform: event.platform, - connection_string: event.connection_string, - }); - } - - fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) { - let execution_information = self.execution_information(&ExecutionSpecifier { - test_specifier: event.test_specifier, - node_id: event.id, - node_designation: NodeDesignation::Follower, - }); - execution_information.node = Some(TestCaseNodeInformation { - id: event.id, - platform: event.platform, + platform_identifier: event.platform_identifier, connection_string: event.connection_string, }); } @@ -413,14 +398,11 @@ impl ReportAggregator { specifier: &ExecutionSpecifier, ) -> &mut ExecutionInformation { let test_case_report = self.test_case_report(&specifier.test_specifier); - match specifier.node_designation { - NodeDesignation::Leader => test_case_report - .leader_execution_information - .get_or_insert_default(), - NodeDesignation::Follower => test_case_report - .follower_execution_information - .get_or_insert_default(), - } + test_case_report + .platform_execution + .entry(specifier.platform_identifier) + .or_default() + .get_or_insert_default() } } @@ -455,12 +437,8 @@ pub struct TestCaseReport { /// Information on the status of the test case and whether it succeeded, failed, or was ignored. #[serde(skip_serializing_if = "Option::is_none")] pub status: Option, - /// Information related to the execution on the leader. - #[serde(skip_serializing_if = "Option::is_none")] - pub leader_execution_information: Option, - /// Information related to the execution on the follower. - #[serde(skip_serializing_if = "Option::is_none")] - pub follower_execution_information: Option, + /// Information related to the execution on one of the platforms. + pub platform_execution: BTreeMap>, } /// Information related to the status of the test. Could be that the test succeeded, failed, or that @@ -488,18 +466,18 @@ pub enum TestCaseStatus { }, } -/// Information related to the leader or follower node that's being used to execute the step. +/// Information related to the platform node that's being used to execute the step. #[derive(Clone, Debug, Serialize)] pub struct TestCaseNodeInformation { /// The ID of the node that this case is being executed on. pub id: usize, /// The platform of the node. - pub platform: TestingPlatform, + pub platform_identifier: PlatformIdentifier, /// The connection string of the node. pub connection_string: String, } -/// Execution information tied to the leader or the follower. 
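The aggregator now keys execution information by platform rather than by a leader/follower designation. In miniature, the lookup performed above (types are simplified stand-ins for the ones in crates/report):

```rust
use std::collections::BTreeMap;

// Stand-in for the real `PlatformIdentifier` enum, for illustration only.
type PlatformIdentifier = String;

#[derive(Default)]
struct ExecutionInformation;

struct TestCaseReport {
    // One (optional) execution record per platform the case ran on.
    platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
}

impl TestCaseReport {
    // Mirrors the aggregator's chain: create the per-platform slot on first
    // use, then hand back a mutable reference to the record inside it.
    fn execution_information(
        &mut self,
        platform: PlatformIdentifier,
    ) -> &mut ExecutionInformation {
        self.platform_execution
            .entry(platform)
            .or_default()
            .get_or_insert_default()
    }
}
```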
+/// Execution information tied to the platform. #[derive(Clone, Debug, Default, Serialize)] pub struct ExecutionInformation { /// Information related to the node assigned to this test case. diff --git a/crates/report/src/common.rs b/crates/report/src/common.rs index 5b6e3f1..af08632 100644 --- a/crates/report/src/common.rs +++ b/crates/report/src/common.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, sync::Arc}; -use revive_dt_common::define_wrapper_type; +use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier}; use revive_dt_compiler::Mode; use revive_dt_format::{case::CaseIdx, input::StepIdx}; use serde::{Deserialize, Serialize}; @@ -22,18 +22,12 @@ pub struct TestSpecifier { } /// An absolute path for a test that also includes information about the node that it's assigned to -/// and whether it's the leader or follower. +/// and what platform it belongs to. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ExecutionSpecifier { pub test_specifier: Arc, pub node_id: usize, - pub node_designation: NodeDesignation, -} - -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum NodeDesignation { - Leader, - Follower, + pub platform_identifier: PlatformIdentifier, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] diff --git a/crates/report/src/runner_event.rs b/crates/report/src/runner_event.rs index bdd6c0e..361555c 100644 --- a/crates/report/src/runner_event.rs +++ b/crates/report/src/runner_event.rs @@ -6,8 +6,8 @@ use std::{collections::BTreeMap, path::PathBuf, sync::Arc}; use alloy_primitives::Address; use anyhow::Context as _; use indexmap::IndexMap; +use revive_dt_common::types::PlatformIdentifier; use revive_dt_compiler::{CompilerInput, CompilerOutput}; -use revive_dt_config::TestingPlatform; use revive_dt_format::metadata::Metadata; use revive_dt_format::{corpus::Corpus, metadata::ContractInstance}; use semver::Version; @@ -412,14 +412,14 @@ macro_rules! define_event { pub fn execution_specific_reporter( &self, node_id: impl Into, - node_designation: impl Into<$crate::common::NodeDesignation> + platform_identifier: impl Into ) -> [< $ident ExecutionSpecificReporter >] { [< $ident ExecutionSpecificReporter >] { reporter: self.reporter.clone(), execution_specifier: Arc::new($crate::common::ExecutionSpecifier { test_specifier: self.test_specifier.clone(), node_id: node_id.into(), - node_designation: node_designation.into(), + platform_identifier: platform_identifier.into(), }) } } @@ -434,7 +434,7 @@ macro_rules! define_event { } /// A reporter that's tied to a specific execution of the test case such as execution on - /// a specific node like the leader or follower. + /// a specific node from a specific platform. #[derive(Clone, Debug)] pub struct [< $ident ExecutionSpecificReporter >] { $vis reporter: [< $ident Reporter >], @@ -520,25 +520,14 @@ define_event! { /// A reason for the failure of the test. reason: String, }, - /// An event emitted when the test case is assigned a leader node. - LeaderNodeAssigned { + /// An event emitted when the test case is assigned a platform node. + NodeAssigned { /// A specifier for the test that the assignment is for. test_specifier: Arc, /// The ID of the node that this case is being executed on. id: usize, - /// The platform of the node. - platform: TestingPlatform, - /// The connection string of the node. - connection_string: String, - }, - /// An event emitted when the test case is assigned a follower node. - FollowerNodeAssigned { - /// A specifier for the test that the assignment is for. 
- test_specifier: Arc, - /// The ID of the node that this case is being executed on. - id: usize, - /// The platform of the node. - platform: TestingPlatform, + /// The identifier of the platform used. + platform_identifier: PlatformIdentifier, /// The connection string of the node. connection_string: String, }, diff --git a/run_tests.sh b/run_tests.sh index 88f718a..32791a8 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -89,10 +89,13 @@ echo "This may take a while..." echo "" # Run the tool -RUST_LOG="error" cargo run --release -- execute-tests \ +RUST_LOG="info" cargo run --release -- execute-tests \ + --platform geth-evm-solc \ + --platform revive-dev-node-polkavm-resolc \ --corpus "$CORPUS_FILE" \ --working-directory "$WORKDIR" \ --concurrency.number-of-nodes 5 \ + --concurrency.ignore-concurrency-limit \ --kitchensink.path "$SUBSTRATE_NODE_BIN" \ --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \ --eth-rpc.path "$ETH_RPC_BIN" \