Compare commits

...

23 Commits

Author SHA1 Message Date
pgherveou 2809c2a72c fix 2025-10-08 11:02:08 +00:00
pgherveou dffb80ac0a fixes 2025-10-08 11:18:31 +02:00
pgherveou 43a1114337 custom rpc port 2025-10-08 11:10:46 +02:00
pgherveou 3a07ea042b fix 2025-10-08 10:45:49 +02:00
pgherveou 9e2aa972db fix 2025-10-08 10:33:59 +02:00
pgherveou 86f2173e8b nit 2025-10-08 10:14:22 +02:00
pgherveou 6e658aec49 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:04:38 +02:00
pgherveou 1aba74ec3e fix 2025-10-08 10:03:00 +02:00
pgherveou 180bd64bc5 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:01:36 +02:00
pgherveou 967cbac349 fix 2025-10-08 10:00:32 +02:00
pgherveou a8d84c8360 fix 2025-10-08 09:59:53 +02:00
pgherveou c83a755416 Merge branch 'main' into pg/fmt 2025-10-08 09:59:42 +02:00
pgherveou 0711216539 add fmt check 2025-10-08 09:57:28 +02:00
pgherveou b40c17c0af fixes 2025-10-08 09:52:13 +02:00
pgherveou 8ae994f9de fixes 2025-10-08 09:43:36 +02:00
pgherveou 3f3cbfa934 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:28:45 +02:00
pgherveou c676114fe1 apply fmt 2025-10-08 09:27:11 +02:00
pgherveou 92885351ed use polkadot-sdk rustfmt 2025-10-08 09:26:24 +02:00
pgherveou e16f8ebf59 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:19:21 +02:00
pgherveou d482808eb2 add rustfmt.toml 2025-10-08 07:18:17 +00:00
pgherveou 1f84ce6f61 fix lint 2025-10-08 06:28:57 +00:00
pgherveou 765569a8b6 fix 2025-10-08 08:22:26 +02:00
pgherveou 6e64f678ee ml-runner init 2025-10-07 16:10:43 +00:00
36 changed files with 1121 additions and 197 deletions
+19
@@ -18,9 +18,28 @@ env:
   POLKADOT_VERSION: polkadot-stable2506-2
 
 jobs:
+  fmt:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+      - name: Setup Rust toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+      - name: Install nightly toolchain
+        run: rustup toolchain install nightly
+      - name: Install rustfmt for nightly
+        run: rustup component add --toolchain nightly rustfmt
+      - name: Cargo fmt
+        run: cargo +nightly fmt --all -- --check
+
   cache-polkadot:
     name: Build and cache Polkadot binaries on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
+    needs: [fmt]
     strategy:
       matrix:
         os: [ubuntu-24.04, macos-14]
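Since `cache-polkadot` now declares `needs: [fmt]`, a formatting failure stops the workflow before any Polkadot binaries are built. The same check can be reproduced locally with the commands the job runs: `rustup toolchain install nightly`, `rustup component add --toolchain nightly rustfmt`, then `cargo +nightly fmt --all -- --check`.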
-1
@@ -13,4 +13,3 @@ resolc-compiler-tests
 workdir
 !/schema.json
-!/dev-genesis.json
+21
@@ -4526,6 +4526,27 @@ dependencies = [
  "windows-sys 0.59.0",
 ]
 
+[[package]]
+name = "ml-test-runner"
+version = "0.1.0"
+dependencies = [
+ "alloy",
+ "anyhow",
+ "clap",
+ "revive-dt-common",
+ "revive-dt-compiler",
+ "revive-dt-config",
+ "revive-dt-core",
+ "revive-dt-format",
+ "revive-dt-node",
+ "revive-dt-node-interaction",
+ "revive-dt-report",
+ "temp-dir",
+ "tokio",
+ "tracing",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "moka"
 version = "0.12.10"
+5 -3
@@ -1,9 +1,11 @@
 //! This module implements a cached file system allowing for results to be stored in-memory rather
 //! rather being queried from the file system again.
 
-use std::fs;
-use std::io::{Error, Result};
-use std::path::{Path, PathBuf};
+use std::{
+    fs,
+    io::{Error, Result},
+    path::{Path, PathBuf},
+};
 
 use moka::sync::Cache;
 use once_cell::sync::Lazy;
+1 -2
@@ -1,5 +1,4 @@
-use std::ops::ControlFlow;
-use std::time::Duration;
+use std::{ops::ControlFlow, time::Duration};
 
 use anyhow::{Context as _, Result, anyhow};
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
     };
 }
 
-/// Technically not needed but this allows for the macro to be found in the `macros` module of the
-/// crate in addition to being found in the root of the crate.
+/// Technically not needed but this allows for the macro to be found in the `macros` module of
+/// the crate in addition to being found in the root of the crate.
 pub use {define_wrapper_type, impl_for_wrapper};
+1 -3
@@ -1,9 +1,7 @@
 use crate::types::VersionOrRequirement;
 use semver::Version;
 use serde::{Deserialize, Serialize};
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{fmt::Display, str::FromStr, sync::LazyLock};
 
 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -1,5 +1,4 @@
-use alloy::primitives::U256;
-use alloy::signers::local::PrivateKeySigner;
+use alloy::{primitives::U256, signers::local::PrivateKeySigner};
 
 use anyhow::{Context, Result, bail};
 
 /// This is a sequential private key allocator. When instantiated, it allocated private keys in
+1 -2
@@ -10,8 +10,7 @@ use std::{
     pin::Pin,
 };
 
-use alloy::json_abi::JsonAbi;
-use alloy::primitives::Address;
+use alloy::{json_abi::JsonAbi, primitives::Address};
 use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};
+4 -5
@@ -253,7 +253,8 @@ impl SolidityCompiler for Resolc {
             .evm
             .and_then(|evm| evm.bytecode.clone())
             .context("Unexpected - Contract compiled with resolc has no bytecode")?;
-        let abi = {
+        let abi =
+        {
             let metadata = contract_information
                 .metadata
                 .as_ref()
@@ -277,10 +278,8 @@ impl SolidityCompiler for Resolc {
                     anyhow::bail!("Unsupported type of metadata {metadata:?}")
                 }
             };
-            let solc_metadata = serde_json::from_str::<serde_json::Value>(
-                solc_metadata_str,
-            )
-            .context(
+            let solc_metadata =
+                serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
                     "Failed to deserialize the solc_metadata as a serde_json generic value",
                 )?;
             let output_value = solc_metadata
+4 -4
@@ -21,8 +21,7 @@ use foundry_compilers_artifacts::{
     output_selection::{
         BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
     },
-    solc::CompilerOutput as SolcOutput,
-    solc::*,
+    solc::{CompilerOutput as SolcOutput, *},
 };
 use semver::Version;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -284,8 +283,9 @@ impl SolidityCompiler for Solc {
         _optimize_setting: ModeOptimizerSetting,
         pipeline: ModePipeline,
     ) -> bool {
-        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
-        // (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
+        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
+        // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
+        // is new enough.
         pipeline == ModePipeline::ViaEVMAssembly
             || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
     }
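The reflowed comment encodes a simple version gate. A minimal sketch of that check, written as a free function for illustration (the real logic lives in `Solc::compiler_supports_yul`, whose body is not shown in this diff):

```rust
use semver::Version;

/// Illustrative version gate: --via-ir (and thus mode Y, via Yul IR) is only
/// available from solc 0.8.13 onwards, while mode E works on every version.
fn supports_via_ir(version: &Version) -> bool {
    *version >= Version::new(0, 8, 13)
}

fn main() {
    assert!(supports_via_ir(&Version::new(0, 8, 13)));
    assert!(!supports_via_ir(&Version::new(0, 8, 12)));
}
```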
+14 -14
@@ -202,11 +202,11 @@ impl AsRef<ReportConfiguration> for Context {
 #[derive(Clone, Debug, Parser, Serialize)]
 pub struct TestExecutionContext {
-    /// The working directory that the program will use for all of the temporary artifacts needed at
-    /// runtime.
+    /// The working directory that the program will use for all of the temporary artifacts needed
+    /// at runtime.
     ///
-    /// If not specified, then a temporary directory will be created and used by the program for all
-    /// temporary artifacts.
+    /// If not specified, then a temporary directory will be created and used by the program for
+    /// all temporary artifacts.
     #[clap(
         short,
         long,
@@ -282,11 +282,11 @@ pub struct TestExecutionContext {
 #[derive(Clone, Debug, Parser, Serialize)]
 pub struct BenchmarkingContext {
-    /// The working directory that the program will use for all of the temporary artifacts needed at
-    /// runtime.
+    /// The working directory that the program will use for all of the temporary artifacts needed
+    /// at runtime.
     ///
-    /// If not specified, then a temporary directory will be created and used by the program for all
-    /// temporary artifacts.
+    /// If not specified, then a temporary directory will be created and used by the program for
+    /// all temporary artifacts.
     #[clap(
         short,
         long,
@@ -580,8 +580,8 @@ pub struct ResolcConfiguration {
 pub struct PolkadotParachainConfiguration {
     /// Specifies the path of the polkadot-parachain node to be used by the tool.
     ///
-    /// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary
-    /// that's provided in the user's $PATH.
+    /// If this is not specified, then the tool assumes that it should use the polkadot-parachain
+    /// binary that's provided in the user's $PATH.
     #[clap(
         id = "polkadot-parachain.path",
         long = "polkadot-parachain.path",
@@ -624,8 +624,8 @@ pub struct GethConfiguration {
 pub struct KurtosisConfiguration {
     /// Specifies the path of the kurtosis node to be used by the tool.
     ///
-    /// If this is not specified, then the tool assumes that it should use the kurtosis binary that's
-    /// provided in the user's $PATH.
+    /// If this is not specified, then the tool assumes that it should use the kurtosis binary
+    /// that's provided in the user's $PATH.
     #[clap(
         id = "kurtosis.path",
         long = "kurtosis.path",
@@ -663,8 +663,8 @@ pub struct KitchensinkConfiguration {
 pub struct ReviveDevNodeConfiguration {
     /// Specifies the path of the revive dev node to be used by the tool.
     ///
-    /// If this is not specified, then the tool assumes that it should use the revive dev node binary
-    /// that's provided in the user's $PATH.
+    /// If this is not specified, then the tool assumes that it should use the revive dev node
+    /// binary that's provided in the user's $PATH.
     #[clap(
         id = "revive-dev-node.path",
         long = "revive-dev-node.path",
@@ -59,8 +59,8 @@ pub struct Driver<'a, I> {
     /// The definition of the test that the driver is instructed to execute.
     test_definition: &'a TestDefinition<'a>,
 
-    /// The private key allocator used by this driver and other drivers when account allocations are
-    /// needed.
+    /// The private key allocator used by this driver and other drivers when account allocations
+    /// are needed.
     private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
 
     /// The execution state associated with the platform.
@@ -10,14 +10,15 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
 #[derive(Clone)]
 /// The state associated with the test execution of one of the workloads.
 pub struct ExecutionState {
-    /// The compiled contracts, these contracts have been compiled and have had the libraries linked
-    /// against them and therefore they're ready to be deployed on-demand.
+    /// The compiled contracts, these contracts have been compiled and have had the libraries
+    /// linked against them and therefore they're ready to be deployed on-demand.
     pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 
     /// A map of all of the deployed contracts and information about them.
     pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
 
-    /// This map stores the variables used for each one of the cases contained in the metadata file.
+    /// This map stores the variables used for each one of the cases contained in the metadata
+    /// file.
     pub variables: HashMap<String, U256>,
 }
+22 -5
@@ -131,8 +131,8 @@ pub struct PlatformDriver<'a, I> {
     /// The definition of the test that the driver is instructed to execute.
     test_definition: &'a TestDefinition<'a>,
 
-    /// The private key allocator used by this driver and other drivers when account allocations are
-    /// needed.
+    /// The private key allocator used by this driver and other drivers when account allocations
+    /// are needed.
     private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
 
     /// The execution state associated with the platform.
@@ -415,9 +415,13 @@ where
                 let caller = {
                     let context = self.default_resolution_context();
                     let resolver = self.platform_information.node.resolver().await?;
-                    step.caller
+                    let resolved = step
+                        .caller
                         .resolve_address(resolver.as_ref(), context)
-                        .await?
+                        .await?;
+                    self.platform_information
+                        .node
+                        .resolve_signer_or_default(resolved)
                 };
                 if let (_, _, Some(receipt)) = self
                     .get_or_deploy_contract_instance(&instance, caller, calldata, value)
@@ -445,7 +449,7 @@ where
                     .context("Failed to find deployment receipt for constructor call"),
             Method::Fallback | Method::FunctionName(_) => {
                 let resolver = self.platform_information.node.resolver().await?;
-                let tx = match step
+                let mut tx = match step
                     .as_transaction(resolver.as_ref(), self.default_resolution_context())
                     .await
                 {
@@ -455,6 +459,15 @@ where
                     }
                 };
 
+                // Resolve the signer to ensure we use an address that has keys
+                if let Some(from) = tx.from {
+                    tx.from = Some(
+                        self.platform_information
+                            .node
+                            .resolve_signer_or_default(from),
+                    );
+                }
+
                 self.platform_information.node.execute_transaction(tx).await
             }
         }
@@ -954,6 +967,10 @@ where
         }
 
         let tx = {
+            let deployer = self
+                .platform_information
+                .node
+                .resolve_signer_or_default(deployer);
             let tx = TransactionRequest::default().from(deployer);
             let tx = match value {
                 Some(ref value) => tx.value(value.into_inner()),
@@ -7,10 +7,10 @@ use std::{
     time::{Duration, Instant},
 };
 
+use crate::Platform;
 use anyhow::Context as _;
 use futures::{FutureExt, StreamExt};
 use revive_dt_common::types::PrivateKeyAllocator;
-use revive_dt_core::Platform;
 use tokio::sync::{Mutex, RwLock, Semaphore};
 use tracing::{Instrument, error, info, info_span, instrument};
@@ -10,14 +10,15 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
 #[derive(Clone)]
 /// The state associated with the test execution of one of the tests.
 pub struct ExecutionState {
-    /// The compiled contracts, these contracts have been compiled and have had the libraries linked
-    /// against them and therefore they're ready to be deployed on-demand.
+    /// The compiled contracts, these contracts have been compiled and have had the libraries
+    /// linked against them and therefore they're ready to be deployed on-demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 
     /// A map of all of the deployed contracts and information about them.
     pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
 
-    /// This map stores the variables used for each one of the cases contained in the metadata file.
+    /// This map stores the variables used for each one of the cases contained in the metadata
+    /// file.
     pub variables: HashMap<String, U256>,
 }
+1 -1
@@ -8,10 +8,10 @@ use std::{
     sync::{Arc, LazyLock},
 };
 
+use crate::Platform;
 use futures::FutureExt;
 use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
 use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_core::Platform;
 use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
 
 use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
+1 -1
@@ -2,9 +2,9 @@
 use std::sync::atomic::{AtomicUsize, Ordering};
 
+use crate::Platform;
 use anyhow::Context as _;
 use revive_dt_config::*;
-use revive_dt_core::Platform;
 use revive_dt_node_interaction::EthereumNode;
 
 /// The node pool starts one or more [Node] which then can be accessed
+7 -13
@@ -1,28 +1,22 @@
-use std::collections::BTreeMap;
-use std::sync::Arc;
-use std::{borrow::Cow, path::Path};
+use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
 
 use futures::{Stream, StreamExt, stream};
 use indexmap::{IndexMap, indexmap};
-use revive_dt_common::iterators::EitherIter;
-use revive_dt_common::types::PlatformIdentifier;
+use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
 use revive_dt_config::Context;
 use revive_dt_format::mode::ParsedMode;
 use serde_json::{Value, json};
 
-use revive_dt_compiler::Mode;
-use revive_dt_compiler::SolidityCompiler;
+use revive_dt_compiler::{Mode, SolidityCompiler};
 use revive_dt_format::{
     case::{Case, CaseIdx},
     metadata::MetadataFile,
 };
 use revive_dt_node_interaction::EthereumNode;
-use revive_dt_report::{ExecutionSpecificReporter, Reporter};
-use revive_dt_report::{TestSpecificReporter, TestSpecifier};
+use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
 use tracing::{debug, error, info};
 
-use crate::Platform;
-use crate::helpers::NodePool;
+use crate::{Platform, helpers::NodePool};
 
 pub async fn create_test_definitions_stream<'a>(
     // This is only required for creating the compiler objects and is not used anywhere else in the
@@ -69,8 +63,8 @@ pub async fn create_test_definitions_stream<'a>(
             )
         })
     })
-    // Inform the reporter of each one of the test cases that were discovered which we expect to
-    // run.
+    // Inform the reporter of each one of the test cases that were discovered which we
+    // expect to run.
     .inspect(|(_, _, _, _, reporter)| {
         reporter
             .report_test_case_discovery_event()
+10 -3
@@ -3,6 +3,9 @@
 //! This crate defines the testing configuration and
 //! provides a helper utility to execute tests.
 
+pub mod differential_tests;
+pub mod helpers;
+
 use std::{
     pin::Pin,
     thread::{self, JoinHandle},
@@ -14,13 +17,17 @@ use revive_dt_common::types::*;
 use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
 use revive_dt_config::*;
 use revive_dt_node::{
-    Node, node_implementations::geth::GethNode,
-    node_implementations::lighthouse_geth::LighthouseGethNode,
-    node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombieNode,
+    Node,
+    node_implementations::{
+        geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
+        zombienet::ZombieNode,
+    },
 };
 use revive_dt_node_interaction::EthereumNode;
 use tracing::info;
 
+pub use helpers::CachedCompiler;
+
 /// A trait that describes the interface for the platforms that are supported by the tool.
 #[allow(clippy::type_complexity)]
 pub trait Platform {
+2 -2
@@ -45,8 +45,8 @@ pub struct Case {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<Expected>,
 
-    /// An optional boolean which defines if the case as a whole should be ignored. If null then the
-    /// case will not be ignored.
+    /// An optional boolean which defines if the case as a whole should be ignored. If null then
+    /// the case will not be ignored.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ignore: Option<bool>,
 }
+13 -11
@@ -31,8 +31,8 @@ pub struct MetadataFile {
     /// The path of the metadata file. This will either be a JSON or solidity file.
     pub metadata_file_path: PathBuf,
 
-    /// This is the path contained within the corpus file. This could either be the path of some dir
-    /// or could be the actual metadata file path.
+    /// This is the path contained within the corpus file. This could either be the path of some
+    /// dir or could be the actual metadata file path.
     pub corpus_file_path: PathBuf,
 
     /// The metadata contained within the file.
@@ -69,13 +69,13 @@ impl Deref for MetadataFile {
 /// of steps and assertions that should be performed as part of the test case.
 #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
 pub struct Metadata {
-    /// This is an optional comment on the metadata file which has no impact on the execution in any
-    /// way.
+    /// This is an optional comment on the metadata file which has no impact on the execution in
+    /// any way.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,
 
-    /// An optional boolean which defines if the metadata file as a whole should be ignored. If null
-    /// then the metadata file will not be ignored.
+    /// An optional boolean which defines if the metadata file as a whole should be ignored. If
+    /// null then the metadata file will not be ignored.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ignore: Option<bool>,
 
@@ -94,8 +94,8 @@ pub struct Metadata {
     /// This is a map where the key is the name of the contract instance and the value is the
     /// contract's path and ident in the file.
     ///
-    /// If any contract is to be used by the test then it must be included in here first so that the
-    /// framework is aware of its path, compiles it, and prepares it.
+    /// If any contract is to be used by the test then it must be included in here first so that
+    /// the framework is aware of its path, compiles it, and prepares it.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
 
@@ -123,8 +123,9 @@ pub struct Metadata {
     pub required_evm_version: Option<EvmVersionRequirement>,
 
     /// A set of compilation directives that will be passed to the compiler whenever the contracts
-    /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]
-    /// is just a filter for when a test can run whereas this is an instruction to the compiler.
+    /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
+    /// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
+    /// compiler.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub compiler_directives: Option<CompilationDirectives>,
 }
@@ -326,7 +327,8 @@ define_wrapper_type!(
 )]
 #[serde(try_from = "String", into = "String")]
 pub struct ContractPathAndIdent {
-    /// The path of the contract source code relative to the directory containing the metadata file.
+    /// The path of the contract source code relative to the directory containing the metadata
+    /// file.
     pub contract_source_path: PathBuf,
 
     /// The identifier of the contract.
+5 -6
@@ -1,13 +1,12 @@
 use anyhow::Context as _;
 use regex::Regex;
-use revive_dt_common::iterators::EitherIter;
-use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
+use revive_dt_common::{
+    iterators::EitherIter,
+    types::{Mode, ModeOptimizerSetting, ModePipeline},
+};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};
 
 /// This represents a mode that has been parsed from test metadata.
 ///
+25 -20
@@ -1,11 +1,10 @@
 use std::{collections::HashMap, fmt::Display, str::FromStr};
 
-use alloy::primitives::{FixedBytes, utils::parse_units};
 use alloy::{
     eips::BlockNumberOrTag,
     json_abi::Function,
     network::TransactionBuilder,
-    primitives::{Address, Bytes, U256},
+    primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units},
     rpc::types::TransactionRequest,
 };
 use anyhow::Context as _;
@@ -17,8 +16,10 @@ use serde::{Deserialize, Serialize};
 use revive_dt_common::macros::define_wrapper_type;
 use tracing::{Instrument, info_span, instrument};
 
-use crate::traits::ResolverApi;
-use crate::{metadata::ContractInstance, traits::ResolutionContext};
+use crate::{
+    metadata::ContractInstance,
+    traits::{ResolutionContext, ResolverApi},
+};
 
 /// A test step.
 ///
@@ -147,8 +148,8 @@ pub struct FunctionCallStep {
     #[schemars(skip)]
     pub storage: Option<HashMap<String, Calldata>>,
 
-    /// Variable assignment to perform in the framework allowing us to reference them again later on
-    /// during the execution.
+    /// Variable assignment to perform in the framework allowing us to reference them again later
+    /// on during the execution.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub variable_assignments: Option<VariableAssignments>,
 }
@@ -488,14 +489,13 @@ impl FunctionCallStep {
             anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
         };
 
-        // We follow the same logic that's implemented in the matter-labs-tester where they resolve
-        // the function name into a function selector and they assume that he function doesn't have
-        // any existing overloads.
+        // We follow the same logic that's implemented in the matter-labs-tester where they
+        // resolve the function name into a function selector and they assume that he
+        // function doesn't have any existing overloads.
         // Overloads are handled by providing the full function signature in the "function
         // name".
         // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
-        let selector =
-            if function_name.contains('(') && function_name.contains(')') {
+        let selector = if function_name.contains('(') && function_name.contains(')') {
             Function::parse(function_name)
                 .context(
                     "Failed to parse the provided function name into a function signature",
@@ -511,19 +511,21 @@ impl FunctionCallStep {
                         &self.instance
                     )
                 })
-                .with_context(|| format!(
+                .with_context(|| {
+                    format!(
                         "Failed to resolve function selector for {:?} on instance {:?}",
                         function_name, &self.instance
-                ))?
+                    )
+                })?
                 .selector()
         };
 
-        // Allocating a vector that we will be using for the calldata. The vector size will be:
-        // 4 bytes for the function selector.
+        // Allocating a vector that we will be using for the calldata. The vector size will
+        // be: 4 bytes for the function selector.
         // function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
         //
-        // We're using indices in the following code in order to avoid the need for us to allocate
-        // a new buffer for each one of the resolved arguments.
+        // We're using indices in the following code in order to avoid the need for us to
+        // allocate a new buffer for each one of the resolved arguments.
         let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement());
         calldata.extend(selector.0);
         self.calldata
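The comment above fixes the calldata layout at 4 selector bytes plus one 32-byte word per argument. A self-contained sketch of that layout, with a hypothetical `encode_calldata` helper standing in for the argument-resolution loop in the real code:

```rust
use alloy::primitives::U256;

/// Hypothetical helper mirroring the layout described above: a 4-byte
/// function selector followed by one 32-byte big-endian word per argument.
fn encode_calldata(selector: [u8; 4], args: &[U256]) -> Vec<u8> {
    let mut calldata = Vec::with_capacity(4 + args.len() * 32);
    calldata.extend_from_slice(&selector);
    for arg in args {
        // Each argument occupies one 32-byte big-endian word.
        calldata.extend_from_slice(&arg.to_be_bytes::<32>());
    }
    calldata
}

fn main() {
    // transfer(address,uint256) has selector 0xa9059cbb.
    let data = encode_calldata([0xa9, 0x05, 0x9c, 0xbb], &[U256::from(1u64), U256::from(2u64)]);
    assert_eq!(data.len(), 4 + 2 * 32);
}
```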
@@ -959,9 +961,12 @@ impl<'de> Deserialize<'de> for EtherValue {
 #[cfg(test)]
 mod tests {
-    use alloy::primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
-    use alloy::sol_types::SolValue;
-    use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
+    use alloy::{
+        eips::BlockNumberOrTag,
+        json_abi::JsonAbi,
+        primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address},
+        sol_types::SolValue,
+    };
     use std::{collections::HashMap, pin::Pin};
 
     use super::*;
+6 -6
@@ -1,10 +1,10 @@
-use std::collections::HashMap;
-use std::pin::Pin;
+use std::{collections::HashMap, pin::Pin};
 
-use alloy::eips::BlockNumberOrTag;
-use alloy::json_abi::JsonAbi;
-use alloy::primitives::TxHash;
-use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
+use alloy::{
+    eips::BlockNumberOrTag,
+    json_abi::JsonAbi,
+    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
+};
 use anyhow::Result;
 
 use crate::metadata::{ContractIdent, ContractInstance};
+34
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "ml-test-runner"
path = "src/main.rs"

[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

[lints]
workspace = true
+74
@@ -0,0 +1,74 @@
# ML Test Runner

A test runner for executing Revive differential tests file-by-file with cargo-test-style output.

This is similar to the `retester` binary but designed for ML-based test execution with a focus on:

- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)

## Features

- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms

## Usage

```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth

# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink

# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth

# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail

# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth --start-platform

# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```

## Arguments

- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (`geth`, `kitchensink`, or `zombienet`, default: `geth`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)

## Output Format

The runner produces cargo-test-style output:

```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached

failures:

---- path/to/test2.sol ----
Error: ...

test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```

## Building

```bash
cargo build --release -p ml-test-runner
```

The binary will be available at `target/release/ml-test-runner`.
+541
@@ -0,0 +1,541 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
    iterators::FilesWithExtensionIterator,
    types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
    CachedCompiler, Platform,
    helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
    case::CaseIdx,
    corpus::Corpus,
    metadata::{Metadata, MetadataFile},
};
use std::{
    borrow::Cow,
    collections::{BTreeMap, HashSet},
    fs::File,
    io::{BufRead, BufReader, BufWriter, Write},
    path::{Path, PathBuf},
    sync::Arc,
    time::Instant,
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};

/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
    /// Path to test file (.sol), corpus file (.json), or folder containing .sol files
    #[arg(value_name = "PATH")]
    path: PathBuf,

    /// File to cache tests that have already passed
    #[arg(long = "cached-passed")]
    cached_passed: Option<PathBuf>,

    /// Stop after the first file failure
    #[arg(long = "bail")]
    bail: bool,

    /// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
    #[arg(long = "platform", default_value = "geth-evm-solc")]
    platform: PlatformIdentifier,

    /// Start the platform and wait for RPC readiness
    #[arg(long = "start-platform", default_value = "false")]
    start_platform: bool,

    /// Private key to use for wallet initialization (hex string with or without 0x prefix)
    #[arg(
        long = "private-key",
        default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
    )]
    private_key: String,

    /// RPC port to connect to when using existing node
    #[arg(long = "rpc-port", default_value = "8545")]
    rpc_port: u16,
}

fn main() -> anyhow::Result<()> {
    let subscriber = FmtSubscriber::builder()
        .with_env_filter(EnvFilter::from_default_env())
        .with_writer(std::io::stderr)
        .finish();
    tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber");

    let args = MlTestRunnerArgs::parse();

    info!("ML test runner starting");
    info!("Platform: {:?}", args.platform);
    info!("Start platform: {}", args.start_platform);

    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("Failed building the Runtime")
        .block_on(run(args))
}

async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
    let start_time = Instant::now();

    info!("Discovering test files from: {}", args.path.display());
    let test_files = discover_test_files(&args.path)?;
    info!("Found {} test file(s)", test_files.len());

    let cached_passed = if let Some(cache_file) = &args.cached_passed {
        let cached = load_cached_passed(cache_file)?;
        info!("Loaded {} cached passed test(s)", cached.len());
        cached
    } else {
        HashSet::new()
    };
    let cached_passed = Arc::new(Mutex::new(cached_passed));

    let mut passed_files = 0;
    let mut failed_files = 0;
    let mut skipped_files = 0;
    let mut failures = Vec::new();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const YELLOW: &str = "\x1B[33m";
    const COLOUR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    for test_file in test_files {
        let file_display = test_file.display().to_string();

        // Check if already passed
        {
            let cache = cached_passed.lock().await;
            if cache.contains(&file_display) {
                println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display);
                skipped_files += 1;
                continue;
            }
        }

        info!("Loading metadata from: {}", test_file.display());
        let metadata_file = match load_metadata_file(&test_file) {
            Ok(mf) => {
                info!("Loaded metadata with {} case(s)", mf.cases.len());
                mf
            }
            Err(e) => {
                println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
                println!(" Error loading metadata: {}", e);
                failed_files += 1;
                failures.push((
                    file_display.clone(),
                    format!("Error loading metadata: {}", e),
                ));
                if args.bail {
                    break;
                }
                continue;
            }
        };

        info!("Executing test file: {}", file_display);
        match execute_test_file(&args, &metadata_file).await {
            Ok(_) => {
                println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display);
                info!("Test file passed: {}", file_display);
                passed_files += 1;
                {
                    let mut cache = cached_passed.lock().await;
                    cache.insert(file_display);
                }
            }
            Err(e) => {
                println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display);
                failed_files += 1;
                failures.push((file_display, format!("{:?}", e)));
                if args.bail {
                    info!("Bailing after first failure");
                    break;
                }
            }
        }
    }

    if let Some(cache_file) = &args.cached_passed {
        let cache = cached_passed.lock().await;
        info!("Saving {} cached passed test(s)", cache.len());
        save_cached_passed(cache_file, &cache)?;
    }

    // Print summary
    println!();
    if !failures.is_empty() {
        println!("{BOLD}failures:{BOLD_RESET}");
        println!();
        for (file, error) in &failures {
            println!("---- {} ----", file);
            println!("{}", error);
            println!();
        }
    }

    let elapsed = start_time.elapsed();
    println!(
        "test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
        if failed_files == 0 {
            format!("{GREEN}ok{COLOUR_RESET}")
        } else {
            format!("{RED}FAILED{COLOUR_RESET}")
        },
        passed_files,
        failed_files,
        skipped_files,
        elapsed.as_secs_f64()
    );

    if failed_files > 0 {
        std::process::exit(1);
    }

    Ok(())
}

/// Discover test files from the given path
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
    if !path.exists() {
        anyhow::bail!("Path does not exist: {}", path.display());
    }

    let mut files = Vec::new();

    if path.is_file() {
        let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");
        match extension {
            "sol" => {
                // Single .sol file
                files.push(path.to_path_buf());
            }
            "json" => {
                // Corpus file - enumerate its tests
                let corpus = Corpus::try_from_path(path)?;
                let metadata_files = corpus.enumerate_tests();
                for metadata in metadata_files {
                    files.push(metadata.metadata_file_path);
                }
            }
            _ => anyhow::bail!(
                "Unsupported file extension: {}. Expected .sol or .json",
                extension
            ),
        }
    } else if path.is_dir() {
        // Walk directory recursively for .sol files
        for entry in FilesWithExtensionIterator::new(path)
            .with_allowed_extension("sol")
            .with_use_cached_fs(true)
        {
            files.push(entry);
        }
    } else {
        anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
    }

    Ok(files)
}

/// Load metadata from a test file
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
    let metadata = Metadata::try_from_file(path)
        .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;

    Ok(MetadataFile {
        metadata_file_path: path.to_path_buf(),
        corpus_file_path: path.to_path_buf(),
        content: metadata,
    })
}

/// Execute all test cases in a metadata file
async fn execute_test_file(
    args: &MlTestRunnerArgs,
    metadata_file: &MetadataFile,
) -> anyhow::Result<()> {
    if metadata_file.cases.is_empty() {
        anyhow::bail!("No test cases found in file");
    }
    info!("Processing {} test case(s)", metadata_file.cases.len());

    // Get the platform based on CLI args
    let platform: &dyn Platform = match args.platform {
        PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
        PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
        PlatformIdentifier::KitchensinkPolkavmResolc => {
            &revive_dt_core::KitchensinkPolkavmResolcPlatform
        }
        PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
        PlatformIdentifier::ReviveDevNodePolkavmResolc => {
            &revive_dt_core::ReviveDevNodePolkavmResolcPlatform
        }
        PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
        PlatformIdentifier::ZombienetPolkavmResolc => {
            &revive_dt_core::ZombienetPolkavmResolcPlatform
        }
        PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
    };

    let temp_dir = TempDir::new()?;
    info!("Created temporary directory: {}", temp_dir.path().display());

    let test_context = TestExecutionContext::default();
    let context = revive_dt_config::Context::Test(Box::new(test_context));

    let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
        info!("Starting blockchain node...");
        let node_handle = platform
            .new_node(context.clone())
            .context("Failed to spawn node thread")?;

        info!("Waiting for node to start...");
        let node = node_handle
            .join()
            .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
            .context("Failed to start node")?;
        info!(
            "Node started with ID: {}, connection: {}",
            node.id(),
            node.connection_string()
        );
        let node = Box::leak(node);

        info!("Running pre-transactions...");
        node.pre_transactions()
            .await
            .context("Failed to run pre-transactions")?;
        info!("Pre-transactions completed");

        node
    } else {
        info!("Using existing node");
        let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
            PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc => {
                Box::new(
                    revive_dt_node::node_implementations::geth::GethNode::new_existing(
                        &args.private_key,
                        args.rpc_port,
                    )
                    .await?,
                )
            }
            PlatformIdentifier::KitchensinkPolkavmResolc
            | PlatformIdentifier::KitchensinkRevmSolc
            | PlatformIdentifier::ReviveDevNodePolkavmResolc
            | PlatformIdentifier::ReviveDevNodeRevmSolc
            | PlatformIdentifier::ZombienetPolkavmResolc
            | PlatformIdentifier::ZombienetRevmSolc => Box::new(
                revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
                    &args.private_key,
                    args.rpc_port,
                )
                .await?,
            ),
        };
        Box::leak(existing_node)
    };

    info!("Initializing cached compiler");
    let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
        .await
        .map(Arc::new)
        .context("Failed to create cached compiler")?;

    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        alloy::primitives::U256::from(100),
    )));

    let (reporter, report_task) =
        revive_dt_report::ReportAggregator::new(context.clone()).into_task();
    tokio::spawn(report_task);

    info!(
        "Building test definitions for {} case(s)",
        metadata_file.cases.len()
    );
    let mut test_definitions = Vec::new();
    for (case_idx, case) in metadata_file.cases.iter().enumerate() {
        info!("Building test definition for case {}", case_idx);
        let test_def = build_test_definition(
            metadata_file,
            case,
            case_idx,
            platform,
            node,
            &context,
            &reporter,
        )
        .await?;
        if let Some(test_def) = test_def {
            info!("Test definition for case {} created successfully", case_idx);
            test_definitions.push(test_def);
        }
    }

    info!("Executing {} test definition(s)", test_definitions.len());
    for (idx, test_definition) in test_definitions.iter().enumerate() {
        info!("─────────────────────────────────────────────────────────────────");
        info!(
            "Executing case {}/{}: case_idx={}, mode={}, steps={}",
            idx + 1,
            test_definitions.len(),
            test_definition.case_idx,
            test_definition.mode,
            test_definition.case.steps.len()
        );

        info!("Creating driver for case {}", test_definition.case_idx);
        let driver = revive_dt_core::differential_tests::Driver::new_root(
            test_definition,
            private_key_allocator.clone(),
            &cached_compiler,
        )
        .await
        .context("Failed to create driver")?;

        info!(
            "Running {} step(s) for case {}",
            test_definition.case.steps.len(),
            test_definition.case_idx
        );
        let steps_executed = driver.execute_all().await.context(format!(
            "Failed to execute case {}",
            test_definition.case_idx
        ))?;

        info!(
            "✓ Case {} completed successfully, executed {} step(s)",
            test_definition.case_idx, steps_executed
        );
    }

    info!("─────────────────────────────────────────────────────────────────");
    info!(
        "All {} test case(s) executed successfully",
        test_definitions.len()
    );

    Ok(())
}

/// Build a test definition for a single test case
async fn build_test_definition<'a>(
    metadata_file: &'a MetadataFile,
    case: &'a revive_dt_format::case::Case,
    case_idx: usize,
    platform: &'a dyn Platform,
    node: &'a dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
    reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
    let mode = case
        .modes
        .as_ref()
        .or(metadata_file.modes.as_ref())
        .and_then(|modes| modes.first())
        .and_then(|parsed_mode| parsed_mode.to_modes().next())
        .map(Cow::Owned)
        .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
        .unwrap();

    let compiler = platform
        .new_compiler(context.clone(), mode.version.clone().map(Into::into))
        .await
        .context("Failed to create compiler")?;

    let test_reporter =
        reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
            solc_mode: mode.as_ref().clone(),
            metadata_file_path: metadata_file.metadata_file_path.clone(),
            case_idx: CaseIdx::new(case_idx),
        }));
    let execution_reporter =
        test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

    let mut platforms = BTreeMap::new();
    platforms.insert(
        platform.platform_identifier(),
        TestPlatformInformation {
            platform,
            node,
            compiler,
            reporter: execution_reporter,
        },
    );

    let test_definition = TestDefinition {
        metadata: metadata_file,
        metadata_file_path: &metadata_file.metadata_file_path,
        mode,
        case_idx: CaseIdx::new(case_idx),
        case,
        platforms,
        reporter: test_reporter,
    };

    if let Err((reason, _)) = test_definition.check_compatibility() {
        println!(" Skipping case {}: {}", case_idx, reason);
        return Ok(None);
    }

    Ok(Some(test_definition))
}

/// Load cached passed tests from file
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
    if !path.exists() {
        return Ok(HashSet::new());
    }

    let file = File::open(path).context("Failed to open cached-passed file")?;
    let reader = BufReader::new(file);
    let mut cache = HashSet::new();
    for line in reader.lines() {
        let line = line?;
        let trimmed = line.trim();
        if !trimmed.is_empty() {
            cache.insert(trimmed.to_string());
        }
    }
    Ok(cache)
}

/// Save cached passed tests to file
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let file = File::create(path).context("Failed to create cached-passed file")?;
    let mut writer = BufWriter::new(file);
    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();
    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }
    writer.flush()?;
    Ok(())
}
+12 -5
@@ -1,11 +1,14 @@
 //! This crate implements all node interactions.
 
-use std::pin::Pin;
-use std::sync::Arc;
+use std::{pin::Pin, sync::Arc};
 
-use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
-use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
-use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
+use alloy::{
+    primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
+    rpc::types::{
+        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
+        trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
+    },
+};
 use anyhow::Result;
 use futures::Stream;
@@ -74,6 +77,10 @@ pub trait EthereumNode {
             + '_,
         >,
     >;
+
+    /// Checks if the provided address is in the wallet. If it is, returns the address.
+    /// Otherwise, returns the default signer's address.
+    fn resolve_signer_or_default(&self, address: Address) -> Address;
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
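One plausible shape for an implementation of this new trait method, assuming the node keeps an alloy `EthereumWallet` (as `GethNode` does). This is a sketch of the contract stated in the doc comment, not code from this PR; the per-node implementations are not shown in the diff:

```rust
use alloy::{
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::Address,
};

/// Illustrative free-function version of `resolve_signer_or_default`: keep
/// the address when the wallet holds a key for it, otherwise fall back to
/// the wallet's default signer address.
fn resolve_signer_or_default(wallet: &EthereumWallet, address: Address) -> Address {
    if NetworkWallet::<Ethereum>::has_signer_for(wallet, &address) {
        address
    } else {
        NetworkWallet::<Ethereum>::default_signer_address(wallet)
    }
}
```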
+143 -17
View File
@@ -18,7 +18,9 @@ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount}, genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet}, network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, primitives::{
Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256,
},
providers::{ providers::{
Provider, Provider,
ext::DebugApi, ext::DebugApi,
@@ -75,6 +77,7 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
chain_id: ChainId,
} }
impl GethNode { impl GethNode {
@@ -125,9 +128,120 @@ impl GethNode {
wallet: wallet.clone(), wallet: wallet.clone(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
chain_id: CHAIN_ID,
} }
} }
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key
.trim()
.strip_prefix("0x")
.unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let address = signer.address();
let wallet = Arc::new(EthereumWallet::new(signer));
let connection_string = format!("http://localhost:{}", rpc_port);
let chain_id = ProviderBuilder::new()
.connect_http(connection_string.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
let node = Self {
connection_string: format!("http://localhost:{}", rpc_port),
base_directory: PathBuf::new(),
data_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
geth: PathBuf::new(),
id: 0,
chain_id,
handle: None,
start_timeout: Duration::from_secs(0),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
};
// Check balance and fund if needed
node.ensure_funded(address).await?;
Ok(node)
}
/// Ensure that the given address has at least 1000 ETH, funding it from the node's managed
/// account if necessary.
async fn ensure_funded(&self, address: Address) -> anyhow::Result<()> {
use alloy::{
primitives::utils::{format_ether, parse_ether},
providers::{Provider, ProviderBuilder},
};
let provider = ProviderBuilder::new().connect_http(self.connection_string.parse()?);
let balance = provider.get_balance(address).await?;
let min_balance = parse_ether("1000")?;
if balance >= min_balance {
tracing::info!(
"Wallet {} already has sufficient balance: {} ETH",
address,
format_ether(balance)
);
return Ok(());
}
tracing::info!(
"Funding wallet {} (current: {} ETH, target: 1000 ETH)",
address,
format_ether(balance)
);
// Get the node's managed account
let accounts = provider.get_accounts().await?;
if accounts.is_empty() {
anyhow::bail!("No managed accounts available on the node to fund wallet");
}
let from_account = accounts[0];
let funding_amount = min_balance - balance;
let tx = TransactionRequest::default()
.from(from_account)
.to(address)
.value(funding_amount);
provider
.send_transaction(tx)
.await?
.get_receipt()
.await
.context("Failed to get receipt for funding transaction")?;
tracing::info!("Successfully funded wallet {}", address);
Ok(())
}
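For reference, the 1000 ETH floor and the shortfall arithmetic in isolation; a small sketch using alloy's unit helpers, assuming `parse_ether`/`format_ether` behave as documented (wei-denominated `U256` values with 18 decimals).

```rust
use alloy::primitives::{
    U256,
    utils::{format_ether, parse_ether},
};

fn main() -> anyhow::Result<()> {
    // 1000 ETH expressed in wei: 1000 * 10^18.
    let min_balance = parse_ether("1000")?;
    assert_eq!(min_balance, U256::from(10u128.pow(21)));

    // Funding only covers the shortfall, topping the wallet up to the floor.
    let balance = parse_ether("750")?;
    let funding_amount = min_balance - balance;
    assert_eq!(funding_amount, parse_ether("250")?);

    println!("topping up by {} ETH", format_ether(funding_amount));
    Ok(())
}
```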
    /// Create the node directory and call `geth init` to configure the genesis.
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
@@ -255,7 +369,7 @@ impl GethNode {
        construct_concurrency_limited_provider::<Ethereum, _>(
            self.connection_string.as_str(),
            FallbackGasFiller::default(),
-           ChainIdFiller::new(Some(CHAIN_ID)),
+           ChainIdFiller::new(Some(self.chain_id)),
            NonceFiller::new(self.nonce_manager.clone()),
            self.wallet.clone(),
        )
@@ -349,23 +463,25 @@ impl EthereumNode for GethNode {
.context("Failed to submit transaction to geth node")?; .context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash(); let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used // The following is a fix for the "transaction indexing is in progress" error that we
// to get. You can find more information on this in the following GH issue in geth // used to get. You can find more information on this in the following GH issue in
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, // geth https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the // before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it // node's indexer. Just because the transaction has been confirmed it doesn't mean that
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was // it has been indexed. When we call alloy's `get_receipt` it checks if the
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which // transaction was confirmed. If it has been, then it will call
// _might_ return the above error if the tx has not yet been indexed yet. So, we need to // `eth_getTransactionReceipt` method which _might_ return the above error if the tx
// implement a retry mechanism for the receipt to keep retrying to get it until it // has not yet been indexed yet. So, we need to implement a retry mechanism for the
// eventually works, but we only do that if the error we get back is the "transaction // receipt to keep retrying to get it until it eventually works, but we only do that
// indexing is in progress" error or if the receipt is None. // if the error we get back is the "transaction indexing is in progress" error or if
// the receipt is None.
// //
// Getting the transaction indexed and taking a receipt can take a long time especially when // Getting the transaction indexed and taking a receipt can take a long time especially
// a lot of transactions are being submitted to the node. Thus, while initially we only // when a lot of transactions are being submitted to the node. Thus, while initially
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for // we only allowed for 60 seconds of waiting with a 1 second delay in polling, we
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential // need to allow for a larger wait time. Therefore, in here we allow for 5 minutes of
// backoff each time we attempt to get the receipt and find that it's not available. // waiting with exponential backoff each time we attempt to get the receipt and find
// that it's not available.
        poll(
            Self::RECEIPT_POLLING_DURATION,
            PollingWaitBehavior::Constant(Duration::from_millis(200)),
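The retry strategy the comment above describes can be sketched as a freestanding helper. The workspace's real `poll`/`PollingWaitBehavior` utilities are defined elsewhere and may differ, so treat the names and shapes here as assumptions rather than the crate's API.

```rust
use std::{
    future::Future,
    time::{Duration, Instant},
};

#[derive(Clone, Copy)]
enum WaitBehavior {
    Constant(Duration),
    ExponentialBackoff { initial: Duration, factor: u32 },
}

/// Repeatedly runs `attempt` until it yields a value or the time budget is
/// exhausted. The caller maps retryable outcomes (the "transaction indexing
/// is in progress" error, or a missing receipt) to `Ok(None)`; hard errors
/// propagate immediately via `?`.
async fn poll<T, Fut>(
    budget: Duration,
    behavior: WaitBehavior,
    mut attempt: impl FnMut() -> Fut,
) -> anyhow::Result<T>
where
    Fut: Future<Output = anyhow::Result<Option<T>>>,
{
    let start = Instant::now();
    let mut delay = match behavior {
        WaitBehavior::Constant(d) => d,
        WaitBehavior::ExponentialBackoff { initial, .. } => initial,
    };
    loop {
        if let Some(value) = attempt().await? {
            return Ok(value);
        }
        if start.elapsed() >= budget {
            anyhow::bail!("gave up after {budget:?}");
        }
        tokio::time::sleep(delay).await;
        // Exponential backoff grows the delay between polls; constant keeps it fixed.
        if let WaitBehavior::ExponentialBackoff { factor, .. } = behavior {
            delay = delay.saturating_mul(factor);
        }
    }
}
```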
@@ -542,6 +658,16 @@ impl EthereumNode for GethNode {
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}

pub struct GethNodeResolver {
@@ -761,6 +761,16 @@ impl EthereumNode for LighthouseGethNode {
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}

pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -80,6 +80,7 @@ pub struct SubstrateNode {
    wallet: Arc<EthereumWallet>,
    nonce_manager: CachedNonceManager,
    provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
+   chain_id: alloy::primitives::ChainId,
}

impl SubstrateNode {
@@ -131,9 +132,64 @@ impl SubstrateNode {
            wallet: wallet.clone(),
            nonce_manager: Default::default(),
            provider: Default::default(),
+           chain_id: CHAIN_ID,
        }
    }
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key
.trim()
.strip_prefix("0x")
.unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let wallet = Arc::new(EthereumWallet::new(signer));
let rpc_url = format!("http://localhost:{}", rpc_port);
// Query the chain ID from the RPC
let chain_id = ProviderBuilder::new()
.connect_http(rpc_url.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
Ok(Self {
id: 0,
node_binary: PathBuf::new(),
eth_proxy_binary: PathBuf::new(),
export_chainspec_command: String::new(),
rpc_url,
base_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
substrate_process: None,
eth_proxy_process: None,
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
chain_id,
})
}
    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
        let _ = remove_dir_all(self.base_directory.as_path());
        let _ = clear_directory(&self.base_directory);
@@ -350,7 +406,7 @@ impl SubstrateNode {
        construct_concurrency_limited_provider::<ReviveNetwork, _>(
            self.rpc_url.as_str(),
            FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
-           ChainIdFiller::new(Some(CHAIN_ID)),
+           ChainIdFiller::new(Some(self.chain_id)),
            NonceFiller::new(self.nonce_manager.clone()),
            self.wallet.clone(),
        )
@@ -541,6 +597,16 @@ impl EthereumNode for SubstrateNode {
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}

pub struct SubstrateNodeResolver {
@@ -3,25 +3,17 @@
 //! ## Required Binaries
 //! This module requires the following binaries to be compiled and available in your PATH:
 //!
-//! 1. **polkadot-parachain**:
-//!    ```bash
-//!    git clone https://github.com/paritytech/polkadot-sdk.git
-//!    cd polkadot-sdk
-//!    cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain
-//!    ```
+//! 1. **polkadot-parachain**: ```bash git clone https://github.com/paritytech/polkadot-sdk.git cd
+//!    polkadot-sdk cargo build --release --locked -p polkadot-parachain-bin --bin
+//!    polkadot-parachain ```
 //!
-//! 2. **eth-rpc** (Revive EVM RPC server):
-//!    ```bash
-//!    git clone https://github.com/paritytech/polkadot-sdk.git
-//!    cd polkadot-sdk
-//!    cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc
-//!    ```
+//! 2. **eth-rpc** (Revive EVM RPC server): ```bash git clone https://github.com/paritytech/polkadot-sdk.git
+//!    cd polkadot-sdk cargo build --locked --profile production -p pallet-revive-eth-rpc --bin
+//!    eth-rpc ```
 //!
-//! 3. **polkadot** (for the relay chain):
-//!    ```bash
-//!    # In polkadot-sdk directory
-//!    cargo build --locked --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker
-//!    ```
+//! 3. **polkadot** (for the relay chain): ```bash # In polkadot-sdk directory cargo build --locked
+//!    --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin
+//!    polkadot-execute-worker ```
 //!
 //! Make sure to add the build output directories to your PATH or provide
 //! the full paths in your configuration.
@@ -585,6 +577,16 @@ impl EthereumNode for ZombieNode {
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
}
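This exact body now appears on four node types (GethNode, LighthouseGethNode, SubstrateNode, ZombieNode). A shared free function would shrink each impl to a one-liner; a sketch, under the assumption that nothing else differs per node (hypothetical helper, not part of this PR):

```rust
use alloy::{
    network::{Ethereum, EthereumWallet, NetworkWallet},
    primitives::Address,
};

/// Mirrors the four identical trait-method bodies above.
fn resolve_signer_or_default(wallet: &EthereumWallet, address: Address) -> Address {
    let signer_addresses: Vec<_> =
        <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(wallet).collect();
    if signer_addresses.contains(&address) {
        address
    } else {
        wallet.default_signer().address()
    }
}
```

Each impl could then delegate with `resolve_signer_or_default(&self.wallet, address)`; iterating with `.any(|a| a == address)` instead of collecting into a `Vec` would also avoid the allocation.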
pub struct ZombieNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
+2 -1
View File
@@ -442,7 +442,8 @@ impl Report {
#[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was
    /// ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
    /// Information related to the execution on one of the platforms.
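The serde attribute here means an unset status vanishes from the report's JSON instead of serializing as `"status": null`. A minimal demonstration with an illustrative struct (not the real report type):

```rust
use serde::Serialize;

#[derive(Serialize, Default)]
struct Example {
    // Omitted entirely from the output when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    status: Option<String>,
}

fn main() -> serde_json::Result<()> {
    assert_eq!(serde_json::to_string(&Example::default())?, "{}");
    assert_eq!(
        serde_json::to_string(&Example { status: Some("passed".into()) })?,
        r#"{"status":"passed"}"#
    );
    Ok(())
}
```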
+4 -2
View File
@@ -8,8 +8,10 @@ use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
-use revive_dt_format::metadata::Metadata;
-use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
+use revive_dt_format::{
+    corpus::Corpus,
+    metadata::{ContractInstance, Metadata},
+};
use semver::Version;
use tokio::sync::{broadcast, oneshot};